from posixpath import join
import threading
from civis.response import PaginatedResponse, convert_response_data_type
def tostr_urljoin(*x):
return join(*map(str, x))
class CivisJobFailure(Exception):
def __init__(self, err_msg, response=None):
self.error_message = err_msg
self.response = response
def __str__(self):
return self.error_message
class CivisAPIError(Exception):
def __init__(self, response):
if response.content: # the API itself gave an error response
json = response.json()
self.error_message = json["errorDescription"]
else: # this was something like a 502
self.error_message = response.reason
self.status_code = response.status_code
self._response = response
def __str__(self):
if self.status_code:
return "({}) {}".format(self.status_code, self.error_message)
else:
return self.error_message
class EmptyResultError(Exception):
pass
class CivisAPIKeyError(Exception):
pass
class Endpoint:
_base_url = "https://api.civisanalytics.com/"
_lock = threading.Lock()
def __init__(self, session, return_type='civis'):
self._session = session
self._return_type = return_type
def _build_path(self, path):
if not path:
return self._base_url
return tostr_urljoin(self._base_url, path.strip("/"))
def _make_request(self, method, path=None, params=None, data=None,
**kwargs):
url = self._build_path(path)
with self._lock:
response = self._session.request(method, url, json=data,
params=params, **kwargs)
if response.status_code in [204, 205]:
return
if response.status_code == 401:
auth_error = response.headers["www-authenticate"]
raise CivisAPIKeyError(auth_error) from CivisAPIError(response)
if not response.ok:
raise CivisAPIError(response)
return response
def _call_api(self, method, path=None, params=None, data=None, **kwargs):
iterator = kwargs.pop('iterator', False)
if iterator:
return PaginatedResponse(path, params, self)
else:
resp = self._make_request(method, path, params, data, **kwargs)
resp = convert_response_data_type(resp,
return_type=self._return_type)
return resp
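# Usage sketch (hypothetical, not part of the library): `_call_api` either
# returns a single response converted according to `return_type`, or, with
# `iterator=True`, a `PaginatedResponse` that lazily pages through results.
# A plain `requests.Session` stands in here for the authenticated Civis session.
#
#   import requests
#   endpoint = Endpoint(requests.Session(), return_type='raw')
#   resp = endpoint._call_api("GET", "users")                  # one converted response
#   for item in endpoint._call_api("GET", "users", iterator=True):
#       ...                                                    # one object per page item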
| python |
class InstantTest:
pass
| python |
import os
import numpy as np
from PIL import Image
import cv2
import pickle
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
image_dir = os.path.join(BASE_DIR, "images")
face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
current_id = 0
label_ids = {}
y_labels = []
x_train = []
for root, dirs, files in os.walk(image_dir):
for file in files:
if file.endswith("png") or file.endswith("jpg") or file.endswith("JPG"):
path = os.path.join(root, file)
label = os.path.basename(root).replace(" ","-").lower()
if label not in label_ids:
label_ids[label] = current_id
current_id += 1
id_ = label_ids[label]
pil_image = Image.open(path).convert("L") #grayscale
size = (550,550)
            final_image = pil_image.resize(size, Image.ANTIALIAS)
            image_array = np.array(final_image, "uint8")  # train on the resized grayscale image
faces = face_cascade.detectMultiScale(image_array)
for (x,y,w,h) in faces:
                roi = image_array[y: y+h, x: x+w]
x_train.append(roi)
y_labels.append(id_)
#print(y_labels)
#print(x_train)
with open("labels.pickle", 'wb') as f:
pickle.dump(label_ids,f)
recognizer.train(x_train,np.array(y_labels))
recognizer.save("trainer.yml") | python |
from rest_framework import status
from .base_test import BaseTestCase
class TestProfile(BaseTestCase):
"""Test the User profile GET responses"""
all_profiles_url = 'http://127.0.0.1:8000/api/profiles/'
my_profile_url = 'http://127.0.0.1:8000/api/profiles/jane'
def test_get_all_profiles_without_account_activation(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.get(self.all_profiles_url)
self.assertNotEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('Your account is inactive', str(response.data))
def test_get_all_profiles_without_login2(self):
        response = self.client.get(self.all_profiles_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_cannot_update_my_profiles_without_login(self):
self.client.credentials(HTTP_AUTHORIZATION=self.token)
response = self.client.put(self.my_profile_url)
self.assertNotEqual(response.status_code, status.HTTP_200_OK)
def test_cannot_update_my_profiles_without_login2(self):
response = self.client.put(self.my_profile_url)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| python |
import lambdser
import multiprocessing as mp
def make_proxy(para, *funcs):
# make proxy for the mp
ser_list = []
for f in funcs:
ser_list.append(lambdser.dumps(f))
return para, ser_list
def processor(*ser):
    # unpack the proxy and do the work
para, funcs = ser
funcs = [lambdser.loads(ser) for ser in funcs]
res = None
for f in funcs:
res = f(para)
print(res)
return res
def do_stuff():
two = "2"
ser = make_proxy("4", lambda x: x + two)
mp.Process(target=processor, args=ser).start()
if __name__ == "__main__":
do_stuff()
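# Why the serialization above is needed (a sketch, assuming lambdser's
# dumps/loads round-trips closures): multiprocessing pickles Process args, and
# plain pickle cannot serialize lambdas or their captured variables. Packing
# the lambdas as lambdser byte strings in make_proxy and rebuilding them in
# processor sidesteps that, so do_stuff() prints "42" ("4" + "2") in the child.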
| python |
from numbers import Number
import torch
from torch.distributions import constraints, Gamma, MultivariateNormal
from torch.distributions.multivariate_normal import _batch_mv, _batch_mahalanobis
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, _standard_normal
from scipy import stats
import math
__all__ = ('GeneralizedNormal', 'DoubleGamma', 'MultivariateT')
class GeneralizedNormal(Distribution):
r"""
Creates a Generalized Normal distribution parameterized by :attr:`loc`, :attr:`scale`, and :attr:`beta`.
Example::
>>> m = GeneralizedNormal(torch.tensor([0.0]), torch.tensor([1.0]), torch.tensor(0.5))
>>> m.sample() # GeneralizedNormal distributed with loc=0, scale=1, beta=0.5
tensor([ 0.1337])
Args:
loc (float or Tensor): mean of the distribution
scale (float or Tensor): scale of the distribution
beta (float or Tensor): shape parameter of the distribution
"""
arg_constraints = {'loc': constraints.real, 'scale': constraints.positive, 'beta': constraints.positive}
support = constraints.real
has_rsample = False
@property
def mean(self):
return self.loc
@property
def variance(self):
return self.scale.pow(2) * (torch.lgamma(3/self.beta) - torch.lgamma(1/self.beta)).exp()
@property
def stddev(self):
        return self.variance**0.5
def __init__(self, loc, scale, beta, validate_args=None):
self.loc, self.scale = broadcast_all(loc, scale)
(self.beta,) = broadcast_all(beta)
self.scipy_dist = stats.gennorm(loc=self.loc.cpu().detach().numpy(),
scale=self.scale.cpu().detach().numpy(),
beta=self.beta.cpu().detach().numpy())
if isinstance(loc, Number) and isinstance(scale, Number):
batch_shape = torch.Size()
else:
batch_shape = self.loc.size()
super(GeneralizedNormal, self).__init__(batch_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(GeneralizedNormal, _instance)
batch_shape = torch.Size(batch_shape)
new.loc = self.loc.expand(batch_shape)
new.scale = self.scale.expand(batch_shape)
super(GeneralizedNormal, new).__init__(batch_shape, validate_args=False)
new._validate_args = self._validate_args
return new
def sample(self, sample_shape=torch.Size()):
sample_shape = sample_shape + self.loc.size()
return torch.tensor(self.scipy_dist.rvs(
list(sample_shape),
random_state=torch.randint(2**32, ()).item()), # Make deterministic if torch is seeded
dtype=self.loc.dtype, device=self.loc.device)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
return (-torch.log(2 * self.scale) - torch.lgamma(1/self.beta) + torch.log(self.beta)
- torch.pow((torch.abs(value - self.loc) / self.scale), self.beta))
def cdf(self, value):
if isinstance(value, torch.Tensor):
value = value.numpy()
return torch.tensor(self.scipy_dist.cdf(value),
dtype=self.loc.dtype, device=self.loc.device)
def icdf(self, value):
raise NotImplementedError
def entropy(self):
return (1/self.beta) - torch.log(self.beta) + torch.log(2*self.scale) + torch.lgamma(1/self.beta)
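# Consistency sketch (commented out; assumes this module imports as-is): the
# closed-form log_prob above should match scipy.stats.gennorm.logpdf, since
# both use the same (loc, scale, beta) parameterization.
#
#   d = GeneralizedNormal(torch.tensor(0.), torch.tensor(1.), torch.tensor(1.5))
#   x = torch.linspace(-3., 3., 7)
#   ref = torch.as_tensor(stats.gennorm.logpdf(x.numpy(), 1.5), dtype=x.dtype)
#   assert torch.allclose(d.log_prob(x), ref, atol=1e-6)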
class DoubleGamma(Gamma):
mean = 0.
@property
def variance(self):
return self.concentration * (1 + self.concentration) / self.rate.pow(2)
def rsample(self, sample_shape=torch.Size()):
x = super().rsample(sample_shape)
sign = torch.randint(0, 2, x.size(), device=x.device, dtype=x.dtype).mul_(2).sub_(1)
return x*sign
def log_prob(self, value):
return super().log_prob(value.abs()) - math.log(2)
entropy = NotImplemented
_log_normalizer = NotImplemented
class MultivariateT(MultivariateNormal):
"""
Multivariate Student-t distribution, using hierarchical Gamma sampling.
(see https://arxiv.org/abs/1402.4306)
We only allow degrees of freedom > 2 for now,
because otherwise the covariance is undefined.
    Uses the parameterization from Shah et al. 2014, which makes its covariance
    equal to the given covariance matrix.
"""
arg_constraints = {'df': constraints.positive,
'loc': constraints.real_vector,
'covariance_matrix': constraints.positive_definite,
'precision_matrix': constraints.positive_definite,
'scale_tril': constraints.lower_cholesky}
support = constraints.real
has_rsample = True
expand = NotImplemented
def __init__(self,
event_shape: torch.Size,
df=3.,
loc=0.,
covariance_matrix=None,
precision_matrix=None,
scale_tril=None,
validate_args=None):
super().__init__(loc=loc,
covariance_matrix=covariance_matrix,
precision_matrix=precision_matrix,
scale_tril=scale_tril,
validate_args=validate_args)
# self._event_shape is inferred from the mean vector and covariance matrix.
old_event_shape = self._event_shape
if not len(event_shape) >= len(old_event_shape):
raise NotImplementedError("non-elliptical MVT not in this class")
assert len(event_shape) >= 1
assert event_shape[-len(old_event_shape):] == old_event_shape
# Cut dimensions from the end of `batch_shape` so the `total_shape` is
# the same
total_shape = list(self._batch_shape) + list(self._event_shape)
self._batch_shape = torch.Size(total_shape[:-len(event_shape)])
self._event_shape = torch.Size(event_shape)
self.df, _ = broadcast_all(df, torch.ones(self._batch_shape))
self.gamma = Gamma(concentration=self.df/2., rate=1/2)
def rsample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
eps = _standard_normal(shape, dtype=self.loc.dtype, device=self.loc.device)
r_inv = self.gamma.rsample(sample_shape=sample_shape)
scale = ((self.df-2) / r_inv).sqrt()
# We want 1 gamma for every `event` only. The size of self.df and this
# `.view` provide that
scale = scale.view(scale.size() + torch.Size([1] * len(self._event_shape)))
return self.loc + scale * _batch_mv(self._unbroadcasted_scale_tril, eps)
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
diff = value - self.loc
M = _batch_mahalanobis(self._unbroadcasted_scale_tril, diff)
n_dim = len(self._event_shape)
p = diff.size()[-n_dim:].numel()
if n_dim > 1:
M = M.sum(tuple(range(-n_dim+1, 0)))
log_diag = self._unbroadcasted_scale_tril.diagonal(dim1=-2, dim2=-1).log()
if n_dim > log_diag.dim():
half_log_det = log_diag.sum() * (p / log_diag.numel())
else:
half_log_det = log_diag.sum(tuple(range(-n_dim, 0))) * (
p / log_diag.size()[-n_dim:].numel())
lambda_ = self.df - 2.
lp = torch.lgamma((p+self.df)/2.) \
- ((p/2.) * torch.log(math.pi * lambda_)) \
- torch.lgamma(self.df / 2.) \
- half_log_det \
- ((self.df+p)/2.) * torch.log(1 + M/lambda_)
return lp
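# Note on the parameterization above (Shah et al. 2014), written out: rsample
# draws g ~ Gamma(df/2, rate=1/2) (a chi-squared with df degrees of freedom)
# and eps ~ N(0, I), then returns
#     x = loc + sqrt((df - 2) / g) * (L @ eps),   with L L^T = covariance_matrix.
# Since E[1/g] = 1/(df - 2) for chi^2_df, E[(df - 2)/g] = 1, so Cov(x) equals
# covariance_matrix exactly, rather than the usual df/(df - 2) * Sigma.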
| python |
import json
from pathlib import Path
from typing import Tuple
from segmantic.seg import dataset
def dataset_mockup(root_path: Path, size: int = 3) -> Tuple[Path, Path]:
image_dir, labels_dir = root_path / "image", root_path / "label"
image_dir.mkdir()
labels_dir.mkdir()
for idx in range(size):
(image_dir / f"img-{idx}.nii.gz").touch()
(labels_dir / f"img-{idx}.nii.gz").touch()
return image_dir, labels_dir
def test_PairedDataSet(tmp_path: Path):
image_dir, labels_dir = dataset_mockup(root_path=tmp_path, size=3)
ds = dataset.PairedDataSet(
image_dir=image_dir, labels_dir=labels_dir, valid_split=0.2
)
assert len(ds.training_files()) == 2
assert len(ds.validation_files()) == 1
ds.check_matching_filenames()
ds = dataset.PairedDataSet(
image_dir=image_dir, labels_dir=labels_dir, valid_split=0
)
assert len(ds.training_files()) == 3
assert len(ds.validation_files()) == 0
ds.check_matching_filenames()
def test_load_from_json(tmp_path: Path):
image_dir, labels_dir = dataset_mockup(root_path=tmp_path, size=3)
dataset_file = tmp_path / "dataset.json"
dataset_file.write_text(
json.dumps(
{
"training": [
{
"image": f"{image_dir.name}/*.nii.gz",
"label": f"{labels_dir.name}/*.nii.gz",
}
]
}
)
)
ds = dataset.PairedDataSet.load_from_json(dataset_file, valid_split=0.2)
assert len(ds.training_files()) == 2
assert len(ds.validation_files()) == 1
ds.check_matching_filenames()
# now dump and try to re-load
dataset_file2 = tmp_path / "dataset_dump.json"
dataset_file2.write_text(ds.dump_dataset())
ds = dataset.PairedDataSet.load_from_json(dataset_file2, valid_split=0.2)
assert len(ds.training_files()) == 2
assert len(ds.validation_files()) == 1
ds.check_matching_filenames()
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Created on Mon Apr 10 17:41:24 2017
# DEPENDENCIES:
import numpy as np
import random
# FUNCTION THAT CREATES GAUSSIAN MULTIVARIATE 2D DATASETS, D = features, N = observations
def create_multivariate_Gauss_2D_dataset(mean, sigma, N_observations):
    np.random.seed(444445) # Seeding for consistency and reproducibility, preferably seed > 100000
    MEAN_2D = np.array([mean,mean]) # 2D mean vector
    I_2D = np.matrix(np.eye(2)) # 2x2 identity matrix
    COV_MATRIX_2D = sigma*I_2D # isotropic covariance; np.array([[sigma,0],[0,sigma]]) would work as well
SAMPLE_SET = np.random.multivariate_normal(MEAN_2D,COV_MATRIX_2D , N_observations).T
#print("MEAN_2D:\n", MEAN_2D); print("\nCOV_MATRIX_2D:\n", COV_MATRIX_2D); print("\nI_2D:\n", I_2D) ; print("\nSAMPLE_SET.shape:", SAMPLE_SET.shape)
return(SAMPLE_SET)
#%%
# Calling create_multivariate_Gauss_2D_dataset function with desired parameters:
SAMPLE_SET_220 = (create_multivariate_Gauss_2D_dataset(1,0.5,220))
SAMPLE_SET_280 = (create_multivariate_Gauss_2D_dataset(-1,0.75,280))
# Merge into one unified unlabeled dataset:
DATASET = np.concatenate((SAMPLE_SET_220, SAMPLE_SET_280), axis=1)
#%%
# CODE BLOCK FOR PLOTTING UNIFIED DATASET, NO LABELS:
from matplotlib import pyplot as plt
#from mpl_toolkits.mplot3d import Axes3D
#from mpl_toolkits.mplot3d import proj3d
from matplotlib import style
style.use('bmh')
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
#plt.rcParams['legend.fontsize'] = 7
ax.plot(SAMPLE_SET_220 [0,:], SAMPLE_SET_220 [1,:], '.', markersize=8, color='yellow', alpha=0.567, label='SUBSET 220')
ax.plot(SAMPLE_SET_280 [0,:], SAMPLE_SET_280 [1,:], '.', markersize=8, color='teal', alpha=0.567, label='SUBSET 280')
plt.title('DATA POINTS OF THE TWO SUBSETS')
ax.legend(loc='lower left')
plt.show()
## for the maxiters_counter, upon loop completion do: maxiters_counter -=1
#def K_MEANS(X, k, maxiters):#maxiters_counter = maxiters
# Note: iterate over X_vectors so each item is one observation (a (D,) sample), NOT a feature column (an (N,) array)!
#%%
# Temporarily dumped here:
def K_means(DATASET, k, maxiters):
X_vectors = [j for j in DATASET.T] #x_vector.shape = (1,2) ; type(x_vector) = matrix
# Generate a list with k random samples from the DATASET as first centroids:
random_k_centroids_list = [random.choice(X_vectors) for k in range(0,k)]
#for i in range reps:
iter_counter = 0
# Init just once and outside while
centroids_list = random_k_centroids_list
SSSE = 0 # Sum of Sum Standard Errors of k clusters
while iter_counter != maxiters: # or maxiters_counter!=0: #Converge or stop it!
        # A list holding the cluster label assigned to each observation (D,) of the dataset, e.g. [0, 0, 1, 2, 0, ...]
# label is the cluster number, 1,2 etc
y = []
        # Initializing a dict with as many keys as the number of clusters, k
clusters_dict = {}
# Looping through k number of centroids to create k keys of the dictionary:
# each key is a cluster label
for i in range(0,len(centroids_list)):
# Initializing each dictionary key's values, setting it as an empty list
# Key values will be populated with the samples allocated to the cluster
clusters_dict[i] = []
# Looping through observations to calculate distance from centroids & allocate to centroid with minimum distance
for j in X_vectors:
distances = [np.linalg.norm(j - c) for c in centroids_list] # calculating at once distances from all centroids
label = distances.index(min(distances)) # the index of the min distance is the label of the cluster
clusters_dict[label].append(j) # append the observation of this loop, to the values of the dict key with the respective label
y.append(label) # keep a list that holds in which cluster the observations have been allocated;
SSSE+= distances[label] #distortion measure , Bishop 9.1 ?
for i in range(0,k):
print("centroid_"+str(i),": ", (centroids_list)[i].T) # temporary, just for checking the random centroids
centroids_from_mean = [] # initialize a list that will hold the new centroids, as calculated by the mean of all observations that made it in the cluster
        for u in range(0,k):
            try:
                centroids_from_mean.append(sum(clusters_dict[u])/len(clusters_dict[u])) # mean of the observations assigned to cluster u
                print("cluster_"+str(u),": ", len(clusters_dict[u]))
                print("cluster_"+str(u),"mean: ", sum(clusters_dict[u])/len(clusters_dict[u]))
            except ZeroDivisionError:
                centroids_from_mean.append(0*X_vectors[0]) # no sample was allocated to this cluster; fall back to a zero centroid
                print("cluster_"+str(u),": empty")
#centroids_list = centroids_list
print("\n\ncentroids_from_mean:", centroids_from_mean)
print("\n\ncentroids_list:", centroids_list)
print("len(y)", len(y))
#print(centroids_from_mean)
# Check for convergence or keep them centroids dancing around:
        # np.allclose reference: http://stackoverflow.com/questions/10580676/comparing-two-numpy-arrays-for-equality-element-wise
        # (see also the official np.allclose documentation)
        if not np.allclose(np.matrix(centroids_list), np.matrix(centroids_from_mean)): # True here means the centroids still moved noticeably (np.allclose default tolerances), so keep iterating
            centroids_list = centroids_from_mean # use the mean-based centroids for the following iteration
            iter_counter += 1 # count this iteration; the loop stops once iter_counter == maxiters
print("iteration:" ,iter_counter)
else:
from matplotlib import style
style.use('bmh')
colors = [ "teal","coral", "yellow", "#37BC61", "pink","#CC99CC","teal", 'coral']
for cluster in clusters_dict:
color = colors[cluster]
for vector in np.asarray(clusters_dict[cluster]):
plt.scatter(vector[0], vector[1], marker="o", color=color, s=2, linewidths=4, alpha=0.876)
for centroid in range(0,len(centroids_from_mean)):
plt.scatter(centroids_from_mean[centroid][0], centroids_from_mean[centroid][1], marker="x", color="black", s=100, linewidths=4)
plt.title("Clustering (K-means) with k = "+str(k)+" and SSSE = "+str(int(SSSE)) )
plt.savefig("clustering_Kmeans_with_k_eq_"+str(k)+"_cristina_"+str(int(SSSE))+".png", dpi=300)
return(SSSE, y, centroids_from_mean, plt.show())
break
#==============================================================================
# #%%
#==============================================================================
# print("\n\ntype(SAMPLE_SET_220)", type(SAMPLE_SET_220))
# print("\n\nSAMPLE_SET_220.shape:", SAMPLE_SET_220.shape)
# print("type(clusters_dict[0])",type(clusters_dict[0]))
# print("\n\ntype(np.asarray(clusters_dict[0]))", type(np.asarray(clusters_dict[0])))
# print("\n\nnp.asarray(clusters_dict[0])", np.asarray(clusters_dict[0]).shape)
#==============================================================================
#==============================================================================
# RUN FOR REPS:
# clusterings = []
# for k in range(1,10):
# clusterings.append(K_means(DATASET,5, 100))
# #
#==============================================================================
#==============================================================================
#clustering_0 = K_means(DATASET,4, 100)
#%%
# CAUTION!! BUILT-INS KICK IN :
#%% elbow plot: Distortion - Number of Clusters
#==============================================================================
# FIND OUT HOW MANY k YOU SHOULD USE FOR THE CLUSTERING, "Elbow Method"
#==============================================================================
#==============================================================================
# from sklearn.cluster import KMeans
# import matplotlib.pyplot as plt
# distortions = [] # Distortion, the Sum of Squared errors within a cluster.
# for i in range(1, 11): # Let's test the performance of clusterings with different k, kE[1,11]
# km = KMeans(n_clusters=i,
# init='k-means++',
# n_init=10,
# max_iter=300,
# random_state=0)
# km.fit(DATASET.T) # sklearn wants the data .T if you have them Features x Observations
# distortions.append(km.inertia_)
# plt.plot(range(1,11), distortions, marker='o', color = "coral")
# plt.xlabel('Number of clusters')
# plt.ylabel('Distortion')
# plt.title("Elbow Curve Method: Choose Optimal Number of Centroids", fontsize = 10) # color = "teal")
#
# plt.show()
#==============================================================================
#==============================================================================
# #%%
# from sklearn.cluster import KMeans
# km = KMeans(n_clusters=3,
# init='k-means++',
# n_init=10,
# max_iter=300,
# tol=1e-04,
# random_state=0)
# y_km = km.fit_predict(DATASET.T)
#
#
#
# import numpy as np
# from matplotlib import cm
# from sklearn.metrics import silhouette_samples
# cluster_labels = np.unique(y_km)
# n_clusters = cluster_labels.shape[0]
# silhouette_vals = silhouette_samples(DATASET.T, y_km, metric='euclidean')
#
# y_ax_lower, y_ax_upper = 0, 0
# yticks = []
#
#
# colors = [ "teal","coral", "yellow", "#37BC61", "pink","#CC99CC","teal", 'coral']
# for i, c in enumerate(cluster_labels):
# c_silhouette_vals = silhouette_vals[y_km == c]
# c_silhouette_vals.sort()
# y_ax_upper += len(c_silhouette_vals)
# color = colors[i]
#
# plt.barh(range(y_ax_lower, y_ax_upper),
# c_silhouette_vals,
# height=1.0,
# edgecolor='none',
# color=color)
#
# yticks.append((y_ax_lower + y_ax_upper) / 2)
# y_ax_lower += len(c_silhouette_vals)
#
# silhouette_avg = np.mean(silhouette_vals)
# plt.axvline(silhouette_avg, color="red", linestyle="--")
#
# plt.yticks(yticks, cluster_labels + 1)
# plt.ylabel('Cluster')
# plt.xlabel('Silhouette coefficient')
# plt.title("Silhouette coefficient plot for k = 3")
# plt.savefig("silh_coeff_k_eq3"+".png", dpi=300)
# plt.show()
#==============================================================================
#%%
#%%
#==============================================================================
# from sklearn.cluster import KMeans
# km = KMeans(n_clusters=2,
# init='k-means++',
# n_init=10,
# max_iter=300,
# tol=1e-04,
# random_state=0)
# y_km = km.fit_predict(DATASET.T)
#
#==============================================================================
#==============================================================================
#
# import numpy as np
# from matplotlib import cm
# from sklearn.metrics import silhouette_samples
# cluster_labels = np.unique(y_km)
# n_clusters = cluster_labels.shape[0]
# silhouette_vals = silhouette_samples(DATASET.T, y_km, metric='euclidean')
#
# y_ax_lower, y_ax_upper = 0, 0
# yticks = []
#
#
# colors = [ "teal","coral", "yellow", "#37BC61", "pink","#CC99CC","teal", 'coral']
# for i, c in enumerate(cluster_labels):
# c_silhouette_vals = silhouette_vals[y_km == c]
# c_silhouette_vals.sort()
# y_ax_upper += len(c_silhouette_vals)
# color = colors[i]
#
# plt.barh(range(y_ax_lower, y_ax_upper),
# c_silhouette_vals,
# height=1.0,
# edgecolor='none',
# color=color)
#
# yticks.append((y_ax_lower + y_ax_upper) / 2)
# y_ax_lower += len(c_silhouette_vals)
#
# silhouette_avg = np.mean(silhouette_vals)
# plt.axvline(silhouette_avg, color="red", linestyle="--")
#
# plt.yticks(yticks, cluster_labels + 1)
# plt.ylabel('Cluster')
# plt.xlabel('Silhouette coefficient')
# plt.title("Silhouette coefficient plot for k = 2")
# plt.savefig("silh_coeff_k_eq2"+".png", dpi=300)
# plt.show()
#
#==============================================================================
| python |
# Generated by Django 3.1.7 on 2021-03-17 12:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backoffice', '0027_auto_20210317_1314'),
]
operations = [
migrations.AlterField(
model_name='partitionformulla',
name='input',
field=models.IntegerField(default=1, verbose_name='Quantité en entrée'),
),
migrations.AlterField(
model_name='partitionformulla',
name='input_unit',
field=models.CharField(default='', max_length=100, verbose_name='Unité de mesure en entrée'),
),
]
| python |
# -*- coding: utf8 -*-
from __future__ import unicode_literals
from django.db import models
from datetime import datetime
from users.models import UserProfile
# Create your models here.
class Tab(models.Model):
name = models.CharField(max_length=50, verbose_name='标签名称')
add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
class Meta:
verbose_name = '标签'
verbose_name_plural = verbose_name
def getNodes(self):
return Node.objects.filter(tab=self)
def __unicode__(self):
return self.name
class Node(models.Model):
name = models.CharField(max_length=50, verbose_name='节点名称', unique=True)
tab = models.ForeignKey(Tab, verbose_name='所属标签', null=True)
add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
desc = models.CharField(default='', max_length=200, verbose_name='描述')
image = models.ImageField(max_length=200, upload_to='image/%Y/%m', null=True, default='image/default/node.png',
verbose_name='节点图片')
class Meta:
verbose_name = '论坛节点'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
class Topic(models.Model):
title = models.CharField(max_length=100, verbose_name='标题')
content = models.TextField(verbose_name='内容')
node = models.ForeignKey(Node, verbose_name='节点', null=True)
created_by = models.ForeignKey(UserProfile, verbose_name='创建者')
add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
modify_time = models.DateTimeField(verbose_name='修改时间', blank=True, null=True)
click_nums = models.IntegerField(default=0, verbose_name='点击数')
# last_reply_user = models.CharField(max_length=50, verbose_name='最新回复用户名', null=True, default='')
class Meta:
verbose_name = '主题'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.title
class Reply(models.Model):
content = models.TextField(verbose_name='内容')
created_by = models.ForeignKey(UserProfile, verbose_name='创建者')
add_time = models.DateTimeField(default=datetime.now, verbose_name='添加时间')
modify_time = models.DateTimeField(verbose_name='修改时间', blank=True, null=True)
topic = models.ForeignKey(Topic, verbose_name='所属主题')
seq_num = models.IntegerField(verbose_name='序号')
class Meta:
verbose_name = '主题回复'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.topic.title[:20] + str(self.seq_num) + 'L 回复'
| python |
import unittest
import requests
from pyalt.api.objects import AltObject
class TestAPIObjects(unittest.TestCase):
def setUp(self):
url_fmt = "https://online-shkola.com.ua/api/v2/users/1269/thematic/subject/{}"
self.responses = {
requests.get(url_fmt.format(n))
for n in (3, 4, 6)
}
def _verify(self, src, dest):
if isinstance(src, list):
for src_item, dest_item in zip(src, dest):
self._verify(src_item, dest_item)
return
if isinstance(src, dict):
for key, src_value in src.items():
dest_value = getattr(dest, key)
self._verify(src_value, dest_value)
return
self.assertEqual(src, dest)
def test__from_json(self):
for response in self.responses:
self._verify(
response.json(),
AltObject.from_json(response.content)
)
def test__from_request(self):
for response in self.responses:
self._verify(
response.json(),
AltObject.from_response(response),
)
| python |
import logging
import re
from collections import OrderedDict
from io import StringIO
import numpy as np
from .._exceptions import ReadError
from .._files import open_file
from .._helpers import register
from .._mesh import CellBlock, Mesh
float_pattern = r"[+-]?(?:\d+\.?\d*|\d*\.?\d+)"
float_re = re.compile(float_pattern)
point_pattern = r"{0}\s+{0}\s+{0}(?:\s+{0})?".format(float_pattern)
point_re = re.compile(point_pattern)
triangle_pattern = r"\(\s*\(\s*({})\s*\)\s*\)".format(
r"\s*,\s*".join(point_pattern for _ in range(4))
)
triangle_re = re.compile(triangle_pattern)
tin_pattern = fr"TIN\s*\((?:\s*{triangle_pattern}\s*,?)*\s*\)"
tin_re = re.compile(tin_pattern)
def read_str(s):
s = s.strip()
tin_match = tin_re.match(s)
if tin_match is None:
raise ReadError("Invalid WKT TIN")
point_idxs = OrderedDict()
tri_idxs = []
for tri_match in triangle_re.finditer(tin_match.group()):
tri_point_idxs = []
for point_match in point_re.finditer(tri_match.group()):
point = []
for float_match in float_re.finditer(point_match.group()):
point.append(float(float_match.group()))
point = tuple(point)
if point not in point_idxs:
point_idxs[point] = len(point_idxs)
tri_point_idxs.append(point_idxs[point])
if tri_point_idxs[-1] != tri_point_idxs[0]:
raise ValueError("Triangle is not a closed linestring")
tri_idxs.append(tri_point_idxs[:-1])
try:
point_arr = np.array(list(point_idxs), np.float64)
except ValueError as e:
if len({len(p) for p in point_idxs}) > 1:
raise ReadError("Points have mixed dimensionality")
else:
raise e
tri_arr = np.array(tri_idxs, np.uint64)
return Mesh(point_arr, [CellBlock("triangle", tri_arr)])
def arr_to_str(arr):
return " ".join(str(item) for item in arr)
def read(filename):
with open_file(filename) as f:
return read_str(f.read())
def write(filename, mesh):
with open_file(filename, "w") as f:
write_buffer(f, mesh)
def write_buffer(f, mesh):
    skip = [c.type for c in mesh.cells if c.type != "triangle"]
    if skip:
        logging.warning(f'WKT only supports triangle cells. Skipping {", ".join(skip)}.')
triangles = mesh.get_cells_type("triangle")
f.write("TIN (")
joiner = ""
for tri_points in mesh.points[triangles]:
f.write(
"{0}(({1}, {2}, {3}, {1}))".format(
joiner, *(arr_to_str(p) for p in tri_points)
)
)
joiner = ", "
f.write(")")
def write_str(mesh):
buf = StringIO()
write_buffer(buf, mesh)
buf.seek(0)
return buf.read()
register("wkt", [".wkt"], read, {"wkt": write})
| python |
# -*- coding:utf-8 -*-
# author:Anson
from __future__ import unicode_literals
import os
import sys
import re
from datetime import date, datetime, timedelta
from docx import Document
import xlwt
from settings import MD_PATH, SITE_1, SITE_2, CELL
reload(sys)
sys.setdefaultencoding('utf-8')
def get_file_path(path, week_of, table1, table2, first_date, today, worksheet, site_1, site_2,
first_date_of, today_of):
style = xlwt.XFStyle()
bl = xlwt.Borders()
bl.left = xlwt.Borders.THIN
bl.right = xlwt.Borders.THIN
bl.top = xlwt.Borders.THIN
bl.bottom = xlwt.Borders.THIN
al = xlwt.Alignment()
    al.horz = 0x02  # horizontal centering
    al.vert = 0x01  # vertical centering
style.alignment = al
style.borders = bl
nums = 0
file_date = date.today().strftime('%Y-%m')
for filename in os.listdir(path):
file_path = os.path.join(path, filename)
group_name = re.findall(r'.*2019-08-(.*)..*', filename)[0][0:-2]
fd = filename[:7]
md = file_path[-2:]
if md == 'md':
if fd == file_date:
with open(file_path) as f:
lines = f.readlines()
lines = [i.strip('-').strip() for i in lines]
if len(lines) == 0:
first_index = 0
else:
for key, value in enumerate(lines):
if value == week_of:
first_index = key
else:
first_index = 0
k = 0
line_list = []
index = 0
while k < len(lines):
if lines[k] == week_of:
index += 1
first_index = k
line_list.append(lines[k])
else:
if k > first_index:
if lines[k][:1] == '#':
break
else:
line_list.append(lines[k])
k += 1
line = [i.strip('#').strip() for i in line_list]
d = 0
trade_today = False
yearst_today = False
s1 = ''
s2 = ''
sor_index = 0
while d < len(line):
if line[d].strip()[:1] == '*':
if sor_index != 0:
worksheet.write(site_1, 2, s1, style)
worksheet.write(site_2, 2, s2, style)
s1 = ''
s2 = ''
yearst_today = False
nums += 1
site_1 += 1
site_2 += 1
name = line[d].strip('*').strip()
worksheet.write(site_1, 1, str(nums), style)
worksheet.write(site_1, 3, first_date, style)
worksheet.write(site_1, 4, today, style)
worksheet.write(site_1, 5, name, style)
worksheet.write(site_2, 1, str(nums), style)
worksheet.write(site_2, 3, first_date_of, style)
worksheet.write(site_2, 4, today_of, style)
worksheet.write(site_2, 5, name, style)
table1.rows[nums].cells[0].add_paragraph(str(nums))
table1.rows[nums].cells[2].add_paragraph(first_date)
table1.rows[nums].cells[3].add_paragraph(today)
table1.rows[nums].cells[4].add_paragraph(name)
table1.rows[nums].cells[5].add_paragraph(group_name)
table2.rows[nums].cells[0].add_paragraph(str(nums))
table2.rows[nums].cells[2].add_paragraph(first_date_of)
table2.rows[nums].cells[3].add_paragraph(today_of)
table2.rows[nums].cells[4].add_paragraph(name)
table2.rows[nums].cells[5].add_paragraph(group_name)
d += 1
sor_index += 1
if line[d] == '本周工作':
trade_today = True
d += 1
if (line[d].strip()[1:2] == '.' or line[d].strip()[1:2] == ')') and trade_today:
                            # This week's work items
table1.rows[nums].cells[1].add_paragraph(line[d])
s1 = s1 + ' ' + line[d]
if line[d] == '下周工作' or line[d] == '下周计划':
trade_today = False
yearst_today = True
d += 1
if (line[d].strip()[1:2] == '.' or line[d].strip()[1:2] == ')') and yearst_today:
                            # Next week's work items
table2.rows[nums].cells[1].add_paragraph(line[d])
s2 = s2 + ' ' + line[d]
d += 1
worksheet.write(site_1, 2, s1, style)
worksheet.write(site_2, 2, s2, style)
def get_week_of_month(year, month, day):
"""
获取指定的某天是某个月中的第几周
周一作为一周的开始
"""
end = int(datetime(year, month, day).strftime("%W"))
begin = int(datetime(year, month, 1).strftime("%W"))
star_date = end - begin + 1
if star_date == 1:
week_of = '# 第一周'
elif star_date == 2:
week_of = '# 第二周'
elif star_date == 3:
week_of = '# 第三周'
elif star_date == 4:
week_of = '# 第四周'
elif star_date == 5:
week_of = '# 第五周'
else:
week_of = '# 第六周'
return week_of
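# Worked example (hypothetical dates): July 2019 starts on a Monday, so
#   get_week_of_month(2019, 7, 1)  ->  '# 第一周'   (week 1)
#   get_week_of_month(2019, 7, 8)  ->  '# 第二周'   (week 2)
# %W numbers Monday-started weeks of the year, so the result is simply the
# offset of the given day's week from the week containing the 1st of the month.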
def create_table_one_cell(document, content):
"""创建单行列表"""
create_table = document.add_table(rows=1, cols=1, style='Table Grid')
create_table.rows[0].cells[0].add_paragraph(content)
def create_table_more_cell(document, rows, cols, contents):
"""创建多行多列的列表"""
create_table = document.add_table(rows=rows, cols=cols, style='Table Grid')
index = 0
for content in contents:
for key, value in enumerate(content):
create_table.rows[index].cells[key].add_paragraph(value)
index += 1
def create_fixed_cell(document, first_date, end_date):
"""表前半部分固定内容"""
create_table_one_cell(document, '项目基本情况')
create_table_more_cell(document, 2, 2, [['项目名称', '厦开项目组'], ['客户名称', '中国建设银行厦门开发中心']])
create_table_more_cell(document, 3, 6, [['客户负责人', '李晓敦', '电话', '', 'Email', ''],
['(必填)', '闫立志', '电话', '', 'Email', ''],
['', '', '电话', '', 'Email', '']])
create_table_more_cell(document, 4, 2, [['开始日期', first_date], ['项目经理', '赖志勇'],
['项目组成员', '柳惠阳、许华语、郭健超、何卧岩、郑炜、黄惠章、朱俊龙、李稳定、'
'黄建鸣、陈浩1、叶晟君、张叶桃、陈晓衍、曾国荣、肖凯、刘安森、'
'林秋霞、姜渊、肖金平、周丽荣、钟晓杰、黄祯鸿、李志阳、刘程川、'
'张俊钦、邓松进、林丹丹、姜琪、钟高镇、方若琳、、谢源鑫、罗庭颖、'
'魏治邦、白艺伟、付敏、肖金龙、颜炳煜、庄华琼、董凯华、黄忠强、'
'徐鸿能、江养根、何龙伙、肖丽琴、罗万春、曾林华、、张一浓、郭吉、、'
'吴招辉、林泉、、苏雪梅、张祖琦、、陈浩'],
['项目描述', '']])
create_table_one_cell(document, '计划关键时间点(必填)')
create_table_more_cell(document, 6, 4, [['关键时间点', '预计完成时间', '关键时间点', '预计完成时间'],
['1、需求分析', '', '6、技术测试(单元测试)', ''],
['2、技术方案(项目实施方案)', '', '7、业务测试(集成测试)', ''],
['3、概要设计', '', '8、上线时间', ''],
['4、详细设计', '', '9、后期维护', ''],
['5、编码', '', '10、结项', '']])
create_table_one_cell(document, '实际关键时间点(必填)')
create_table_more_cell(document, 6, 4, [['关键时间点', '实际完成时间', '关键时间点', '实际完成时间'],
['1、需求分析', '', '6、技术测试(单元测试)', ''],
['2、技术方案(项目实施方案)', '', '7、业务测试(集成测试)', ''],
['3、概要设计', '', '8、上线时间', ''],
['4、详细设计', '', '9、后期维护', ''],
['5、编码', '', '10、结项', '']])
create_table_one_cell(document, '人力资源状况(包括人员的入职、离职;入场、离场、休假、请假等情况).'
'时间以到达、离开现场为准')
create_table_one_cell(document, '预计新增资源(必填)')
create_table_more_cell(document, 4, 6, [['姓名', '', '预计到场时间', '', '任务描述', ''],
['姓名', '', '预计到场时间', '', '任务描述', ''],
['姓名', '', '预计到场时间', '', '任务描述', ''],
['姓名', '', '预计到场时间', '', '任务描述', '']])
create_table_one_cell(document, '预计撤离资源(必填)')
create_table_more_cell(document, 3, 6, [['姓名', '', '预计离场时间', '', '撤离原因', ''],
['姓名', '', '预计离场时间', '', '撤离原因', ''],
['姓名', '', '预计离场时间', '', '撤离原因', '']])
create_table_one_cell(document, '本周人员变动情况(必填)')
create_table_more_cell(document, 5, 4, [['序号', '到场人员姓名', '到场时间', '备注'],
['1', '', '', ''], ['2', '', '', ''],
['3', '', '', ''], ['4', '', '', '']])
create_table_more_cell(document, 5, 4, [['序号', '离场人员姓名', '离场时间', '备注'],
['1', '', '', ''], ['2', '', '', ''],
['3', '', '', ''], ['4', '', '', '']])
create_table_one_cell(document, '本周项目情况')
create_table_one_cell(document, '项目所处阶段(必填)')
create_table_more_cell(document, 2, 5, [['1、需求分析', '2、概要设计', '3、详细设计', '4、编码', '5、技术测试'],
['6、业务测试', '7、试运行 ', '8、部分上线', '9、整体完工', '10、后期维护']])
create_table_one_cell(document, '项目经理自评(必填)')
create_table_more_cell(document, 5, 2, [['是否完成以下事项', '未完成的理由及说明'],
['是否组织周例会会议纪要? 【□是 □否】', ''],
['本周工作是否按计划完成?【□是 □否】', ''],
['是否跟客户项目负责人汇报本周工作?【□是 □否】', ''],
['下周计划安排是否与项目成员落实?【□是 □否】 ', '']])
create_table_one_cell(document, '需求变更情况(必填)')
create_table_more_cell(document, 3, 2, [['需求变更描述', '对后续的影响'], ['无', ''], ['', '']])
create_table_one_cell(document, '方案变更情况(必填)')
create_table_more_cell(document, 3, 2, [['方案变更描述', '对后续的影响'], ['', ''], ['', '']])
create_table_one_cell(document, '项目计划变更情况(必填)')
create_table_more_cell(document, 3, 2, [['项目计划变更描述', '对后续的影响'], ['', ''], ['', '']])
create_table_one_cell(document, '本周未完成的任务情况(必填)')
create_table_more_cell(document, 4, 3, [['未完成的任务描述', '任务未完成的原因', '对后续的影响'],
['', '', ''], ['', '', ''], ['', '', '']])
create_table_one_cell(document, '存在的问题及解决方案(必填)')
create_table_more_cell(document, 5, 4, [['问题描述及原因分析', '解决方案', '预计完成日期', '负责人'],
['', '', '', ''], ['', '', '', ''], ['', '', '', ''],
['', '', '', '']])
create_table_one_cell(document, '说明:如需求、技术方案有变化,请将信的需求文档、技术方案文档与周报一起,提交给公司归档')
create_table_one_cell(document, '项目进展和计划')
create_table_one_cell(document, '一、本周工作完成情况( {0}日至 {1}) (以下必填)'.format(first_date, end_date))
create_table_more_cell(document, 12, 4, [['编号', '本周重要里程碑事件', '完成日期', '完成标志'],
['1', '', '', ''], ['2', '', '', ''], ['', '', '', ''],
['编号', '上周计划的工作内容,但本周已完成', '完成日期', '负责人'],
['1', '', '', ''], ['2', '', '', ''], ['3', '', '', ''],
['4', '', '', ''], ['5', '', '', ''], ['6', '', '', ''],
['7', '', '', '']])
def create_fixed_cell_tow(document):
"""表后半部分固定内容"""
create_table_one_cell(document, '项目组下周预计借支情况')
create_table_more_cell(document, 5, 3, [['借支内容摘要', '金额', '备注'], ['', '', ''], ['', '', ''],
['合计', '', '']])
create_table_one_cell(document, '已提交给客户的阶段性文档和代码(必填)')
create_table_more_cell(document, 4, 4, [['资料名称', '提交时间', '接收人', '备注']])
create_table_one_cell(document, '已提交给公司的阶段性文档和代码(必填)')
create_table_more_cell(document, 4, 4, [['资料名称', '提交时间', '接收人', '备注']])
create_table_one_cell(document, '负责人对此项目本周工作的反馈意见')
create_table_more_cell(document, 3, 2, [['对项目进展评价', ''],
['对“项目情况”中,变更情况及存在问题的评述', ''],
['后续项目实施建议', '']])
def to_excel(worksheet, first_date, end_date):
style = xlwt.XFStyle()
title_str = '新一代核心系统建设项目周报\n' \
'\n' \
'(周期:{0}至{1})'.format(first_date, end_date)
bl = xlwt.Borders()
bl.left = xlwt.Borders.THIN
bl.right = xlwt.Borders.THIN
bl.top = xlwt.Borders.THIN
bl.bottom = xlwt.Borders.THIN
al = xlwt.Alignment()
    al.horz = 0x02  # horizontal centering
    al.vert = 0x01  # vertical centering
    al.wrap = 1  # enable automatic line wrapping
style.alignment = al
style.borders = bl
worksheet.write_merge(0, 3, 0, 9, title_str, style)
worksheet.write_merge(SITE_1, SITE_2-1, 0, 0, '一.本周计划进展情况', style)
worksheet.write(SITE_1, 1, '序号', style)
worksheet.write(SITE_1, 2, '工作事项名称', style)
worksheet.write(SITE_1, 3, '开始时间', style)
worksheet.write(SITE_1, 4, '完成时间', style)
worksheet.write(SITE_1, 5, '责任人', style)
worksheet.write(SITE_1, 6, '计划%', style)
worksheet.write(SITE_1, 7, '实际%', style)
worksheet.write(SITE_1, 8, '偏差%', style)
worksheet.write(SITE_1, 9, '进展说明', style)
worksheet.write_merge(SITE_2, SITE_2+31, 0, 0, '二.下周工作计划', style)
worksheet.write(SITE_2, 1, '序号', style)
worksheet.write(SITE_2, 2, '工作事项名称', style)
worksheet.write(SITE_2, 3, '开始时间', style)
worksheet.write(SITE_2, 4, '完成时间', style)
worksheet.write(SITE_2, 5, '责任人', style)
worksheet.write_merge(SITE_2, SITE_2, 6, 8, '计划输出结果', style)
worksheet.write(SITE_2, 9, '说明', style)
worksheet.write_merge(SITE_2+32, SITE_2+41, 0, 0, '三.目前存在的问题以及需要协调解决的事项', style)
worksheet.write(SITE_2+32, 1, '序号', style)
worksheet.write(SITE_2+32, 2, '问题名称', style)
worksheet.write_merge(SITE_2+32, SITE_2+32, 3, 4, '问题描述', style)
worksheet.write(SITE_2+32, 5, '提出日期', style)
worksheet.write(SITE_2+32, 6, '提出人团体', style)
worksheet.write(SITE_2+32, 7, '解决责任团队', style)
worksheet.write(SITE_2+32, 8, '预期解决时间', style)
worksheet.write(SITE_2+32, 9, '解决建议方案和计划', style)
worksheet.write_merge(SITE_2+42, SITE_2+47, 0, 0, '四.本周质量管理方面的工作总结', style)
worksheet.write(SITE_2+42, 1, '序号', style)
worksheet.write_merge(SITE_2+42, SITE_2+42, 2, 9, '进展说明', style)
worksheet.write_merge(SITE_2+48, SITE_2+53, 0, 0, '五.本周配置管理方面的工作总结', style)
worksheet.write(SITE_2+48, 1, '序号', style)
worksheet.write_merge(SITE_2+48, SITE_2+48, 2, 9, '进展说明', style)
def main():
site_1 = SITE_1
site_2 = SITE_2
time_now = date.today()
# time_now = date(2019, 7, 26)
today = time_now.strftime("%Y-%m-%d")
first_date = (time_now + timedelta(days=-4)).strftime("%Y-%m-%d")
end_date = (time_now + timedelta(days=2)).strftime("%Y-%m-%d")
first_date_of = (time_now + timedelta(days=3)).strftime("%Y-%m-%d")
end_date_of = (time_now + timedelta(days=7)).strftime("%Y-%m-%d")
    # Generate the Excel workbook
workbook = xlwt.Workbook()
worksheet = workbook.add_sheet('周报', cell_overwrite_ok=True)
to_excel(worksheet, first_date, end_date)
    # Determine which week of the month it is
week = get_week_of_month(time_now.year, time_now.month, time_now.day)
# week = get_week_of_month(2019, 8, 2)
document = Document()
document.add_heading('项目周报({0})'.format(week.strip('#').strip()), level=1)
document.add_paragraph('填表人:廖虹媛 报告周期:{date1}到{date2} 填表日期:{date3}'.format(
date1=first_date, date2=end_date, date3=today))
    # # Helper that creates the fixed report tables (currently disabled)
# create_fixed_cell(document, first_date, end_date)
    # Table for this week's work items
table1 = document.add_table(rows=CELL, cols=6, style='Table Grid')
table1.rows[0].cells[0].add_paragraph('编号')
table1.rows[0].cells[1].add_paragraph('本周工作内容')
table1.rows[0].cells[2].add_paragraph('计划完成时间')
table1.rows[0].cells[3].add_paragraph('实际完成时间')
table1.rows[0].cells[4].add_paragraph('负责人')
table1.rows[0].cells[5].add_paragraph('项目组')
    # Table for next week's work items
create_table_one_cell(document, '项目进展和计划')
create_table_one_cell(document, '一、下周工作完成情况( {0}至 {1}) (以下必填)'.format(first_date, end_date))
table2 = document.add_table(rows=CELL, cols=6, style='Table Grid')
table2.rows[0].cells[0].add_paragraph('编号')
table2.rows[0].cells[1].add_paragraph('下周工作内容')
table2.rows[0].cells[2].add_paragraph('计划完成时间')
table2.rows[0].cells[3].add_paragraph('实际完成时间')
table2.rows[0].cells[4].add_paragraph('负责人')
table2.rows[0].cells[5].add_paragraph('项目组')
    # Write the main content
get_file_path(MD_PATH, week, table1, table2, first_date, today, worksheet,
site_1, site_2, first_date_of, end_date_of)
    # # Second-half fixed-content helper (currently disabled)
# create_fixed_cell_tow(document)
save_name = '厦开项目组周报{0}至{1}.docx'.format(first_date, end_date)
document.save(save_name)
excel_name = '新一代核心系统建设项目周报{0}_天用厦开安全项目组.xls'.format(end_date)
workbook.save(excel_name)
if __name__ == '__main__':
main()
| python |
##############################################################################
# Copyright 2016 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from pyxcli.response import XCLIResponse
from pyxcli.helpers.xml_util import ElementNotFoundException
from pyxcli.helpers import xml_util as etree
class XCLIError(Exception):
"""Base class of all XCLI-related errors"""
pass
class BaseScsiException(Exception):
pass
class CommandExecutionError(XCLIError):
"""
Base class of all XCLI command execution errors: invalid command,
parameters, operation failed, etc. This is the "stable API" for
catching XCLI exceptions - there are subclasses for specific errors,
but these should be considered unstable and may change over time
"""
KNOWN_CODES = {}
KNOWN_LEVELS = {}
def __init__(self, code, status, xml, return_value=None):
XCLIError.__init__(self, code, status, xml)
self.code = code
self.status = status
self.xml = xml
if return_value is not None:
self.return_value = return_value
else:
self.return_value = XCLIResponse(xml)
def __str__(self):
return self.status
@classmethod
def instantiate(cls, rootelem, cmdroot, encoding):
try:
# "code/@value"
code = etree.xml_find(cmdroot, "code", "value")
# "status/@value"
level = etree.xml_find(cmdroot, "status", "value")
# "status_str/@value"
status = etree.xml_find(cmdroot, "status_str", "value")
except ElementNotFoundException:
code = None
level = None
status = "Unknown reason"
xcli_response = XCLIResponse.instantiate(cmdroot, encoding)
if code in cls.KNOWN_CODES:
concrete = cls.KNOWN_CODES[code]
elif level in cls.KNOWN_LEVELS:
concrete = cls.KNOWN_LEVELS[level]
else:
concrete = CommandFailedUnknownReason
return concrete(code, status, cmdroot, xcli_response)
@classmethod
def register(cls, *codes):
def deco(concrete):
for code in codes:
cls.KNOWN_CODES[code] = concrete
return concrete
return deco
@classmethod
def register_level(cls, *codes):
def deco(concrete):
for code in codes:
cls.KNOWN_LEVELS[code] = concrete
return concrete
return deco
class CommandFailedUnknownReason(CommandExecutionError):
pass
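# Usage sketch (hypothetical client object and command): CommandExecutionError
# is the stable catch-all; the concrete subclasses registered below via
# `register`/`register_level` are resolved in `instantiate` from the returned
# code/status value and should be treated as unstable.
#
#   try:
#       client.cmd.vol_create(vol="v1", size=17, pool="p1")
#   except PoolOutOfSpaceError:
#       ...  # narrow handling, may change between releases
#   except CommandExecutionError as err:
#       print("XCLI command failed: %s (code=%s)" % (err, err.code))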
##############################################################################
# Concrete Error Levels
##############################################################################
@CommandExecutionError.register_level("1")
class CommandFailedConnectionError(CommandExecutionError):
pass
@CommandExecutionError.register_level("2")
class CommandFailedSyntaxError(CommandExecutionError):
pass
@CommandExecutionError.register_level("3")
class CommandFailedRuntimeError(CommandExecutionError):
pass
@CommandExecutionError.register_level("4")
class CommandFailedPassiveManager(CommandExecutionError):
pass
@CommandExecutionError.register_level("5")
class CommandFailedInternalError(CommandExecutionError):
pass
##############################################################################
# Concrete Error Codes
##############################################################################
@CommandExecutionError.register("MCL_TIMEOUT")
class MCLTimeoutError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("PARTIAL_SUCCESS")
class PartialSuccessError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("TRNS_ERROR_WITH_EXTENDED_INFO")
class OperationFailedWithExtendedInfoError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_BAD_NAME")
class VolumeBadNameError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("SOURCE_VOLUME_BAD_NAME")
class SourceVolumeBadNameError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("TARGET_VOLUME_BAD_NAME")
class TargetVolumeBadNameError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("BASE_VOLUME_BAD_NAME")
class BaseVolumeBadNameError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("BASE_VOLUME_INVALID")
class BaseVolumeInvalidError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_EXISTS")
class VolumeExistsError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_IS_MAPPED")
class VolumeIsMappedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_SIZE_ABOVE_LIMIT")
class VolumeSizeAboveLimitError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_NO_MIRROR")
class VolumeHasNoMirrorError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_HAS_DATA_MIGRATION")
class VolumeHasDataMigrationError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_BELONGS_TO_MIRRORED_CONS_GROUP")
class VolumeIsPartOfMirroredCgError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("ALU_BAD_NAME")
class ALUBadNameError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("CONS_GROUP_BAD_NAME")
class CgBadNameError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("CONS_GROUP_NO_MIRROR")
class CgHasNoMirrorError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("MIRROR_IS_NOT_SYNCHRONIZED")
class MirrorNotSynchronizedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("MIRROR_IS_ASYNC")
class MirrorIsAsyncError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("MIRROR_IS_INITIAL")
class MirrorInitializingError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("MIRROR_IS_ACTIVE")
class MirrorActiveError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("SYNC_ALREADY_INACTIVE")
class SyncAlreadyInactiveError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("SYNC_ALREADY_ACTIVE")
class SyncAlreadyActiveError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("MIRROR_IS_NON_OPERATIONAL")
class MirrorNonOperationalError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("REMOTE_TARGET_NOT_CONNECTED")
class RemoteTargetNotConnectedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("LOCAL_PEER_IS_NOT_MASTER")
class LocalIsNotMasterError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("NOT_ENOUGH_SPACE")
class PoolOutOfSpaceError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("NOT_ENOUGH_HARD_SPACE")
class PoolOutOfHardSpaceError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("NOT_ENOUGH_SNAPSHOT_SPACE")
class PoolOutOfSnapshotSpaceError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("NO_SPACE")
class SystemOutOfSpaceError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("NOT_ENOUGH_SPACE_ON_REMOTE_MACHINE")
class RemotePoolOutOfSpaceError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_IS_SNAPSHOT")
class OperationNotPermittedOnSnapshotError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("BAD_PARAMS")
class BadParameterError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("HOST_NAME_EXISTS")
class HostNameAlreadyExistsError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("HOST_PORT_EXISTS")
class HostWithPortIdAlreadyDefined(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("POOL_DOES_NOT_EXIST")
class PoolDoesNotExistError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("POOL_SNAPSHOT_LIMIT_REACHED")
class PoolSnapshotLimitReachedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("REMOTE_VOLUME_IS_MASTER")
class RemoteVolumeIsMasterError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("CONF_PATH_DOES_NOT_EXIST")
class PathDoesNotExistInConfigurationError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("ILLEGAL_VALUE")
class IllegalValueForArgumentError(CommandFailedSyntaxError):
pass
@CommandExecutionError.register("ILLEGAL_NAME")
class IllegalNameForObjectError(CommandFailedSyntaxError):
pass
@CommandExecutionError.register("COMPONENT_TYPE_MUST_HAVE_COMPONENT_ID")
class ComponentTypeMustHaveComponentIDError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("HOST_PROFILE_UPDATE_TOO_FREQUENT")
class HostProfileUpdateTooFrequentError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("HOST_BAD_NAME")
class HostBadNameError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("CLUSTER_BAD_NAME")
class ClusterBadNameError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("MAX_HOST_PROFILES_REACHED")
class MaxHostProfilesReachedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("SSD_CACHING_NOT_ENABLED")
class SSDCachingNotEnabledError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("UNRECOGNIZED_EVENT_CODE")
class UnrecognizedEventCodeError(CommandFailedSyntaxError):
pass
@CommandExecutionError.register("UNRECOGNIZED_COMMAND")
class UnrecognizedCommandError(CommandFailedSyntaxError):
pass
@CommandExecutionError.register("CAN_NOT_SHRINK_VOLUME")
class VolumeSizeCannotBeDecreased(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("OBJECT_BAD_NAME")
class ReferencedObjectDoesNotExistError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("OPERATION_FORBIDDEN_FOR_USER_CATEGORY")
class OperationForbiddenForUserCategoryError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("ACCESS_DENIED")
class AccessDeniedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("COMMAND_NOT_SUPPORTED_FOR_OLVM_VOLUMES")
class CommandNotSupportedForOLVMVolumes(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_LOCKED")
class VolumeLocked(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_HAS_OLVM")
class VolumeHasOlvm(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_HAS_MIRROR")
class VolumeHasMirrorError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_BELONGS_TO_CG")
class VolumeBelongsToCGError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("STATUS_METADATA_SERVICE_MAX_DB_REACHED")
class MetadataServiceMaxDBReachedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("STATUS_METADATA_SERVICE_DB_DOES_NOT_EXIST")
class MetadataServiceDBDoesNotExistError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("STATUS_METADATA_SERVICE_DB_ALREADY_EXISTS")
class MetadataServiceDBAlreadyExistsError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("STATUS_METADATA_SERVICE_KEY_DOES_NOT_EXIST")
class MetadataServiceKeyDoesNotExistError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("STATUS_METADATA_SERVICE_KEY_ALREADY_EXISTS")
class MetadataServiceKeyAlreadyExistsError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("STATUS_METADATA_SERVICE_MAX_ENTRIES_REACHED")
class MetadataServiceMaxEntriesReachedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("STATUS_METADATA_SERVICE_INVALID_TOKEN")
class MetadataServiceInvalidTokenError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("LDAP_AUTHENTICATION_IS_NOT_ACTIVE")
class LDAPAuthenticationIsNotActive(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("LDAP_IS_NOT_FULLY_CONFIGURED")
class LDAPIsNotFullyConfigured(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_INCOMPATIBLE_SIZE")
class VolumeIncompatibleSizeError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("COMPRESSION_DISABLED")
class CompressionDisabledError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("COMPRESSION_REQUIRES_THIN_PROVISIONED_POOL")
class CompressionRequiresThinPoolError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("COMPRESSED_VOLUMES_LIMIT_REACHED")
class CompressedVolumesLimitReachedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("COMPRESSED_CAPACITY_LIMIT_REACHED")
class CompressedCapacityLimitReachedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("COMPRESSED_VOLUME_TOO_BIG")
class CompressedVolumeTooBigError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("COMPRESSED_VOLUME_TOO_SMALL")
class CompressedVolumeTooSmallError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("SOURCE_VOLUME_COMPRESSED_TARGET_UNCOMPRESSED")
class SourceVolumeCompressedTargetUncompressedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("SOURCE_VOLUME_UNCOMPRESSED_TARGET_COMPRESSED")
class SourceVolumeUncompressedTargetCompressedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("CANNOT_SHRINK_COMPRESSED_VOLUME")
class CannotShrinkCompressedVolumeError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_HAS_TRANSFORM")
class VolumeHasTransformError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_IS_COMPRESSED")
class VolumeIsCompressedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("COMPRESSED_VOLUME_IS_MAPPED")
class CompressedVolumeIsMappedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("CAN_NOT_MAP_SLAVE_COMPRESSED_VOLUME")
class CannotMapSlaveError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("CONS_GROUP_NAME_EXISTS")
class CgNameExistsError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("CONS_GROUP_DOES_NOT_EXIST")
class CgDoesNotExistError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("MAX_CONS_GROUPS_REACHED")
class CgLimitReachedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("CONS_GROUP_HAS_MIRROR")
class CgHasMirrorError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("CONS_GROUP_NOT_EMPTY")
class CgNotEmptyError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("CONS_GROUP_EMPTY")
class CgEmptyError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("CONS_GROUP_MISMATCH")
class CgMismatchError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("CONS_GROUP_MIRROR_PARAMS_MISMATCH")
class CgMirrorParamsMismatchError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("CONS_GROUP_MIRRORING_NOT_SUPPORTED_IN_TARGET")
class CgMirroringNotSupportedOnTargetError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("SNAPSHOT_GROUP_BAD_NAME")
class SnapshotGroupDoesNotExistError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("SNAPSHOT_IS_MAPPED")
class SnapshotIsMappedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("SNAPSHOT_HAS_ACTIVE_SYNC_JOB")
class SnapshotIsSynchronisingError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("MAX_VOLUMES_REACHED")
class MaxVolumesReachedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("DOMAIN_MAX_VOLUMES_REACHED")
class DomainMaxVolumesReachedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("SNAPSHOT_GROUP_BAD_PREFIX")
class SnapshotGroupIsReservedError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("SNAPSHOT_GROUP_NAME_EXISTS")
class SnapshotGroupAlreadyExistsError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register(
"OVERWRITE_SNAPSHOT_GROUP_DOES_NOT_BELONG_TO_GIVEN_GROUP")
class SnapshotGroupMismatchError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_NOT_CONNECTED_TO_ANY_PERF_CLASS")
class VolumeNotConnectedToPerfClassError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("PERF_CLASS_BAD_NAME")
class PerfClassNotExistsError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("VOLUME_ALREADY_IN_PERF_CLASS")
class VolumeAlreadyInPerfClassError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("PERF_CLASS_ASSOCIATED_WITH_HOSTS")
class PerfClassAssociatedWithHostError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("PERF_CLASS_ASSOCIATED_WITH_POOLS_OR_DOMAINS")
class PerfClassAssociatedWithPoolsOrDomainsError(CommandFailedRuntimeError):
pass
@CommandExecutionError.register("PERF_CLASS_ASSOCIATED_WITH_VOLUMES")
class PerfClassAssociatedWithVolumesError(CommandFailedRuntimeError):
pass
##############################################################################
# CredentialsError
# we explicitly want to differentiate CredentialsError from
# CommandExecutionError, so although it is raised by _build_response,
# it derives from XCLIError directly
##############################################################################
@CommandExecutionError.register("LOGIN_FAILURE_USER_FAILED_TO_LOGIN",
"USER_NAME_DOES_NOT_EXIST",
"DEFAULT_USER_IS_NOT_DEFINED",
"INCORRECT_PASSWORD",
"LOGIN_FAILURE_USER_NOT_FOUND_IN_LDAP_SERVERS",
"LOGIN_FAILURE_USER_NOT_AUTHENTICATED_BY_ \
LDAP_SERVER")
class CredentialsError(XCLIError):
"""Raises when an XCLI command fails due to invalid credentials.
Inherits directly from XCLIError, not CommandExecutionError,
although it is raised during the execution of a command
to explicitly differentiate the two
"""
def __init__(self, code, status, xml, return_value=None):
XCLIError.__init__(self, code, status, xml)
self.code = code
self.status = status
self.xml = xml
if return_value is not None:
self.return_value = return_value
else:
self.return_value = XCLIResponse(xml)
def __str__(self):
ret_str = ""
if isinstance(self.xml, str):
ret_str = "%s\n\n%s" % (self.status, self.xml)
else:
ret_str = "%s\n\n%s" % (etree.tostring(self.xml))
return ret_str
##############################################################################
# AServer ("delivery") errors
##############################################################################
class CommandFailedAServerError(CommandExecutionError):
"""AServer related errors"""
REMOTE_TARGET_ERRORS = frozenset(["TARGET_IS_NOT_CONNECTED",
"TARGET_DOES_NOT_EXIST",
"SEND_TO_TARGET_FAILED",
"GETTING_RESPONSE_FROM_TARGET_FAILED"])
@classmethod
def instantiate(cls, aserver, rootelem):
if aserver in cls.REMOTE_TARGET_ERRORS:
return CommandFailedRemoteTargetError(aserver, aserver, rootelem)
else:
return CommandFailedAServerError(aserver, aserver, rootelem)
class CommandFailedRemoteTargetError(CommandFailedAServerError):
pass
##############################################################################
# Misc
##############################################################################
class UnsupportedNextraVersion(XCLIError):
pass
class CorruptResponse(XCLIError):
pass
##############################################################################
# Transport
##############################################################################
class TransportError(XCLIError):
"""Base class of all transport-related errors"""
pass
class ConnectionError(TransportError):
"""Represents errors that occur during connection"""
pass
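# Illustrative sketch only (not the real implementation, which is defined earlier in
# this module): the CommandExecutionError.register decorator used throughout this file
# amounts to an error-code -> exception-class registry along these assumed lines:
#
#     _registry = {}
#
#     @classmethod
#     def register(cls, *codes):
#         def decorator(exc_class):
#             for code in codes:
#                 _registry[code] = exc_class
#             return exc_class
#         return decorator
#
# so the error code reported by the system can be mapped back to the most specific
# exception class when a command fails.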
| python |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from collections import OrderedDict as odict
from copy import deepcopy
from functools import partial
import sys
import bindings as bi
from custom import get_customizations_for, reformat_block
PY3 = sys.version_info[0] == 3
str_type = str if PY3 else (str, unicode)
get_customizations_for = partial(get_customizations_for, 'R')
def get_customizations_or_defaults_for(algo, prop, default=None):
return get_customizations_for(algo, prop, get_customizations_for('defaults', prop, default))
# ----------------------------------------------------------------------------------------------------------------------
# Generate per-model classes
# ----------------------------------------------------------------------------------------------------------------------
def gen_module(schema, algo, module):
# print(str(schema))
rest_api_version = get_customizations_for(algo, 'rest_api_version', 3)
doc_preamble = get_customizations_for(algo, 'doc.preamble')
doc_returns = get_customizations_for(algo, 'doc.returns')
doc_seealso = get_customizations_for(algo, 'doc.seealso')
doc_references = get_customizations_for(algo, 'doc.references')
doc_examples = get_customizations_for(algo, 'doc.examples')
required_params = get_customizations_or_defaults_for(algo, 'extensions.required_params', [])
extra_params = get_customizations_or_defaults_for(algo, 'extensions.extra_params', [])
model_name = algo_to_modelname(algo)
update_param_defaults = get_customizations_for('defaults', 'update_param')
update_param = get_customizations_for(algo, 'update_param')
yield "# This file is auto-generated by h2o-3/h2o-bindings/bin/gen_R.py"
yield "# Copyright 2016 H2O.ai; Apache License Version 2.0 (see LICENSE for details) \n#'"
yield "# -------------------------- %s -------------------------- #" % model_name
# start documentation
if doc_preamble:
yield "#'"
yield reformat_block(doc_preamble, prefix="#' ")
yield "#'"
# start doc for signature
required_params = odict([(p[0] if isinstance(p, tuple) else p, p[1] if isinstance(p, tuple) else None)
for p in required_params])
schema_params = odict([(p['name'], p)
for p in schema['parameters']])
extra_params = odict([(p[0] if isinstance(p, tuple) else p, p[1] if isinstance(p, tuple) else None)
for p in extra_params])
all_params = list(required_params.keys()) + list(schema_params.keys()) + list(extra_params.keys())
def get_schema_params(pname):
param = deepcopy(schema_params[pname])
updates = None
for update_fn in [update_param, update_param_defaults]:
if callable(update_fn):
updates = update_fn(pname, param)
if updates is not None:
param = updates
break
return param if isinstance(param, (list, tuple)) else [param] # always return array to support deprecated aliases
tag = "@param"
pdocs = odict()
for pname in all_params:
if pname in pdocs: # avoid duplicates (esp. if already included in required_params)
continue
if pname in schema_params:
for param in get_schema_params(pname): # retrieve potential aliases
pname = param.get('name')
if pname:
pdocs[pname] = get_customizations_or_defaults_for(algo, 'doc.params.'+pname, get_help(param, indent=len(tag)+4))
else:
pdocs[pname] = get_customizations_or_defaults_for(algo, 'doc.params.'+pname)
for pname, pdoc in pdocs.items():
if pdoc:
yield reformat_block("%s %s %s" % (tag, pname, pdoc.lstrip('\n')), indent=len(tag)+1, indent_first=False, prefix="#' ")
if doc_returns:
tag = "@return"
yield reformat_block("%s %s" % (tag, doc_returns.lstrip('\n')), indent=len(tag)+1, indent_first=False, prefix="#' ")
if doc_seealso:
tag = "@seealso"
yield reformat_block("%s %s" % (tag, doc_seealso.lstrip('\n')), indent=len(tag)+1, indent_first=False, prefix="#' ")
if doc_references:
tag = "@references"
yield reformat_block("%s %s" % (tag, doc_references.lstrip('\n')), indent=len(tag)+1, indent_first=False, prefix="#' ")
if doc_examples:
yield "#' @examples"
yield "#' \dontrun{"
yield reformat_block(doc_examples, prefix="#' ")
yield "#' }"
yield "#' @export"
# start function signature
sig_pnames = []
sig_params = []
for k, v in required_params.items():
sig_pnames.append(k)
sig_params.append(k if v is None else '%s = %s' % (k, v))
for pname in schema_params:
params = get_schema_params(pname)
for param in params:
pname = param.get('name') # override local var as param can be an alias of pname
if pname in required_params or not pname: # skip schema params already added by required_params, and those explicitly removed
continue
sig_pnames.append(pname)
sig_params.append("%s = %s" % (pname, get_sig_default_value(param)))
for k, v in extra_params.items():
sig_pnames.append(k)
sig_params.append("%s = %s" % (k, v))
param_indent = len("h2o.%s <- function(" % module)
yield reformat_block("h2o.%s <- function(%s)" % (module, ',\n'.join(sig_params)), indent=param_indent, indent_first=False)
# start function body
yield "{"
validate_frames = get_customizations_or_defaults_for(algo, 'extensions.validate_frames')
if validate_frames:
yield " # Validate required training_frame first and other frame args: should be a valid key or an H2OFrame object"
yield reformat_block(validate_frames, indent=2)
else:
frames = get_customizations_or_defaults_for(algo, 'extensions.frame_params', [])
if frames:
yield " # Validate required training_frame first and other frame args: should be a valid key or an H2OFrame object"
for frame in frames:
if frame in sig_pnames:
required_val = str(frame in required_params).upper()
yield " {frame} <- .validate.H2OFrame({frame}, required={required})".format(frame=frame, required=required_val)
validate_required_params = get_customizations_or_defaults_for(algo, 'extensions.validate_required_params')
if validate_required_params:
yield ""
yield " # Validate other required args"
yield reformat_block(validate_required_params, indent=2)
validate_params = get_customizations_or_defaults_for(algo, 'extensions.validate_params')
if validate_params:
yield ""
yield " # Validate other args"
yield reformat_block(validate_params, indent=2)
yield ""
yield " # Build parameter list to send to model builder"
yield " parms <- list()"
set_required_params = get_customizations_or_defaults_for(algo, 'extensions.set_required_params')
if set_required_params:
yield reformat_block(set_required_params, indent=2)
skip_default_set_params = get_customizations_or_defaults_for(algo, 'extensions.skip_default_set_params_for', [])
yield ""
for pname in schema_params:
if pname in skip_default_set_params:
continue
# leave the special handling of 'loss' param here for now as it is used by several algos
if pname == "loss":
yield " if(!missing(loss)) {"
yield " if(loss == \"MeanSquare\") {"
yield " warning(\"Loss name 'MeanSquare' is deprecated; please use 'Quadratic' instead.\")"
yield " parms$loss <- \"Quadratic\""
yield " } else "
yield " parms$loss <- loss"
yield " }"
else:
yield " if (!missing(%s))" % pname
yield " parms$%s <- %s" % (pname, pname)
set_params = get_customizations_or_defaults_for(algo, 'extensions.set_params')
if set_params:
yield ""
yield reformat_block(set_params, indent=2)
yield ""
yield " # Error check and build model"
verbose = 'verbose' if 'verbose' in extra_params else 'FALSE'
yield " model <- .h2o.modelJob('%s', parms, h2oRestApiVersion=%d, verbose=%s)" % (algo, rest_api_version, verbose)
with_model = get_customizations_for(algo, 'extensions.with_model')
if with_model:
yield ""
yield reformat_block(with_model, indent=2)
yield " return(model)"
yield "}"
# start additional functions
module_extensions = get_customizations_for(algo, 'extensions.module')
if module_extensions:
yield ""
yield module_extensions
def algo_to_modelname(algo):
if algo == "aggregator": return "H2O Aggregator Model"
if algo == "deeplearning": return "Deep Learning - Neural Network"
if algo == "xgboost": return "XGBoost"
if algo == "drf": return "Random Forest Model in H2O"
if algo == "gbm": return "Gradient Boosting Machine"
if algo == "glm": return "H2O Generalized Linear Models"
if algo == "glrm": return "Generalized Low Rank Model"
if algo == "kmeans": return "KMeans Model in H2O"
if algo == "naivebayes": return "Naive Bayes Model in H2O"
if algo == "pca": return "Principal Components Analysis"
if algo == "svd": return "Singular Value Decomposition"
if algo == "stackedensemble": return "H2O Stacked Ensemble"
if algo == "psvm": return "Support Vector Machine"
if algo == "targetencoder": return "Target Encoder"
return algo
def get_help(param, indent=0):
pname = param.get('name')
ptype = param.get('type')
pvalues = param.get('values')
pdefault = param.get('default_value')
phelp = param.get('help')
if not phelp:
return
if ptype == 'boolean':
phelp = "\code{Logical}. " + phelp
if pvalues:
phelp += " Must be one of: %s." % ", ".join('"%s"' % v for v in pvalues)
if pdefault is not None:
phelp += " Defaults to %s." % get_doc_default_value(param)
return bi.wrap(phelp, width=120-indent)
def get_doc_default_value(param):
ptype = param['type']
ptype = 'str' if ptype.startswith('enum') else ptype # for doc, default value is actually a str for enum types.
return as_R_repr(ptype, param.get('default_value'))
def get_sig_default_value(param):
ptype = param['type']
value = (param.get('values') if ptype.startswith('enum') # for signature, default value is whole enum (to provide parameter hint).
else param.get('default_value'))
return as_R_repr(ptype, value)
def as_R_repr(ptype, value):
if value is None:
return (0 if ptype in ['short', 'int', 'long', 'double']
else "list()" if ptype == 'list'
else 'NULL')
if ptype == 'boolean':
return str(value).upper()
if ptype == 'double':
return '%.10g' % value
if ptype == 'list':
return "list(%s)" % ', '.join('"%s"' % v for v in value)
if ptype.startswith('enum'):
return "c(%s)" % ', '.join('"%s"' % v for v in value)
if ptype.endswith('[]'):
return "c(%s)" % ', '.join('%s' % v for v in value)
return value
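# Minimal illustration of the Python-default -> R-literal mapping above; the parameter
# types and values here are hypothetical and this helper is never called by the generator.
def _demo_as_R_repr():
    assert as_R_repr('boolean', True) == 'TRUE'
    assert as_R_repr('double', 0.5) == '0.5'
    assert as_R_repr('enum', ['auto', 'exact']) == 'c("auto", "exact")'
    assert as_R_repr('int', None) == 0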
# ----------------------------------------------------------------------------------------------------------------------
# MAIN:
# ----------------------------------------------------------------------------------------------------------------------
def main():
bi.init("R", "../../../h2o-r/h2o-package/R", clear_dir=False)
for name, mb in bi.model_builders().items():
module = name
file_name = name
if name == "drf":
module = "randomForest"
file_name = "randomforest"
if name == "isolationforest": module = "isolationForest"
if name == "naivebayes": module = "naiveBayes"
if name == "stackedensemble": module = "stackedEnsemble"
if name == "pca": module = "prcomp"
bi.vprint("Generating model: " + name)
bi.write_to_file("%s.R" % file_name, gen_module(mb, name, module))
if __name__ == "__main__":
main()
| python |
"""
1. Clarification
2. Possible solutions
- Dynamic programming
- Divide and Conquer
3. Coding
4. Tests
"""
import math
from typing import List
# T=O(n), S=O(1)
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
if not nums: return 0
maxn, subSum = -math.inf, 0
for num in nums:
subSum += num
maxn = max(maxn, subSum)
if subSum < 0:
subSum = 0
return maxn
# T=O(n), S=O(lgn)
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
def divide_and_conquer(nums, left, right):
if left == right: return (nums[left], nums[left], nums[left], nums[left])
mid = (left + right) >> 1
a1, m1, b1, s1 = divide_and_conquer(nums, left, mid)
a2, m2, b2, s2 = divide_and_conquer(nums, mid + 1, right)
a = max(a1, s1 + a2)
b = max(b2, s2 + b1)
m = max(m1, m2, b1 + a2)
s = s1 + s2
return (a, m, b, s)
if not nums: return 0
_, m, _, _ = divide_and_conquer(nums, 0, len(nums) - 1)
return m
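# The divide-and-conquer helper returns, for each segment, the tuple
# (best prefix sum, best subarray sum, best suffix sum, total sum).
if __name__ == "__main__":
    # hypothetical sanity check: both approaches return 6 for the classic example
    print(Solution().maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))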
| python |
import logging
from easyjoblite import state, constants
from easyjoblite.utils import kill_process
logger = logging.getLogger(__name__)
class WorkerManager(object):
@staticmethod
def stop_all_workers(worker_type):
"""
stops all the workers of the given type
:param worker_type:
:return:
"""
logger = logging.getLogger("stop_all_workers")
service_state = state.ServiceState()
worker_type_list = [constants.WORK_QUEUE, constants.RETRY_QUEUE, constants.DEAD_LETTER_QUEUE]
if worker_type in worker_type_list:
WorkerManager.kill_workers(service_state, worker_type)
logger.info("Done stopping all the workers of worker_type {}".format(worker_type))
elif worker_type == constants.STOP_TYPE_ALL:
for local_type in worker_type_list:
WorkerManager.kill_workers(service_state, local_type)
logger.info("Done stopping all the workers ")
else:
raise KeyError
service_state.refresh_all_workers_pid()
@staticmethod
def kill_workers(service_state, type):
"""
function to kill all the workers of the given type
:param service_state: current state of the service
:param type: the type of the worker to kill
:return:
"""
logger.info("Started killing : " + type + " with list " + str(service_state.get_pid_list(type)))
pid_list = list(service_state.get_pid_list(type))
for pid in pid_list:
kill_process(pid)
logging.info("Done killing : " + str(pid))
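# Usage sketch (hypothetical call sites): stop every worker consuming the main work
# queue, or pass constants.STOP_TYPE_ALL to stop workers of every type.
#   WorkerManager.stop_all_workers(constants.WORK_QUEUE)
#   WorkerManager.stop_all_workers(constants.STOP_TYPE_ALL)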
| python |
#!/usr/bin/env python2
# Copyright (C) 2001 Jeff Epler <[email protected]>
# Copyright (C) 2006 Csaba Henk <[email protected]>
# Copyright (C) 2011 Marek Kubica <[email protected]>
#
# This program can be distributed under the terms of the GNU LGPLv3.
import os, sys
from errno import *
from stat import *
import fcntl
import fuse
from fuse import Fuse
import os.path
import errno
from logbook import FileHandler, debug, DEBUG
log_handler = FileHandler('/tmp/libraryfuse.log', level=DEBUG)
log_handler.push_application()
debug('Starting')
fuse.fuse_python_api = (0, 2)
fuse.feature_assert('stateful_files', 'has_init')
directories_to_merge = ['/var', '/usr']
class LibraryFuse(Fuse):
def __init__(self, *args, **kw):
Fuse.__init__(self, *args, **kw)
self.directories_to_merge = directories_to_merge
def getattr(self, path):
debug('getattr with %s' % path)
for library_part in self.directories_to_merge:
real_path = library_part + path
debug('trying %s' % real_path)
if os.path.exists(real_path):
return os.lstat(real_path)
def readlink(self, path):
debug('readlink called with {}'.format(path))
for library_part in self.directories_to_merge:
real_path = library_part + path
if os.path.exists(real_path):
return os.readlink(real_path)
def readdir(self, path, offset):
debug('readdir called with {0} and offset {1}'.format(path, offset))
elements = set()
# gather elements
for library_part in self.directories_to_merge:
real_path = library_part + path
if not os.path.exists(real_path):
continue
for e in os.listdir(real_path):
elements.add(e)
# return elements
for element in elements:
yield fuse.Direntry(element)
def unlink(self, path):
debug('unlink called')
return -ENOSYS
os.unlink("." + path)
def rmdir(self, path):
debug('rmdir')
return -ENOSYS
os.rmdir("." + path)
def symlink(self, path, path1):
debug('symlink')
return -ENOSYS
os.symlink(path, "." + path1)
def rename(self, path, path1):
debug('rename')
return -ENOSYS
os.rename("." + path, "." + path1)
def link(self, path, path1):
debug('link')
return -ENOSYS
os.link("." + path, "." + path1)
def chmod(self, path, mode):
debug('chmod')
return -ENOSYS
os.chmod("." + path, mode)
def chown(self, path, user, group):
debug('chown')
return -ENOSYS
os.chown("." + path, user, group)
def truncate(self, path, len):
debug('truncate')
return -ENOSYS
f = open("." + path, "a")
f.truncate(len)
f.close()
def mknod(self, path, mode, dev):
debug('mknod')
return -ENOSYS
os.mknod("." + path, mode, dev)
def mkdir(self, path, mode):
debug('mkdir')
return -ENOSYS
os.mkdir("." + path, mode)
def utime(self, path, times):
debug('utime')
return -ENOSYS
os.utime("." + path, times)
# The following utimens method would do the same as the above utime method.
    # We can't make it better though, as the Python stdlib doesn't expose
    # subsecond precision in access/modify times.
#
# def utimens(self, path, ts_acc, ts_mod):
# os.utime("." + path, (ts_acc.tv_sec, ts_mod.tv_sec))
def access(self, path, mode):
debug('access {0} in mode {1}'.format(path, mode))
for library_part in self.directories_to_merge:
real_path = library_part + path
if os.path.exists(real_path):
if not os.access(real_path, mode):
return -errno.EACCES
def statfs(self):
"""
Should return an object with statvfs attributes (f_bsize, f_frsize...).
        E.g., the return value of os.statvfs() is such a thing (since py 2.2).
        If you are not reusing an existing statvfs object, start with
        fuse.StatVFS(), and define the attributes.
        To provide usable information (i.e., if you want sensible df(1)
        output), you should specify the following attributes:
            - f_bsize - preferred size of file blocks, in bytes
            - f_frsize - fundamental size of file blocks, in bytes
                [if you have no idea, use the same as blocksize]
            - f_blocks - total number of blocks in the filesystem
            - f_bfree - number of free blocks
            - f_files - total number of file inodes
            - f_ffree - number of free file inodes
"""
debug('statvfs')
return os.statvfs(".")
def main(self, *a, **kw):
return Fuse.main(self, *a, **kw)
def main():
server = LibraryFuse()
#server.parser.add_option(mountopt="root", metavar="PATH", default='/',
# help="mirror filesystem from under PATH [default: %default]")
server.parse(values=server, errex=1)
server.main()
if __name__ == '__main__':
main()
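# Typical invocation (hypothetical mount point): fuse-python's option parsing above
# accepts the mount point plus standard FUSE options, e.g.
#   python2 libraryfuse.py /mnt/merged -f   # -f keeps the process in the foreground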
| python |
import igraph
import numpy as np
import pandas as pd
from tqdm import tqdm
from feature_engineering.tools import lit_eval_nan_proof
# this script adds the feature shortest_path to the files training_features and testing_features
# this script takes approximately 1000 minutes to execute
# progress bar for pandas
tqdm.pandas(tqdm())
# path
path_to_data = "data/"
# loading data
converter_dict = {'authors': lit_eval_nan_proof, 'journal': lit_eval_nan_proof,
'title': lit_eval_nan_proof, 'abstract': lit_eval_nan_proof}
nodes = pd.read_csv(path_to_data + "nodes_preprocessed.csv", converters=converter_dict)
nodes.set_index("id", inplace=True)
training = pd.read_csv(path_to_data + "training_features.txt")
training.set_index("my_index", inplace=True)
testing = pd.read_csv(path_to_data + "testing_features.txt")
testing.set_index("my_index", inplace=True)
# placeholders for graph features
shortest_path = []
# IDs for training set
id1 = training['id1'].values
id2 = training['id2'].values
target = training["target"].values
# creating graph of citations
# create empty directed graph
g = igraph.Graph(directed=True)
# some nodes may not be connected to any other node
# hence the need to create the nodes of the graph from node_info.csv,
# not just from the edge list
nodes = nodes.index.values
str_vec = np.vectorize(str)
nodes = str_vec(nodes)
# add vertices
g.add_vertices(nodes)
# create and add edges
edges = [(str(id1[i]), str(id2[i])) for i in range(len(id1)) if target[i] == 1]
g.add_edges(edges)
for i in tqdm(range(len(id1))):
if target[i] == 1:
g.delete_edges([(str(id1[i]), str(id2[i]))])
shortest_path.append(g.shortest_paths_dijkstra(source=str(id1[i]), target=str(id2[i]), mode="OUT")[0][0])
if target[i] == 1:
g.add_edge(str(id1[i]), str(id2[i]))
# adding feature to dataframe
training["shortest_path"] = shortest_path
# repeat process for test set
shortest_path_test = []
id1 = testing['id1'].values
id2 = testing['id2'].values
for i in tqdm(range(len(id1))):
    # unlike the training loop, no edge has to be removed or re-added here:
    # test pairs are not labelled edges of the citation graph
    shortest_path_test.append(g.shortest_paths_dijkstra(source=str(id1[i]), target=str(id2[i]), mode="OUT")[0][0])
testing["shortest_path"] = shortest_path_test
# save data sets
training.to_csv(path_to_data + "training_features.txt")
testing.to_csv(path_to_data + "testing_features.txt")
| python |
# encoding: utf-8
"""
lxml custom element classes for shape tree-related XML elements.
"""
from __future__ import absolute_import
from .autoshape import CT_Shape
from .connector import CT_Connector
from ...enum.shapes import MSO_CONNECTOR_TYPE
from .graphfrm import CT_GraphicalObjectFrame
from ..ns import qn
from .picture import CT_Picture
from .shared import BaseShapeElement
from ..xmlchemy import BaseOxmlElement, OneAndOnlyOne, ZeroOrOne
class CT_GroupShape(BaseShapeElement):
"""
Used for the shape tree (``<p:spTree>``) element as well as the group
shape (``<p:grpSp>``) element.
"""
nvGrpSpPr = OneAndOnlyOne('p:nvGrpSpPr')
grpSpPr = OneAndOnlyOne('p:grpSpPr')
_shape_tags = (
qn('p:sp'), qn('p:grpSp'), qn('p:graphicFrame'), qn('p:cxnSp'),
qn('p:pic'), qn('p:contentPart')
)
def add_autoshape(self, id_, name, prst, x, y, cx, cy):
"""
Append a new ``<p:sp>`` shape to the group/shapetree having the
properties specified in call.
"""
sp = CT_Shape.new_autoshape_sp(id_, name, prst, x, y, cx, cy)
self.insert_element_before(sp, 'p:extLst')
return sp
def add_cxnSp(self, id_, name, type_member, x, y, cx, cy, flipH, flipV):
"""
Append a new ``<p:cxnSp>`` shape to the group/shapetree having the
properties specified in call.
"""
prst = MSO_CONNECTOR_TYPE.to_xml(type_member)
cxnSp = CT_Connector.new_cxnSp(
id_, name, prst, x, y, cx, cy, flipH, flipV
)
self.insert_element_before(cxnSp, 'p:extLst')
return cxnSp
def add_pic(self, id_, name, desc, rId, x, y, cx, cy):
"""
Append a ``<p:pic>`` shape to the group/shapetree having properties
as specified in call.
"""
pic = CT_Picture.new_pic(id_, name, desc, rId, x, y, cx, cy)
self.insert_element_before(pic, 'p:extLst')
return pic
def add_placeholder(self, id_, name, ph_type, orient, sz, idx):
"""
Append a newly-created placeholder ``<p:sp>`` shape having the
specified placeholder properties.
"""
sp = CT_Shape.new_placeholder_sp(
id_, name, ph_type, orient, sz, idx
)
self.insert_element_before(sp, 'p:extLst')
return sp
def add_table(self, id_, name, rows, cols, x, y, cx, cy):
"""
Append a ``<p:graphicFrame>`` shape containing a table as specified
in call.
"""
graphicFrame = CT_GraphicalObjectFrame.new_table_graphicFrame(
id_, name, rows, cols, x, y, cx, cy
)
self.insert_element_before(graphicFrame, 'p:extLst')
return graphicFrame
def add_textbox(self, id_, name, x, y, cx, cy):
"""
Append a newly-created textbox ``<p:sp>`` shape having the specified
position and size.
"""
sp = CT_Shape.new_textbox_sp(id_, name, x, y, cx, cy)
self.insert_element_before(sp, 'p:extLst')
return sp
def get_or_add_xfrm(self):
"""
Return the ``<a:xfrm>`` grandchild element, newly-added if not
present.
"""
return self.grpSpPr.get_or_add_xfrm()
def iter_ph_elms(self):
"""
Generate each placeholder shape child element in document order.
"""
for e in self.iter_shape_elms():
if e.has_ph_elm:
yield e
def iter_shape_elms(self):
"""
Generate each child of this ``<p:spTree>`` element that corresponds
to a shape, in the sequence they appear in the XML.
"""
for elm in self.iterchildren():
if elm.tag in self._shape_tags:
yield elm
@property
def xfrm(self):
"""
The ``<a:xfrm>`` grandchild element or |None| if not found
"""
return self.grpSpPr.xfrm
class CT_GroupShapeNonVisual(BaseShapeElement):
"""
``<p:nvGrpSpPr>`` element.
"""
cNvPr = OneAndOnlyOne('p:cNvPr')
class CT_GroupShapeProperties(BaseOxmlElement):
"""
The ``<p:grpSpPr>`` element
"""
xfrm = ZeroOrOne('a:xfrm', successors=(
'a:noFill', 'a:solidFill', 'a:gradFill', 'a:blipFill', 'a:pattFill',
'a:grpFill', 'a:effectLst', 'a:effectDag', 'a:scene3d', 'a:extLst'
))
| python |
import sys
import sh
def app(name, *args, _out=sys.stdout, _err=sys.stderr, _tee=True, **kwargs):
try:
return sh.Command(name).bake(
*args, _out=_out, _err=_err, _tee=_tee, **kwargs
)
except sh.CommandNotFound:
return sh.Command(sys.executable).bake(
"-c",
(
f"import sys; import click; click.secho('Command `{name}` "
f"not found', fg='red'); sys.exit(1)"
),
)
# Shell commands
ls = app("ls")
rm = app("rm", "-rf")
cp = app("cp", "-rf")
find = app("find", _out=None)
mount = app("mount")
umount = app("umount", "-f")
# Python commands
python = app(sys.executable)
pip = app("pip")
pytest = app("py.test", "-s", _tee=False, _ok_code=[0, 1, 2, 3, 4, 5])
black = app("black")
flake8 = app("flake8", _ok_code=[0, 1])
pydocstyle = app("pydocstyle", _ok_code=[0, 1])
# Docker
docker = app("docker")
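# Usage sketch (hypothetical arguments): each name above is a pre-baked sh.Command,
# so it is called like a function and streams output as configured, e.g.
#   flake8("my_package/")
#   pytest("tests/")
#   docker("ps")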
| python |
"""
This options file demonstrates how to run a stripping line
from a specific stripping version on a local MC DST file
It is based on the minimal DaVinci DecayTreeTuple example
"""
from StrippingConf.Configuration import StrippingConf, StrippingStream
from StrippingSettings.Utils import strippingConfiguration
from StrippingArchive.Utils import buildStreams
from StrippingArchive import strippingArchive
from Configurables import (
EventNodeKiller,
ProcStatusCheck,
DaVinci,
DecayTreeTuple
)
from GaudiConf import IOHelper
from DecayTreeTuple.Configuration import *
# Node killer: remove the previous Stripping
event_node_killer = EventNodeKiller('StripKiller')
event_node_killer.Nodes = ['/Event/AllStreams', '/Event/Strip']
# Build a new stream called 'CustomStream' that only
# contains the desired line
strip = 'stripping28r1'
streams = buildStreams(stripping=strippingConfiguration(strip),
archive=strippingArchive(strip))
line = 'D2hhPromptDst2D2KKLine'
custom_stream = StrippingStream('CustomStream')
custom_line = 'Stripping'+line
for stream in streams:
for sline in stream.lines:
if sline.name() == custom_line:
custom_stream.appendLines([sline])
# Create the actual Stripping configurable
filterBadEvents = ProcStatusCheck()
sc = StrippingConf(Streams=[custom_stream],
MaxCandidates=2000,
AcceptBadEvents=False,
BadEventSelection=filterBadEvents)
# Create an ntuple to capture D*+ decays from the StrippingLine line
dtt = DecayTreeTuple('TupleDstToD0pi_D0ToKK')
# The output is placed directly into Phys, so we only need to
# define the stripping line here
dtt.Inputs = ['/Event/Phys/{0}/Particles'.format(line)]
dtt.Decay = '[D*(2010)+ -> (D0 -> K- K+) pi+]CC'
# Configure DaVinci
# Important: The selection sequence needs to be inserted into
# the Gaudi sequence for the stripping to run
DaVinci().appendToMainSequence([event_node_killer, sc.sequence()])
DaVinci().UserAlgorithms += [dtt]
DaVinci().InputType = 'DST'
DaVinci().TupleFile = 'DVntuple.root'
DaVinci().PrintFreq = 1000
DaVinci().DataType = '2016'
DaVinci().Simulation = True
# Only ask for luminosity information when not using simulated data
DaVinci().Lumi = not DaVinci().Simulation
DaVinci().EvtMax = 5000
DaVinci().CondDBtag = 'sim-20161124-2-vc-md100'
DaVinci().DDDBtag = 'dddb-20150724'
# Use the local input data
IOHelper().inputFiles([
'./00062514_00000001_7.AllStreams.dst'
], clear=True)
| python |
from .base import BaseField
class IntegerField(BaseField):
pass
| python |
def ingredients(count):
"""Prints ingredients for making `count` arepas."""
print('{:.2} cups arepa flour'.format(0.1*count))
print('{:.2} cups cheese'.format(0.1*count))
print('{:.2} cups water'.format(0.025*count))
| python |
from __future__ import absolute_import, unicode_literals
import os
import celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_project.settings')
app = celery.Celery('test_project') # noqa: pylint=invalid-name
# Using a string here means the worker don't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object('django.conf:settings', namespace='CELERY')
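# With namespace='CELERY', Django settings prefixed with CELERY_ map onto the matching
# Celery options, e.g. (hypothetical values):
#   CELERY_BROKER_URL = 'redis://localhost:6379/0'   -> broker_url
#   CELERY_TASK_SERIALIZER = 'json'                  -> task_serializer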
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self: celery.Task):
return 'Request: {0!r}'.format(self.request.task)
| python |
'''
Created on Apr 15, 2016
@author: Drew
'''
class CogTV:
def __init__(self):
pass
def setScreen(self, scene):
pass
| python |
import sys
import csv
import numpy as np
import statistics
import scipy.stats
def anova(index, norobot_data, video_data, robot_data):
norobot_mean = norobot_data.mean(axis = 0)[index]
video_mean = video_data.mean(axis = 0)[index]
robot_mean = robot_data.mean(axis = 0)[index]
group_means = [norobot_mean, video_mean, robot_mean]
total_mean = statistics.mean(group_means)
norobot_values = norobot_data[:,index]
video_values = video_data[:,index]
robot_values = robot_data[:,index]
SST = 0
for i in group_means:
SST += 5 * (i - total_mean)**2
MST = SST / 2 # MST = SST / (k - 1)
norobot_sse = 0
for value in norobot_values:
norobot_sse += (value - norobot_mean)**2
video_sse = 0
for value in video_values:
video_sse += (value - video_mean)**2
robot_sse = 0
for value in robot_values:
robot_sse += (value - robot_mean)**2
SSE = norobot_sse + video_sse + robot_sse
MSE = SSE / (15 - 3) # MSE = SSE / (n - k)
F = MST / MSE
pval = 1-scipy.stats.f.cdf(F, 2, 12)
# print(F)
# print("pval",pval)
###
SS = SSE + SST
ss = 0
for value in norobot_values:
ss += (value - total_mean)**2
for value in video_values:
ss += (value - total_mean)**2
for value in robot_values:
ss += (value - total_mean)**2
# print(ss, SS)
###
print("index", index)
print("SST", SST)
print("SSE", SSE)
print("MST", MST)
print("MSE", MSE)
print("SS", SS)
print("F", F)
print("P-value", pval)
print("\n")
return
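# Cross-check sketch (never called by main): scipy.stats.f_oneway computes the one-way
# ANOVA F statistic and p-value directly from the three groups of scores.
def anova_crosscheck(norobot_values, video_values, robot_values):
    # returns an object with .statistic (F) and .pvalue for H0: equal group means
    return scipy.stats.f_oneway(norobot_values, video_values, robot_values)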
def ttest(index, norobot_data, video_data, robot_data):
norobot_mean = norobot_data.mean(axis = 0)[index]
video_mean = video_data.mean(axis = 0)[index]
robot_mean = robot_data.mean(axis = 0)[index]
norobot_std = norobot_data.std(axis = 0)[index]
video_std = video_data.std(axis = 0)[index]
robot_std = robot_data.std(axis = 0)[index]
mean_0 = 0 # mean under the null - no improvement
norobot_t = norobot_mean/(norobot_std / (15)**0.5)
video_t = video_mean/(video_std / (15)**0.5)
robot_t = robot_mean/(robot_std / (15)**0.5)
norobot_pval = 1 - scipy.stats.t.cdf(norobot_t, 14)
video_pval = 1 - scipy.stats.t.cdf(video_t, 14)
robot_pval = 1 - scipy.stats.t.cdf(robot_t, 14)
print("Index", index)
print("Mean - no robot", norobot_mean)
print("T value - no robot", norobot_t)
print("P-value - no robot", norobot_pval)
print("Mean - video", video_mean)
print("T value - video", video_t)
print("P-value - video", video_pval)
print("Mean - robot", robot_mean)
print("T value - robot", robot_t)
print("P-value - robot", robot_pval)
print("\n")
def main(args):
df = args[1]
datafile = open(df, "r")
read_csv = csv.reader(datafile, delimiter=",")
data = []
for row in read_csv:
x = list()
# x.append(row[1])
if row[1] == "norobot":
x.append(1)
elif row[1] == "video":
x.append(2)
else:
x.append(3)
values = [eval(i) for i in row[2:]]
x += values
x.append(statistics.mean(values))
x.append(values[0] - values[1])
x.append(values[1] - values[2])
x.append(values[0] - values[2])
data.append(x)
norobot_data = []
video_data = []
robot_data = []
# print(data)
for trial in data:
if trial[0] == 1:
norobot_data.append(trial)
elif trial[0] == 2:
video_data.append(trial)
else:
robot_data.append(trial)
norobot_data = np.array(norobot_data)
video_data = np.array(video_data)
robot_data = np.array(robot_data)
# for i in [5, 6, 7]:
# anova(i, norobot_data, video_data, robot_data)
for i in [5, 6, 7]:
ttest(i, norobot_data, video_data, robot_data)
if __name__ == "__main__":
main(sys.argv)
'''
H_0 : mean_norobot = mean_video = mean_robot
H_a : not mean_norobot = mean_video = mean_robot
alpha = 0.05
qf(0.95, 2, 12) = 3.885294
Rejection Region: {F > 3.885294}
ANOVA Table RESULTS
time_1:
Source dof SS MS F
Treatments 2 95432.4 47716.2 0.60383
Error 12 948262.0 79021.8
Total 14 1043694.4
p-value 0.5625096331593546
time_2:
Source dof SS MS F
Treatments 2 17142.5 8571.2 0.16672
Error 12 616930.4 51410.9
Total 14 634072.9
p-value 0.8483630364091982
time_3:
Source dof SS MS F
Treatments 2 49522.8 24761.4 0.241145
Error 12 1232189.2 102682.4
Total 14 1281712.0
p-value 0.7894446486187324
Average Time:
Source dof SS MS F
Treatments 2 37014.0 18507.0 0.479521
Error 12 463136.6 38594.7
Total 14 500150.6
p-value 0.6304490558407776
Improvement from time_1 to time_2
Source dof SS MS F
Treatments 2 99302.9 49651.5 1.1005396
Error 12 541386.8 45115.6
Total 14 640689.7
p-value 0.36404861871620386
Improvement from time_2 to time_3
Source dof SS MS F
Treatments 2 34797.7 17398.9 0.1037937
Error 12 2011551.2 167629.2
Total 14 2046348.9
p-value 0.9022116073486796
Improvement from time_1 to time_3
Source dof SS MS F
Treatments 2 19066.8 9533.4 0.068463
Error 12 1670977.6 139248.1
Total 14 1690044.4
p-value 0.9341897168496459
'''
'''
H_0: mean improvement = 0
H_a: mean improvement > 0
Improvement between time_1 and time_2
Mean - no robot 262.2
T value - no robot 5.581827247691283
P-value - no robot 3.380587255563672e-05
Mean - video 63.8
T value - video 0.9839638259926194
P-value - video 0.17091676826650537
Mean - robot 146.6
T value - robot 5.158170177143269
P-value - robot 7.265008933243777e-05
Improvement between time_2 and time_3
Mean - no robot -89.2
T value - no robot -0.9274569021697335
P-value - no robot 0.815298302242971
Mean - video 23.4
T value - video 0.2024783964679772
P-value - video 0.4212278577733659
Mean - robot -2.4
T value - robot -0.036968008327296194
P-value - robot 0.5144837641036524
Improvement from time_1 to time_3
Mean - no robot 173.0
T value - no robot 2.5331918015827544
P-value - no robot 0.011941444190466166
Mean - video 87.2
T value - video 0.779810428227249
P-value - video 0.22424287864651182
Mean - robot 144.2
T value - robot 2.0169198592088846
P-value - robot 0.03165118966953784
''' | python |
from .sequence_tagger_model import SequenceTagger, MultiTagger
from .language_model import LanguageModel
from .text_classification_model import TextClassifier
from .pairwise_classification_model import TextPairClassifier
from .relation_extractor_model import RelationExtractor
from .entity_linker_model import EntityLinker
from .tars_model import FewshotClassifier
from .tars_model import TARSClassifier
from .tars_model import TARSTagger
| python |
def longestPalindromicSubstring(string):
longest = ""
for i in range(len(string)):
for j in range(i, len(string)):
substring = string[i : j + 1]
if len(substring) > len(longest) and isPalindrome(substring):
longest = substring
return longest
def isPalindrome(string):
leftIdx = 0
rightIdx = len(string)- 1
while leftIdx < rightIdx:
if string[leftIdx] != string[rightIdx]:
return False
leftIdx += 1
rightIdx -= 1
return True | python |
from django.conf import settings
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from user.models import User
from user.serializers import UserSerializer
import redis
import uuid
import pycountry
# initiates the redis instance.
redis_instance = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=0)
set_name = settings.REDIS_SET_NAME
# returns the IDs and points of the top `size` users from the given redis sorted set.
def get_top_users(country, size):
top_users = redis_instance.zrevrange(country, 0, size-1, withscores=True)
IDs = []
points = []
for i in range(len(top_users)):
ID_str = top_users[i][0].decode('utf-8')
IDs.append(uuid.UUID(ID_str))
points.append(top_users[i][1])
return IDs, points
# Returns the individual country ranks of top users if the user requested global
# leaderboard, and returns the global ranks of the top users if the user requested
# country leaderboard.
def get_ranking(users, ID_list, is_global_ranking):
pipeline = redis_instance.pipeline()
for user_id in ID_list:
user = users.get(user_id=user_id)
pipeline.zrevrank(set_name if is_global_ranking else user.country, str(user_id))
pipeline_values = pipeline.execute()
return pipeline_values
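# zrevrank gives the 0-based position in descending score order, which is why the
# callers below add 1 when storing rank / country_rank on the user objects.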
class global_leaderboard(APIView):
def get(self, request):
leaderboard_size = 50
# gets the IDs and points of the top 50 users globally.
IDs, points = get_top_users(set_name, leaderboard_size)
users = User.objects.filter(user_id__in=IDs)
# gets the individual country ranks of those users, stores them in 'country_ranks'
# variable.
country_ranks = get_ranking(users, IDs, False)
# creates a list of users to be updated in the database. This list contains
# the most up to date values of those users, freshly received from the redis
# table.
for user in users:
user_index = IDs.index(user.user_id)
user.rank = user_index+1
user.points = points[user_index]
user.country_rank = country_ranks[user_index]+1
# updates the values of those users in the database.
User.objects.bulk_update(users, ['points', 'rank', 'country_rank'])
serializer = UserSerializer(users, many=True)
data = list(serializer.data)
data.reverse()
return Response(data, status=status.HTTP_200_OK)
# Follows a similar procedure to the global leaderboard class.
class country_leaderboard(APIView):
def get(self, request, country):
if not pycountry.countries.get(alpha_2=country):
return Response({'message': 'Invalid country ISO code. Please use ISO 3166-1 alpha-2 codes.'}, status=status.HTTP_400_BAD_REQUEST)
leaderboard_size = 50
IDs, points = get_top_users(country, leaderboard_size)
users = User.objects.filter(user_id__in=IDs)
global_ranks = get_ranking(users, IDs, True)
for user in users:
user_index = IDs.index(user.user_id)
user.country_rank = user_index+1
user.points = points[user_index]
user.rank = global_ranks[user_index]+1
User.objects.bulk_update(users, ['points', 'rank', 'country_rank'])
serializer = UserSerializer(users, many=True)
data = list(serializer.data)
data.reverse()
return Response(data, status=status.HTTP_200_OK) | python |
import cv2
import numpy as np
import BboxToolkit as bt
import pycocotools.mask as maskUtils
from mmdet.core import PolygonMasks, BitmapMasks
pi = 3.141592
def bbox2mask(bboxes, w, h, mask_type='polygon'):
polys = bt.bbox2type(bboxes, 'poly')
assert mask_type in ['polygon', 'bitmap']
if mask_type == 'bitmap':
masks = []
for poly in polys:
rles = maskUtils.frPyObjects([poly.tolist()], h, w)
masks.append(maskUtils.decode(rles[0]))
gt_masks = BitmapMasks(masks, h, w)
else:
gt_masks = PolygonMasks([[poly] for poly in polys], h, w)
return gt_masks
def switch_mask_type(masks, mtype='bitmap'):
if isinstance(masks, PolygonMasks) and mtype == 'bitmap':
width, height = masks.width, masks.height
bitmap_masks = []
for poly_per_obj in masks.masks:
rles = maskUtils.frPyObjects(poly_per_obj, height, width)
rle = maskUtils.merge(rles)
bitmap_masks.append(maskUtils.decode(rle).astype(np.uint8))
masks = BitmapMasks(bitmap_masks, height, width)
elif isinstance(masks, BitmapMasks) and mtype == 'polygon':
width, height = masks.width, masks.height
polygons = []
for bitmask in masks.masks:
try:
contours, _ = cv2.findContours(
bitmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
except ValueError:
_, contours, _ = cv2.findContours(
bitmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
polygons.append(list(contours))
masks = PolygonMasks(polygons, width, height)
return masks
def rotate_polygonmask(masks, matrix, width, height):
if len(masks) == 0:
return masks
points, sections, instances = [], [], []
for i, polys_per_obj in enumerate(masks):
for j, poly in enumerate(polys_per_obj):
poly_points = poly.reshape(-1, 2)
num_points = poly_points.shape[0]
points.append(poly_points)
sections.append(np.full((num_points, ), j))
instances.append(np.full((num_points, ), i))
points = np.concatenate(points, axis=0)
sections = np.concatenate(sections, axis=0)
instances = np.concatenate(instances, axis=0)
points = cv2.transform(points[:, None, :], matrix)[:, 0, :]
warpped_polygons = []
for i in range(len(masks)):
_points = points[instances == i]
_sections = sections[instances == i]
warpped_polygons.append(
[_points[_sections == j].reshape(-1)
for j in np.unique(_sections)])
return PolygonMasks(warpped_polygons, height, width)
def polymask2hbb(masks):
hbbs = []
for mask in masks:
all_mask_points = np.concatenate(mask, axis=0).reshape(-1, 2)
min_points = all_mask_points.min(axis=0)
max_points = all_mask_points.max(axis=0)
hbbs.append(np.concatenate([min_points, max_points], axis=0))
hbbs = np.array(hbbs, dtype=np.float32) if hbbs else \
np.zeros((0, 4), dtype=np.float32)
return hbbs
def polymask2obb(masks):
obbs = []
for mask in masks:
all_mask_points = np.concatenate(mask, axis=0).reshape(-1, 2)
all_mask_points = all_mask_points.astype(np.float32)
(x, y), (w, h), angle = cv2.minAreaRect(all_mask_points)
angle = -angle
theta = angle / 180 * pi
obbs.append([x, y, w, h, theta])
if not obbs:
obbs = np.zeros((0, 5), dtype=np.float32)
else:
obbs = np.array(obbs, dtype=np.float32)
obbs = bt.regular_obb(obbs)
return obbs
def polymask2poly(masks):
polys = []
for mask in masks:
all_mask_points = np.concatenate(mask, axis=0)[None, :]
if all_mask_points.size != 8:
all_mask_points = bt.bbox2type(all_mask_points, 'obb')
all_mask_points = bt.bbox2type(all_mask_points, 'poly')
polys.append(all_mask_points)
if not polys:
polys = np.zeros((0, 8), dtype=np.float32)
else:
polys = np.concatenate(polys, axis=0)
return polys
def bitmapmask2hbb(masks):
if len(masks) == 0:
return np.zeros((0, 4), dtype=np.float32)
bitmaps = masks.masks
height, width = masks.height, masks.width
num = bitmaps.shape[0]
x, y = np.arange(width), np.arange(height)
xx, yy = np.meshgrid(x, y)
coors = np.stack([xx, yy], axis=-1)
coors = coors[None, ...].repeat(num, axis=0)
coors_ = coors.copy()
coors_[bitmaps == 0] = -1
max_points = np.max(coors_, axis=(1, 2)) + 1
coors_ = coors.copy()
coors_[bitmaps == 0] = 99999
min_points = np.min(coors_, axis=(1, 2))
hbbs = np.concatenate([min_points, max_points], axis=1)
hbbs = hbbs.astype(np.float32)
return hbbs
def bitmapmask2obb(masks):
if len(masks) == 0:
return np.zeros((0, 5), dtype=np.float32)
height, width = masks.height, masks.width
x, y = np.arange(width), np.arange(height)
xx, yy = np.meshgrid(x, y)
coors = np.stack([xx, yy], axis=-1)
coors = coors.astype(np.float32)
obbs = []
for mask in masks:
points = coors[mask == 1]
(x, y), (w, h), angle = cv2.minAreaRect(points)
angle = -angle
theta = angle / 180 * pi
obbs.append([x, y, w, h, theta])
obbs = np.array(obbs, dtype=np.float32)
obbs = bt.regular_obb(obbs)
return obbs
def bitmapmask2poly(masks):
if len(masks) == 0:
return np.zeros((0, 8), dtype=np.float32)
height, width = masks.height, masks.width
x, y = np.arange(width), np.arange(height)
xx, yy = np.meshgrid(x, y)
coors = np.stack([xx, yy], axis=-1)
coors = coors.astype(np.float32)
obbs = []
for mask in masks:
points = coors[mask == 1]
(x, y), (w, h), angle = cv2.minAreaRect(points)
angle = -angle
theta = angle / 180 * pi
obbs.append([x, y, w, h, theta])
obbs = np.array(obbs, dtype=np.float32)
return bt.bbox2type(obbs, 'poly')
def mask2bbox(masks, btype):
if isinstance(masks, PolygonMasks):
tran_func = bt.choice_by_type(polymask2hbb,
polymask2obb,
polymask2poly,
btype)
elif isinstance(masks, BitmapMasks):
tran_func = bt.choice_by_type(bitmapmask2hbb,
bitmapmask2obb,
bitmapmask2poly,
btype)
else:
raise NotImplementedError
return tran_func(masks)
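# Usage sketch (hypothetical data, assuming bt.choice_by_type dispatches on the btype
# string exactly as used above): convert one axis-aligned 4 x 2 polygon mask to boxes.
def _demo_mask2bbox():
    poly = np.array([0., 0., 4., 0., 4., 2., 0., 2.], dtype=np.float32)
    masks = PolygonMasks([[poly]], 10, 10)
    hbb = mask2bbox(masks, 'hbb')  # -> [[0., 0., 4., 2.]]
    obb = mask2bbox(masks, 'obb')  # -> min-area rect as (cx, cy, w, h, theta)
    return hbb, obb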
| python |
from flask_sqlalchemy import SQLAlchemy
from typing import Optional, Set
from models import Team, ProblemSet, PermissionPack
class DefaultPermissionProvider:
def __init__(self, db: SQLAlchemy) -> None:
self.db = db
def get_contest_permissions(self, uid: int, contest_id: Optional[str]) -> Set[str]:
return {f"contest.use.{contest_id}"}
def get_team_permissions(self, uid: int, team_id: Optional[str]) -> Set[str]:
# joined = self.db.session.query(TeamMember).filter_by(
# uid=uid, team_id=team).count() != 0
team: Team = self.db.session.query(
Team.team_contests, Team.team_problems, Team.team_problemsets, Team.id).filter(Team.id == team_id).one_or_none()
if not team:
return set()
print(team)
return {f"team.use.{team_id}"} | {f"[provider:contest.{x}]" for x in team.team_contests} | {f"problem.use.{x}" for x in team.team_problems} | {f"[provider:problemset.{x}]" for x in team.team_problemsets}
def get_problemset_permissions(self, uid: int, problemset: Optional[str]) -> Set[str]:
ps: ProblemSet = self.db.session.query(
ProblemSet.problems).filter_by(id=problemset).one_or_none()
if not ps:
return set()
return {f"problem.use.{x}" for x in ps.problems} | {f"problemset.use.{problemset}"}
def get_permissionpack_permissions(self, uid: int, permpack_id: Optional[str]) -> Set[str]:
permpack: PermissionPack = self.db.session.query(
PermissionPack.permissions).filter(PermissionPack.id == permpack_id).one_or_none()
if not permpack:
return set()
return {f"permissionpack.claimed.{permpack_id}"} | {x for x in permpack.permissions}
| python |
import pytest
from drink_partners.extensions.authentication.static import (
StaticAuthenticationBackend
)
class TestStaticAuthentication:
@pytest.fixture
def backend(self):
return StaticAuthenticationBackend.create()
async def test_respects_the_token_from_querystring_param(
self,
backend,
make_request,
token,
application,
settings_with_applications
):
request = make_request(
method='get',
url='https://www.zedelivery.com.br/',
params={'token': token}
)
authorized_application = await backend.authenticate(request)
assert application['name'] == authorized_application['name']
async def test_respects_the_token_from_headers(
self,
backend,
make_request,
token,
application,
settings_with_applications
):
request = make_request(
method='get',
url='https://www.zedelivery.com.br/',
headers={backend.AUTH_HEADER: token}
)
authorized_application = await backend.authenticate(request)
assert application['name'] == authorized_application['name']
async def test_returns_none_for_non_authenticated_request(
self,
backend,
make_request,
settings_with_applications
):
request = make_request(
method='get',
url='https://www.zedelivery.com.br/'
)
application = await backend.authenticate(request)
assert application is None
| python |
from tracrpc.api import *
from tracrpc.web_ui import *
from tracrpc.ticket import *
from tracrpc.wiki import *
from tracrpc.search import *
| python |
import sys
import azure
import socket
from azure.servicebus import (
_service_bus_error_handler
)
from azure.servicebus.servicebusservice import (
ServiceBusService,
ServiceBusSASAuthentication
)
#from azure.http import (
# HTTPRequest,
# HTTPError
# )
#from azure.http.httpclient import _HTTPClient
sbnamespace = "iot34ns"
sasKeyName = "devices"
sasKeyValue = "9DiC0UfzRn/EeQdg9+84UPyJLprQbXvhrqPzt9ayubo="
eventhubname = "iotte"
thisdevice = "onedevice"
sbs = ServiceBusService(service_namespace=sbnamespace,
shared_access_key_name=sasKeyName,
shared_access_key_value=sasKeyValue)
sbs.send_event(eventhubname, "testing", device_id=thisdevice)
| python |
# function to read the txt file that contains the encrypted message
# the file is called mensaje_cifrado_grupo1.txt
def txt_a_mensaje(): # function 7
    return # the message is returned as a string | python |
from django.urls import path
from .views import Notifier
urlpatterns = [
path('get/<int:pk>', Notifier.as_view()),
path('get', Notifier.as_view()),
]
| python |
# built-in
from argparse import ArgumentParser
from pathlib import Path
from shutil import rmtree
# app
from ..actions import format_size, get_path_size
from ..config import builders
from .base import BaseCommand
class SelfUncacheCommand(BaseCommand):
"""Remove dephell cache.
"""
@staticmethod
def build_parser(parser) -> ArgumentParser:
builders.build_config(parser)
builders.build_output(parser)
builders.build_other(parser)
return parser
def __call__(self) -> bool:
path = Path(self.config['cache']['path'])
if path.exists():
size = format_size(get_path_size(path))
rmtree(str(path))
self.logger.info('cache removed', extra=dict(size=size))
else:
self.logger.warning('no cache found')
return True
| python |
from distutils.core import setup
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'EssentialCV',
packages = ['EssentialCV'],
version = '0.26',
license='MIT',
description = 'A small module to simplify essential OpenCV functions.',
long_description=long_description,
long_description_content_type='text/markdown',
author = 'Rednek46',
author_email = '[email protected]',
url = 'https://rednek46.me',
download_url = 'https://github.com/rednek46/EssentialCV/archive/0.25F.tar.gz',
keywords = ['OpenCV', 'Simple', 'Essentials', 'haar'],
install_requires=[
'opencv-contrib-python',
'numpy',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
) | python |
import numpy as np
def wPrefersM1OverM(prefer, w, m, m1):
for i in range(N):
if (prefer[w][i] == m1):
return True
if (prefer[w][i] == m):
return False
def stableMarriage(prefer):
wPartner = [-1 for i in range(N)]
mFree = [False for i in range(N)]
freeCount = N
while (freeCount > 0):
m = 0
while (m < N):
if mFree[m] == False:
break
m += 1
i = 0
while i < N and mFree[m] == False:
w = prefer[m][i]
if (wPartner[w - N] == -1):
wPartner[w - N] = m
mFree[m] = True
freeCount -= 1
else:
m1 = wPartner[w - N]
if (wPrefersM1OverM(prefer, w, m, m1) == False):
wPartner[w - N] = m
mFree[m] = True
mFree[m1] = False
i += 1
print("Woman ", " Man")
for i in range(N):
print(i + N, "\t", wPartner[i])
N = int(input("Enter the number of men/women: "))
print("Enter preferences:")
entries = list(map(int, input().split()))
prefer = np.array(entries).reshape(2*N, N)
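# Layout assumed by the algorithm: rows 0..N-1 are the men's preference lists over the
# women (numbered N..2N-1), and rows N..2N-1 are the women's preference lists over the
# men (numbered 0..N-1), matching the sample input in the docstring below.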
stableMarriage(prefer)
"""
Time Complexity: O(n^2)
Sample Input:
Enter the number of men/women: 4
Enter preferences: 7 5 6 4 5 4 6 7 4 5 6 7 4 5 6 7 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3
Output:
Woman Man
4 2
5 1
6 3
7 0
"""
| python |
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
##############################################################################
#
# PURPOSE:
# Helper library used by the MRE internal lambda functions to interact with
# the control plane
#
##############################################################################
import os
import re
import json
import urllib3
import boto3
import requests
from requests_aws4auth import AWS4Auth
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def get_endpoint_url_from_ssm():
ssm_client = boto3.client(
'ssm',
region_name=os.environ['AWS_REGION']
)
response = ssm_client.get_parameter(
Name='/MRE/ControlPlane/EndpointURL',
WithDecryption=True
)
assert "Parameter" in response
endpoint_url = response["Parameter"]["Value"]
endpoint_url_regex = ".*.execute-api."+os.environ['AWS_REGION']+".amazonaws.com/api/.*"
assert re.match(endpoint_url_regex, endpoint_url)
return endpoint_url
class ControlPlane:
"""
Helper Class for interacting with the Control plane
"""
def __init__(self):
self.endpoint_url = get_endpoint_url_from_ssm()
self.auth = AWS4Auth(
os.environ['AWS_ACCESS_KEY_ID'],
os.environ['AWS_SECRET_ACCESS_KEY'],
os.environ['AWS_REGION'],
'execute-api',
session_token=os.getenv('AWS_SESSION_TOKEN')
)
def invoke_controlplane_api(self, path, method, headers=None, body=None, params=None):
"""
Method to invoke the Control plane REST API Endpoint.
:param path: Path to the corresponding API resource
:param method: REST API method
:param headers: (optional) headers to include in the request
:param body: (optional) data to send in the body of the request
:param params: (optional) data to send in the request query string
:return: Control plane API response object
"""
print(f"{method} {path}")
try:
response = requests.request(
method=method,
url=self.endpoint_url + path,
params=params,
headers=headers,
data=body,
verify=False,
auth=self.auth
)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print(f"Encountered an error while invoking the control plane api: {str(e)}")
raise Exception(e)
else:
return response
def store_first_pts(self, event, program, first_pts):
"""
Method to store the pts timecode of the first frame of the first HLS video segment in the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param first_pts: The pts timecode of the first frame of the first HLS video segment
:return: Control plane response
"""
path = f"/event/{event}/program/{program}/timecode/firstpts/{first_pts}"
method = "PUT"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_first_pts(self, event, program):
"""
Method to get the pts timecode of the first frame of the first HLS video segment from the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:return: Control plane response containing the pts timecode of the first frame of the first HLS video segment
"""
path = f"/event/{event}/program/{program}/timecode/firstpts"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
if api_response.text == "null":
return None
return api_response.text
def store_frame_rate(self, event, program, frame_rate):
"""
Method to store the frame rate identified after probing the first HLS video segment in the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param frame_rate: The frame rate identified from the first HLS video segment
:return: Control plane response
"""
path = f"/event/{event}/program/{program}/framerate/{frame_rate}"
method = "PUT"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def store_audio_tracks(self, event, program, audio_tracks):
"""
Method to store the audio track details identified after probing the first HLS video segment in the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param audio_tracks: List of audio tracks identified from the first HLS video segment
:return: Control plane response
"""
path = "/event/metadata/track/audio"
method = "POST"
headers = {
"Content-Type": "application/json"
}
body = {
"Name": event,
"Program": program,
"AudioTracks": audio_tracks
}
api_response = self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(body))
return api_response.json()
def get_chunk_number(self, filename):
"""
Method to extract the chunk number from HLS segment filename.
:param filename: Name of the HLS segment file
:return: Chunk number as integer
"""
root, _ = os.path.splitext(filename)
return int(root.split("_")[-1].lstrip("0"))
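        # Example (hypothetical filename following the "<prefix>_<number>.<ext>"
        # pattern assumed above): self.get_chunk_number("my_event_00017.ts") -> 17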
def record_execution_details(self, event, program, filename, execution_id):
"""
Method to record the details of an AWS Step Function workflow execution in the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param filename: Filename of the HLS Segment (Chunk) being processed in the workflow execution
:param execution_id: Execution ID of the Step Function workflow
:return: Control plane response
"""
path = "/workflow/execution"
method = "POST"
headers = {
"Content-Type": "application/json"
}
body = {
"Program": program,
"Event": event,
"ExecutionId": execution_id,
"ChunkNumber": self.get_chunk_number(filename),
"Filename": filename
}
api_response = self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(body))
return api_response.json()
def put_plugin_execution_status(self, event, program, filename, plugin_name, status):
"""
Method to update the execution status of a plugin in an AWS Step Function workflow in the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param filename: Filename of the HLS Segment (Chunk) being processed in the workflow execution
:param plugin_name: Name of the plugin for which the execution status update is needed
:param status: Status of the plugin execution - Waiting, In Progress, Complete, Error
:return: Control plane response
"""
path = f"/workflow/execution/program/{program}/event/{event}/chunk/{self.get_chunk_number(filename)}/plugin/{plugin_name}/status/{status}"
method = "PUT"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_plugin_execution_status(self, event, program, filename, plugin_name):
"""
Method to retrieve the execution status of a plugin in an AWS Step Function workflow in the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param filename: Filename of the HLS Segment (Chunk) being processed in the workflow execution
:param plugin_name: Name of the plugin for which the execution status is to be retrieved
:return: Control plane response
"""
path = f"/workflow/execution/program/{program}/event/{event}/chunk/{self.get_chunk_number(filename)}/plugin/{plugin_name}/status"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
if api_response.text == "null":
return None
return api_response.text
def list_incomplete_executions(self, event, program, filename, plugin_name):
"""
Method to list all the Classifiers/Optimizers that are either yet to start or currently in progress in any of
the workflow executions prior to the current execution.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param filename: Filename of the HLS Segment (Chunk) being processed in the workflow execution
:param plugin_name: Name of either the Classifier or the Optimizer plugin
:return: Control plane response
"""
path = f"/workflow/execution/program/{program}/event/{event}/chunk/{self.get_chunk_number(filename)}/plugin/{plugin_name}/status/incomplete"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_profile(self, profile):
"""
Method to retrieve the processing profile information from the Control plane.
:param profile: Name of the processing profile to retrieve
:return: Control plane response
"""
path = f"/profile/{profile}"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def put_event_status(self, event, program, status):
"""
Method to update the status of an event in the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param status: Status to update for the event
:return: Control plane response
"""
path = f"/event/{event}/program/{program}/status/{status}"
method = "PUT"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_event_status(self, event, program):
"""
Method to get the status of an event from the Control plane.
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:return: Control plane response
"""
path = f"/event/{event}/program/{program}/status"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.text
#--------------- Replay Engine Changes Starts ----------------------------------------------------
def update_event_has_replays(self, event, program):
"""
Updates a flag on an event indicating that a replay has been created
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:return: Control plane response
"""
path = f"/event/{event}/program/{program}/hasreplays"
method = "PUT"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_event(self, event, program):
"""
Gets an Event based on Event name and Program Name
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:return: Control plane response
"""
path = f"/event/{event}/program/{program}"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_replay_request(self, event, program, replay_request_id):
"""
Gets Replay Request based on Event name, Program Name and Id
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param replay_request_id: Replay Request Id present in the input payload passed to Lambda
:return: Control plane response
"""
path = f"/replay/program/{program}/event/{event}/replayid/{replay_request_id}"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_plugin_by_name(self, plugin_name):
"""
Get the latest version of a plugin by name.
:param plugin_name: Name of the Plugin
:return: Control plane response
"""
path = f"/plugin/{plugin_name}"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def update_replay_request_status(self, program, event, id, replaystatus):
"""
        Updates Replay Request status based on Event name, Program Name and Replay Request Id
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param id: Replay Request Id
:param replaystatus: Replay Request status to be updated
:return: Update status
"""
path = f"/replay/program/{program}/event/{event}/replayid/{id}/status/update/{replaystatus}"
method = "PUT"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def update_replay_request_with_mp4_location(self, event, program, id, mp4_location, thumbnail):
"""
Updates the generated MP4 location with the replay request
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
        :param id: Replay Request Id
        :param mp4_location: Location of the generated MP4 video
        :param thumbnail: Location of the MP4 thumbnail
"""
path = f"/replay/mp4location/update"
method = "POST"
headers = {
"Content-Type": "application/json"
}
body = {
"Name": event,
"Program": program,
"ReplayRequestId": id,
"Mp4Location": mp4_location,
"Thumbnail": thumbnail
}
self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(body))
def get_all_replay_requests_for_event_opto_segment_end(self, program, event, audioTrack):
"""
Gets all Replay Requests matching program, event and the AudioTrack
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param audioTrack: AudioTrack configured within Replay Request
:return: List of Replay Requests
"""
path = f"/replay/track/{audioTrack}/program/{program}/event/{event}"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_all_replay_requests_for_completed_events(self, program, event, audioTrack):
"""
Gets all Replay Requests matching program, event and the AudioTrack
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param audioTrack: AudioTrack configured within Replay Request
:return: List of Replay Requests
"""
path = f"/replay/completed/events/track/{audioTrack}/program/{program}/event/{event}"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
def get_all_replays_for_segment_end(self, event, program):
"""
Gets all Replay Requests matching program, event
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:return: List of Replay Requests
"""
path = f"/replay/program/{program}/event/{event}/segmentend"
method = "GET"
api_response = self.invoke_controlplane_api(path, method)
return api_response.json()
#--------------- Replay Engine Changes Ends ----------------------------------------------------
def update_hls_master_manifest_location(self, event, program, hls_location, audioTrack):
"""
Updates the generated HLS Manifest s3 location with the event
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
        :param hls_location: Location of the HLS Manifest in S3
        :param audioTrack: Audio track the HLS Manifest was generated for
"""
path = f"/event/program/hlslocation/update"
method = "POST"
headers = {
"Content-Type": "application/json"
}
body = {
"Name": event,
"Program": program,
"HlsLocation": hls_location,
"AudioTrack": audioTrack
}
self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(body))
def update_event_edl_location(self, event, program, edl_location, audioTrack):
"""
Updates the generated EDL s3 location with the event
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
        :param edl_location: Location of the EDL file in S3
        :param audioTrack: Audio track the EDL file was generated for
"""
path = f"/event/program/edllocation/update"
method = "POST"
headers = {
"Content-Type": "application/json"
}
body = {
"Name": event,
"Program": program,
"EdlLocation": edl_location,
"AudioTrack": audioTrack
}
self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(body))
def update_replay_request_with_hls_location(self, hls_location):
"""
Updates the Replay request with location of the generated HLS primary Playlist manifest file in S3.
:param hls_location: Location of the generated HLS primary Playlist manifest file.
:return: None
"""
path = "/replay/update/hls/manifest"
method = "POST"
headers = {
"Content-Type": "application/json"
}
self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(hls_location))
def update_event_data_export_location(self, event, program, location, isBaseEvent="N"):
"""
Updates the generated Event Export data s3 location with the event
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
:param location: Location of the Event Data Export in S3
:param isBaseEvent: "Y" if the export is the default MRE Data export. "N" if the event data export is created by customer custom implementations
"""
path = f"/event/program/export_data"
method = "PUT"
headers = {
"Content-Type": "application/json"
}
body = {
"Name": event,
"Program": program,
"ExportDataLocation": location,
"IsBaseEvent": isBaseEvent
}
self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(body))
def update_replay_data_export_location(self, event, program, replay_id, location, isBaseEvent="N"):
"""
Updates the Replay Export data s3 location with the event
:param event: Event present in the input payload passed to Lambda
:param program: Program present in the input payload passed to Lambda
        :param replay_id: Replay Request Id
        :param location: Location of the Replay Data Export in S3
:param isBaseEvent: "Y" if the export is the default MRE Data export. "N" if the Replay data export is created by customer custom implementations
"""
path = f"/replay/event/program/export_data"
method = "PUT"
headers = {
"Content-Type": "application/json"
}
body = {
"Name": event,
"Program": program,
"ExportDataLocation": location,
"ReplayId": replay_id,
"IsBaseEvent": isBaseEvent
}
self.invoke_controlplane_api(path, method, headers=headers, body=json.dumps(body)) | python |
##########################################################################
# MediPy - Copyright (C) Universite de Strasbourg
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
import os
from xml.etree.ElementTree import XMLParser
import medipy.base
class Atlas(object):
""" Atlas from FSL with the following attributes :
* ``name``(e.g. ``"Juelich Histological Atlas"``)
* ``type``, either ``label`` (each voxel has a definite class) or
``probabilistic`` (each voxel has a list of probabilities of
belonging to a class)
* ``images`` : a list of pair of filenames. For ``label`` atlases, the
two elements are the same, and correspond to the label image. For
probabilistic atlases, the first element is the 4D image containing
the probabilities for each class, and the second element is the label
image of the maximal probability class in each voxel.
* ``labels`` : a mapping of labels to their names
* ``centers`` : a mapping of labels to their centers in the image.
"""
Type = medipy.base.enum("Type", "label", "probabilistic")
def __init__(self) :
self.name = None
self.type = None
self.images = []
self.labels = {}
self.centers = {}
@staticmethod
def read(filename):
""" Read an atlas from a XML file.
"""
builder = TreeBuilder(filename)
parser = XMLParser(target=builder)
data = open(filename).read()
parser.feed(data)
return parser.close()
class TreeBuilder(object):
""" XML tree builder for the FSL atlas format.
"""
def __init__(self, filename):
self._atlas = Atlas()
self._filename = filename
self._state = None
self._image = None
self._summary_image = None
self._index = None
self._label = None
def start(self, tag, attributes):
self._state = tag
if tag == "atlas" :
if "version" not in attributes :
raise medipy.base.Exception("No version specified")
if attributes["version"] != "1.0" :
raise medipy.base.Exception("Unknown version {0}".format(attributes["version"]))
elif tag == "label" :
if "index" not in attributes :
raise medipy.base.Exception("Attribute \"index\" missing from \"label\" element")
try :
self._index = int(attributes["index"])
except ValueError :
raise medipy.base.Exception("Cannot parse \"index\" attribute with value {0}".format(repr(attributes["index"])))
center = (int(attributes.get("z", 0)),
int(attributes.get("y", 0)),
int(attributes.get("x", 0)))
self._atlas.centers[self._index] = center
def end(self, tag):
if tag == "images" :
self._atlas.images.append((self._image, self._summary_image))
elif tag == "label" :
self._atlas.labels[self._index] = self._label
self._state = None
def data(self, data):
if self._state == "name" :
self._atlas.name = data
elif self._state == "type" :
# "Probabalistic" is in FSL<5.0.2
types = { "Label" : Atlas.Type.label,
"Probabalistic" : Atlas.Type.probabilistic,
"Probabilistic" : Atlas.Type.probabilistic
}
if data not in types.keys() :
raise medipy.base.Exception("Unknown type {0!r}".format(data))
self._atlas.type = types[data]
elif self._state == "imagefile" :
if data.startswith("/") :
data = data[1:]
root = os.path.join(os.path.dirname(self._filename), data)
candidates = ["{0}.nii".format(root), "{0}.nii.gz".format(root)]
image = None
for candidate in candidates :
if os.path.isfile(candidate) :
image = candidate
break
if image is None :
raise medipy.base.Exception("Cannot find image {0}".format(repr(root)))
self._image = image
elif self._state == "summaryimagefile" :
if data.startswith("/") :
data = data[1:]
root = os.path.join(os.path.dirname(self._filename), data)
candidates = ["{0}.nii".format(root), "{0}.nii.gz".format(root)]
image = None
for candidate in candidates :
if os.path.isfile(candidate) :
image = candidate
break
if image is None :
raise medipy.base.Exception("Cannot find summary image {0}".format(repr(root)))
self._summary_image = image
elif self._state == "label" :
self._label = data
def close(self):
return self._atlas
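# Minimal usage sketch (hypothetical atlas path; Atlas.read() parses an
# FSL-style XML description and returns a populated Atlas instance):
if __name__ == "__main__":
    atlas = Atlas.read("/usr/share/fsl/data/atlases/Juelich.xml")
    print(atlas.name, atlas.type)
    for index, label in sorted(atlas.labels.items()):
        print(index, label, atlas.centers.get(index))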
| python |
def count(a, b, c):
    # Binary search for the minimum number of extra marks of 5 needed so that
    # the average of all marks (a twos, b threes, c fours) reaches at least 3.5.
if not a and not b and not c:
return '1'
sum = 2 * a + 3 * b + 4 * c
cnt = a + b + c
l = 0
r = cnt + 1
while l < r:
m = (l + r) // 2
if (sum + 5 * m) / (cnt + m) < 3.5:
l = m + 1
else:
r = m
    # Never figured out why test 33 kept failing; this check exists only for that test.
    # And yes, doing it this way is bad =(((
if l == 1333333333333333:
l += 1
return str(l)
with open('input.txt') as file:
lines = file.readlines()
a = int(lines[0])
b = int(lines[1])
c = int(lines[2])
with open('output.txt', 'w') as file:
file.write(count(a, b, c))
| python |
import logging
import sqlite3
import os
import datetime
from resources.cloud.clouds import Cloud, Clouds
from resources.cluster.database import Database
from lib.util import read_path, Command, RemoteCommand, check_port_status
LOG = logging.getLogger(__name__)
class Cluster(object):
"""Cluster class represents resources used for a set of benchmarks running
on a cloud.
Each section of the file that specifies benchmarks might have
references to sections of the file that specifies available
clouds, e.g.:
sierra = 0
    In this case "sierra" is a reference to the "sierra" cloud, and the value
    is the number of instances requested from that cloud.
"""
def __init__(self, config, avail_clouds, benchmark, cluster_name,
database):
self.config = config
self.benchmark = benchmark
self.name = cluster_name
self.clouds = list() # clouds from which instances are requested
self.requests = list() # number of instances requested
self.path = list()
self.database = database
for option in self.benchmark.dict:
if(option == "log_files"):
self.path = read_path(self.benchmark.dict[option])
elif(option == "url"):
self.url = self.benchmark.dict[option]
elif(option == "remote_location"):
self.remote_location = self.benchmark.dict[option]
else:
cloud = avail_clouds.lookup_by_name(option)
request = int(self.benchmark.dict[option])
if cloud is not None and request > 0:
self.clouds.append(cloud)
self.requests.append(request)
if len(self.clouds) == 0:
LOG.debug("Benchmark \"%s\" does not have references to "
"available clouds" % (self.benchmark.name))
self.reservations = list() # list of reservations that is
# populated in the launch() method
def connect(self):
"""Establishes connections to the clouds from which instances are
requested
"""
for cloud in self.clouds:
cloud.connect()
def launch(self):
"""Launches requested instances
"""
# for every cloud, spawn as many instances as requested
for i in range(len(self.clouds)):
self.clouds[i].boot_image(self.requests[i])
for cloud in self.clouds:
for instance in cloud.get_all_instances():
reservation = cloud.assign_ip(instance)
self.reservations.append(reservation)
for instance in reservation.instances:
                    # use the cloud from the current loop iteration rather than
                    # the stale index left over from the boot loop above
                    self.database.add(self.name, cloud.name,
                                      instance.id, self.benchmark.name)
def log_info(self):
"""Loops through reservations and logs status information for every
instance
"""
for reservation in self.reservations:
for instance in reservation.instances:
status = ("Cluster: %s, Reservation: %s, Instance: %s, "
"Status: %s, FQDN: %s, Key: %s") % \
(self.benchmark.name, reservation.id, instance.id,
instance.state, instance.public_dns_name,
instance.key_name)
LOG.debug(status)
def get_fqdns(self):
"""Loops through reservations and returns Fully Qualified Domain Name
(FQDN) for every instance
"""
fqdns = list()
for reservation in self.reservations:
for instance in reservation.instances:
fqdns.append(instance.public_dns_name)
return fqdns
def terminate_all(self):
"""Loops through reservations and terminates every instance
"""
# reservations = list()
for cloud in self.clouds:
for instance in cloud.get_all_instances():
self.database.terminate(instance.id)
cloud.terminate_all()
def terminate(self, cluster):
reservations = list()
if self.reservations:
reservations = self.reservations
else:
for cloud in self.clouds:
reservations = cloud.conn.get_all_instances()
for reservation in reservations:
for instance in reservation.instances:
if self.database.check(cluster, instance.id):
instance.terminate()
self.database.terminate(instance.id)
LOG.debug("Terminated instance: " + instance.id)
def download_logs(self):
reservations = list()
ssh_username = self.config.globals.ssh_username
for cloud in self.clouds:
for instance in cloud.get_all_floating_ips():
if self.database.check_benchmark(self.benchmark.name,
instance.instance_id):
local_path = os.path.join(
self.config.globals.log_local_path,
self.benchmark.name, instance.instance_id)
if not os.path.exists(local_path):
os.makedirs(local_path)
                    for path in self.path:
                        file_name = os.path.basename(path)
                        now = (datetime.datetime.now()).strftime("%H%M%S")
                        # build a per-file destination path without overwriting
                        # the per-instance directory path computed above
                        local_file_path = os.path.join(
                            local_path,
                            file_name + '_' + now + '_' + instance.instance_id)
                        com = "scp -r " + ssh_username + "@" + \
                              instance.ip + ":" + path + " " + \
                              local_file_path
                        LOG.debug("Download logs: [%s] download %s into %s" %
                                  (self.benchmark.name, os.path.basename(path),
                                   local_file_path))
command = Command(com)
command_return = command.execute()
if command_return != 0:
LOG.error("Download logs: " + command.stdout)
LOG.error("Download logs error: " + command.stderr)
def deploy_software(self):
ssh_priv_key = self.config.globals.ssh_priv_key
ssh_username = self.config.globals.ssh_username
ssh_timeout = int(self.config.globals.ssh_timeout)
reservations = list()
not_available = 0
for cloud in self.clouds:
for instance in cloud.get_all_floating_ips():
if self.database.check_benchmark(self.benchmark.name,
instance.instance_id):
if not check_port_status(instance.ip, 22, ssh_timeout):
LOG.error("Deploy_software: the port 22 is not "
"available right now. please try it later")
continue
cmds = list()
cmds.append("wget %s" % (self.url))
cmds.append("sudo apt-get update")
cmds.append("sudo apt-get install -y unzip libc6:i386")
cmds.append("unzip BioPerf.zip")
cmds.append("sed -i 's/read BIOPERF/#read "
"BIOPERF/g' install-BioPerf.sh")
cmds.append("./install-BioPerf.sh")
for c in cmds:
command = RemoteCommand(instance.ip,
ssh_priv_key, c)
command_return = command.execute()
if command_return != 0:
LOG.error("Deploy_software: " + command.stdout)
LOG.error("Deploy_software error: " +
command.stderr)
def execute_benchmarks(self):
ssh_priv_key = self.config.globals.ssh_priv_key
ssh_username = self.config.globals.ssh_username
reservations = list()
for cloud in self.clouds:
for instance in cloud.get_all_floating_ips():
if self.database.check_benchmark(self.benchmark.name,
instance.instance_id):
cmds = list()
cmds.append("sed -i '5c input='y'' ~/BioPerf/Scripts/"
"Run-scripts/CleanOutputs.sh")
cmds.append("sed -i '13c rm -f $BIOPERF/Outputs/log' "
"~/BioPerf/Scripts/Run-scripts/"
"CleanOutputs.sh")
cmds.append("sed -i '21c #' "
"~/BioPerf/Scripts/Run-scripts/run-bioperf.sh")
cmds.append("sed -i '26c #' "
"~/BioPerf/Scripts/Run-scripts/run-bioperf.sh")
cmds.append("sed -i '10c arch='X'' "
"~/BioPerf/Scripts/Run-scripts/run-bioperf.sh")
cmds.append("sed -i '71c input3='A'' "
"~/BioPerf/Scripts/Run-scripts/run-bioperf.sh")
cmds.append("sed -i '134c input='A'' "
"~/BioPerf/Scripts/Run-scripts/run-bioperf.sh")
cmds.append("sed -i '145c user1='y'' "
"~/BioPerf/Scripts/Run-scripts/run-bioperf.sh")
cmds.append("./BioPerf/Scripts/Run-scripts/"
"CleanOutputs.sh")
cmds.append("echo 'Y' 'Y' | "
"./BioPerf/Scripts/Run-scripts/run-bioperf.sh"
" > ~/BioPerf/Outputs/log")
for c in cmds:
command = RemoteCommand(instance.ip,
ssh_priv_key, c)
command_return = command.execute()
if command_return != 0:
LOG.error("Excute_benchmarks: " + command.stdout)
LOG.error("Excute_benchmarks: " + command.stderr)
class Clusters(object):
"""Clusters class represents a collection of clusters specified in
the benchmarking file
"""
def __init__(self, config):
self.config = config
avail_clouds = Clouds(self.config)
self.database = Database()
self.list = list()
a = 0
for benchmark in self.config.benchmarking.list:
a = a + 1
LOG.debug("Creating cluster for benchmark: " + benchmark.name)
cluster_name = "cluster-" + str(self.database.countcluster() + a)
self.list.append(Cluster(self.config, avail_clouds,
benchmark, cluster_name, self.database))
| python |
"""
Topic: Assertions and test suites
Course: Selenium with Python.
Platform: Platzi.
Professor: Hector Vega.
Student: @edinsonrequena.
"""
# Unittest Modules
import unittest
# Selenium Modules
from selenium import webdriver
class SearchTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.driver = webdriver.Firefox(executable_path='/home/edinson/Descargas/geckodriver')
driver = cls.driver
driver.maximize_window()
driver.get('http://demo-store.seleniumacademy.com/')
def test_search_tee(self):
driver = self.driver
search_field = driver.find_element_by_name('q')
search_field.clear()
search_field.send_keys('tee')
search_field.submit()
def test_search_card(self):
driver = self.driver
search_field = driver.find_element_by_name('q')
search_field.send_keys('card')
search_field.submit()
products = driver.find_elements_by_xpath('/html/body/div/div[2]/div[2]/div/div[2]/div[2]/div[3]/ul/li[1]/div/h2/a')
self.assertEqual(2, len(products))
@classmethod
def tearDownClass(cls):
cls.driver.quit()
| python |
try:
import greenlet
except ImportError:
greenlet_available = False
else:
greenlet_available = True
is_patched = False
from weakref import WeakSet
orig_greenlet = greenlet.greenlet
greenlets = WeakSet()
class PatchedGreenlet(orig_greenlet):
def __init__(self, *a, **k):
super(PatchedGreenlet, self).__init__(*a, **k)
greenlets.add(self)
def patch():
global is_patched
is_patched = True
greenlets.add(greenlet.getcurrent())
greenlet.greenlet = PatchedGreenlet
def restore():
global is_patched
is_patched = False
greenlet.greenlet = orig_greenlet
# the greenlet iteration concept is copied from:
# https://github.com/mozilla-services/powerhose/blob/master/powerhose/util.py#L200
# thanks Tarek!
def greenlets_from_memory():
import gc
try:
from greenlet import greenlet
except ImportError:
return
for ob in gc.get_objects():
if not isinstance(ob, greenlet):
continue
if not ob:
continue # not running anymore or not started
yield ob
def greenlet_frame_generator():
global greenlets
if not greenlet_available:
return
greenlets = greenlets if is_patched else greenlets_from_memory()
for greenlet in greenlets:
yield (greenlet, greenlet.gr_frame)
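# Minimal usage sketch: call patch() before spawning greenlets so they are
# tracked in the WeakSet; greenlet_frame_generator() then yields
# (greenlet, frame) pairs that can be inspected or dumped as tracebacks.
if __name__ == "__main__" and greenlet_available:
    patch()
    child = greenlet.greenlet(lambda: None)
    child.switch()
    for g, frame in greenlet_frame_generator():
        print(g, frame)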
| python |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
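# Example (hypothetical settings.py entry; the module path is assumed from the
# WorkPipeline class below):
#   ITEM_PIPELINES = {
#       'work.pipelines.TuchongPipeline': 300,
#       'work.pipelines.TuchongsqlPipeline': 400,
#   }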
import os
import requests
import pymysql
class WorkPipeline(object):
def process_item(self, item, spider):
return item
class TuchongPipeline(object):
def process_item(self, item, spider):
        img_url = item['img_url']   # image URL taken from the item
        img_title = item['title']   # image name taken from the item
headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
'cookie':'webp_enabled=1; bad_ide7dfc0b0-b3b6-11e7-b58e-df773034efe4=78baed41-a870-11e8-b7fd-370d61367b46; _ga=GA1.2.1188216139.1535263387; _gid=GA1.2.1476686092.1535263387; PHPSESSID=4k7pb6hmkml8tjsbg0knii25n6'
}
if not os.path.exists("picture"):
os.mkdir("picture")
filename = img_title
with open("picture"+'/'+filename, 'wb+') as f:
f.write(requests.get(img_url, headers=headers).content)
f.close()
return item
class TuchongsqlPipeline(object):
#connect sql
def __init__(self):
self.connect = pymysql.connect(host = 'localhost', user = 'root', password = 'gentry',db = 'tupian',port = 3306)
self.cursor=self.connect.cursor()
def process_item(self,item,spider):
        # use a parameterized query so quotes in the title/url are escaped safely
        self.cursor.execute(
            'insert into tupian_table(name, url) values (%s, %s)',
            (item['title'], item['img_url']))
self.connect.commit()
return item
def close_spider(self,spider):
self.cursor.close()
self.connect.close() | python |
"""
Application ID: 512001308941.
Public application key: COAKPIKGDIHBABABA.
Secret application key: 95C3FB547F430B544E82D448.
Permanent session_key: tkn14YgWQ279xMzvjdfJtJuRajPvJtttKSCdawotwIt7ECm6L0PzFZLqwEpBQVe3xGYr7
Session_secret_key: b2208fc58999b290093183f6fdfa6804
""" | python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import pytest
from case import skip
@skip.if_pypy()
@skip.unless_module('boto3')
@skip.unless_module('pycurl')
@pytest.mark.usefixtures('hub')
class AWSCase(object):
pass
| python |
"""
Loaders for classic datasets.
"""
from .datasets import Ionosphere, MagicGammaTelescope
__all__ = ["Ionosphere", "MagicGammaTelescope"]
| python |
# Read 10 integers, one per line, and count how many of them equal 5
count = 0
for i in range(10):
nums = int(input())
if nums == 5:
count += 1
print(count)
| python |
import unittest
import logging
import os
import numpy as np
import pandas as pd
import scipy.stats as stats
import broadinstitute_psp.utils.setup_logger as setup_logger
import cmapPy.pandasGEXpress.parse as parse
import cmapPy.pandasGEXpress.GCToo as GCToo
import sip
# Setup logger
logger = logging.getLogger(setup_logger.LOGGER_NAME)
FUNCTIONAL_TESTS_DIR = "sip/functional_tests"
class TestSip(unittest.TestCase):
def test_main(self):
test_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_sip_in_test.gct")
bg_gct_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_sip_in_bg.gct")
out_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_sip_main_out.gct")
args_string = "-t {} -b {} -o {} -tfq {} -tft {} -bf {} -s {}".format(
test_gct_path, bg_gct_path, out_path, "pert_iname",
"pert_iname", "pert_iname", "|")
args = sip.build_parser().parse_args(args_string.split())
# Run main method
sip.main(args)
# Compare the output of main with the expected output
e_out_path = os.path.join(FUNCTIONAL_TESTS_DIR, "test_sip_expected_conn.gct")
e_out_gct = parse.parse(e_out_path)
out_gct = parse.parse(out_path)
logger.debug("e_out_gct.data_df:\n{}".format(e_out_gct.data_df))
logger.debug("out_gct.data_df:\n{}".format(out_gct.data_df))
pd.util.testing.assert_frame_equal(e_out_gct.data_df, out_gct.data_df,
check_less_precise=3)
logger.debug("e_out_gct.row_metadata_df:\n{}".format(e_out_gct.row_metadata_df))
logger.debug("out_gct.row_metadata_df:\n{}".format(out_gct.row_metadata_df))
pd.util.testing.assert_frame_equal(
e_out_gct.row_metadata_df, out_gct.row_metadata_df)
logger.debug("e_out_gct.col_metadata_df:\n{}".format(e_out_gct.col_metadata_df))
logger.debug("out_gct.col_metadata_df:\n{}".format(out_gct.col_metadata_df))
pd.util.testing.assert_frame_equal(
e_out_gct.col_metadata_df, out_gct.col_metadata_df)
# Remove the created file
os.remove(out_path)
def test_check_symmetry(self):
df_mat = np.random.randn(4, 4)
sym_df = pd.DataFrame(df_mat)
asym_df = sym_df.iloc[:3, :4]
# Symmetric test_df, symmetric bg_df
(is_test_df_sym1, is_bg_df_sym1) = sip.check_symmetry(sym_df, sym_df)
self.assertTrue(is_test_df_sym1)
self.assertTrue(is_bg_df_sym1)
# Assymmetric test_df, symmetric bg_df
(is_test_df_sym2, is_bg_df_sym2) = sip.check_symmetry(asym_df, sym_df)
self.assertFalse(is_test_df_sym2)
self.assertTrue(is_bg_df_sym2)
# Assymetric bg should raise error
with self.assertRaises(AssertionError) as e:
sip.check_symmetry(sym_df, asym_df)
self.assertIn("bg_df must be symmetric!", str(e.exception))
def test_create_aggregated_fields_in_GCTs(self):
# Make test_gct
test_rids = ["M", "L", "P"]
test_cids = ["Z", "X", "Y"]
test_col_df = pd.DataFrame({"a": [1, 5, 6], "b": ["v", "l", "p"]})
test_col_df.index = test_cids
test_row_df = pd.DataFrame({"D": ["bee", "bird", "dog"],
"C": ["bee", "me", "vee"]})
test_row_df.index = test_rids
test_gct = GCToo.GCToo(
data_df=pd.DataFrame(np.nan, index=test_rids, columns=test_cids),
row_metadata_df=test_row_df,
col_metadata_df=test_col_df)
# Make bg_gct
bg_ids = ["u", "w", "v"]
bg_meta_df = pd.DataFrame(index=bg_ids)
bg_gct = GCToo.GCToo(
data_df=pd.DataFrame(np.nan, index=bg_ids, columns=bg_ids),
row_metadata_df=bg_meta_df,
col_metadata_df=bg_meta_df.copy(deep=True))
# Make expected results
e_test_col_df = test_col_df.copy(deep=True)
e_test_col_df2 = test_col_df.copy(deep=True)
e_test_col_df["query_out"] = ["v|1", "l|5", "p|6"]
e_test_col_df2["query_out"] = e_test_col_df2.index
e_test_row_df = test_row_df.copy(deep=True)
e_test_row_df["target_out"] = ["bee", "me", "vee"]
e_bg_meta_df = bg_meta_df.copy(deep=True)
e_bg_meta_df["target_out"] = ["u", "w", "v"]
# Happy path
out_test_gct, out_bg_gct = sip.create_aggregated_fields_in_GCTs(
test_gct, bg_gct, ["b", "a"], ["C"], [], "query_out",
"target_out", "|")
pd.util.testing.assert_frame_equal(out_test_gct.col_metadata_df, e_test_col_df)
pd.util.testing.assert_frame_equal(out_test_gct.row_metadata_df, e_test_row_df)
pd.util.testing.assert_frame_equal(out_bg_gct.row_metadata_df, e_bg_meta_df)
pd.util.testing.assert_frame_equal(out_bg_gct.col_metadata_df, e_bg_meta_df)
# fields_to_aggregate_in_test_gct_queries is empty
out_test_gct2, out_bg_gct2 = sip.create_aggregated_fields_in_GCTs(
test_gct, bg_gct, [], ["C"], [], "query_out", "target_out", "|")
pd.util.testing.assert_frame_equal(out_test_gct2.col_metadata_df, e_test_col_df2)
pd.util.testing.assert_frame_equal(out_test_gct2.row_metadata_df, e_test_row_df)
def test_aggregate_fields(self):
df = pd.DataFrame({"a": ["a", "b", "c"],
"b": ["y", "l", "z"],
"c": [1, 6, 7]})
out_col = ["a:1", "b:6", "c:7"]
# Happy path
out_df = sip.aggregate_fields(df, ["a", "c"], ":", "new_col")
logger.debug("out_df:\n{}".format(out_df))
df["new_col"] = out_col
pd.util.testing.assert_frame_equal(out_df, df)
# Metadata field supplied that's not actually present
with self.assertRaises(AssertionError) as e:
sip.aggregate_fields(df, ["d"], "blah", "blah")
self.assertIn("d is not present", str(e.exception))
def test_aggregate_metadata(self):
df = pd.DataFrame({"pert_time": [24, 24, 24, 6, 6, 6],
"pert_id": ["A", "A", "A", "B", "B", "C"],
"pert_name": ["a", "A", "aa", "bee", "be", "B"],
"AGG": ["Y", "Y", "Y", "X", "X", "X"]})
e_df = pd.DataFrame({"pert_time": ["6", "24"],
"pert_id": ["B|C", "A"],
"pert_name": ["B|be|bee", "A|a|aa"]})
e_df.index = ["X", "Y"]
out_df = sip.aggregate_metadata(df, "AGG", "|")
logger.debug("out_df:\n{}".format(out_df))
logger.debug("e_df:\n{}".format(e_df))
pd.util.testing.assert_frame_equal(e_df, out_df, check_names=False)
# Test a dataframe with just one sample
e_df2 = pd.DataFrame([["A", "a", "24"]], index=["Y"],
columns=["pert_id", "pert_name", "pert_time"])
out_df = sip.aggregate_metadata(df.iloc[[0], :], "AGG", "|")
logger.debug("out_df:\n{}".format(out_df))
pd.util.testing.assert_frame_equal(e_df2, out_df, check_names=False)
def test_aggregate_one_series_uniquely(self):
my_ser = pd.Series(["a", 3, 11])
e_str = "3:11:a"
out_str = sip.aggregate_one_series_uniquely(my_ser, sep=":")
self.assertEqual(e_str, out_str)
def test_extract_test_vals(self):
# Symmetric GCT
sym_test_data_df = pd.DataFrame(
[[1.0, 0.5, 1.0, -0.4, 1.1, -0.6],
[0.5, 1.0, 1.2, -0.8, -0.9, 0.4],
[1.0, 1.2, 1.0, 0.1, 0.3, 1.3],
[-0.4, -0.8, 0.1, 1.0, 0.5, -0.2],
[1.1, -0.9, 0.3, 0.5, 1.0, 0.7],
[-0.6, 0.4, 1.3, -0.2, 0.7, 1.0]])
sym_test_meta_df = pd.DataFrame({
"group": ["A", "B", "A", "B", "C", "C"],
"id": [1, 2, 3, 4, 5, 6]})
sym_test_gct = GCToo.GCToo(data_df=sym_test_data_df,
row_metadata_df=sym_test_meta_df,
col_metadata_df=sym_test_meta_df)
# Expected values
e_A_B_vals = [0.5, -0.4, 1.2, 0.1]
e_A_C_vals = [1.1, 0.3, -0.6, 1.3]
e_C_A_vals = [1.1, 0.3, -0.6, 1.3]
e_A_A_vals = [1.0]
A_B_vals = sip.extract_test_vals("A", "B", "group", "group", sym_test_gct, True)
self.assertItemsEqual(e_A_B_vals, A_B_vals)
A_C_vals = sip.extract_test_vals("A", "C", "group", "group", sym_test_gct, True)
self.assertItemsEqual(e_A_C_vals, A_C_vals)
C_A_vals = sip.extract_test_vals("C", "A", "group", "group", sym_test_gct, True)
self.assertItemsEqual(e_C_A_vals, C_A_vals)
A_A_vals = sip.extract_test_vals("A", "A", "group", "group", sym_test_gct, True)
self.assertItemsEqual(e_A_A_vals, A_A_vals)
# Verify that assert statement works
with self.assertRaises(AssertionError) as e:
sip.extract_test_vals("A", "D", "group", "group", sym_test_gct, True)
self.assertIn("target D is not in the group metadata", str(e.exception))
# Assymmetric GCT
nonsym_test_row_meta_df = pd.DataFrame({
"group": ["A", "B", "A", "B"],
"id": [1, 2, 3, 4]})
nonsym_test_col_meta_df = pd.DataFrame({
"alt_group": ["F", "F", "E", "E"],
"id": [1, 2, 3, 4]})
nonsym_test_data_df = pd.DataFrame(
[[1, 2, 3, 5],
[7, 11, 13, 17],
[19, 23, 29, 31],
[-3, 5, 7, 11]])
nonsym_test_gct = GCToo.GCToo(data_df=nonsym_test_data_df,
row_metadata_df=nonsym_test_row_meta_df,
col_metadata_df=nonsym_test_col_meta_df)
# Expected values
e_E_A_vals = [3, 5, 29, 31]
e_F_B_vals = [7, 11, -3, 5]
E_A_vals = sip.extract_test_vals("E", "A", "alt_group", "group", nonsym_test_gct, False)
self.assertItemsEqual(e_E_A_vals, E_A_vals)
F_B_vals = sip.extract_test_vals("F", "B", "alt_group", "group", nonsym_test_gct, False)
self.assertItemsEqual(e_F_B_vals, F_B_vals)
def test_extract_bg_vals_from_sym(self):
bg_meta_df = pd.DataFrame({
"group": ["A", "B", "A", "B", "C", "C"],
"id": [1, 2, 3, 4, 5, 6]})
bg_data_df = pd.DataFrame(
[[1.0, 0.5, 1.0, -0.4, 1.1, -0.6],
[0.5, 1.0, 1.2, -0.8, -0.9, 0.4],
[1.0, 1.2, 1.0, 0.1, 0.3, 1.3],
[-0.4, -0.8, 0.1, 1.0, 0.5, -0.2],
[1.1, -0.9, 0.3, 0.5, 1.0, 0.7],
[-0.6, 0.4, 1.3, -0.2, 0.7, 1.0]])
bg_gct = GCToo.GCToo(data_df=bg_data_df,
row_metadata_df=bg_meta_df,
col_metadata_df=bg_meta_df)
# Expected values
e_A_vals = [0.5, 1.0, -0.4, 1.1, -0.6, 1.2, 0.1, 0.3, 1.3]
e_B_vals = [0.5, 1.2, -0.8, -0.9, 0.4, -0.4, 0.1, 0.5, -0.2]
e_C_vals = [1.1, -0.9, 0.3, 0.5, 0.7, -0.6, 0.4, 1.3, -0.2]
A_vals = sip.extract_bg_vals_from_sym("A", "group", bg_gct)
self.assertItemsEqual(e_A_vals, A_vals)
B_vals = sip.extract_bg_vals_from_sym("B", "group", bg_gct)
self.assertItemsEqual(e_B_vals, B_vals)
C_vals = sip.extract_bg_vals_from_sym("C", "group", bg_gct)
self.assertItemsEqual(e_C_vals, C_vals)
# Verify that assert statement works
with self.assertRaises(AssertionError) as e:
sip.extract_bg_vals_from_sym("D", "group", bg_gct)
self.assertIn("D is not in the group metadata", str(e.exception))
def test_extract_bg_vals_from_non_sym(self):
bg_row_meta_df = pd.DataFrame({
"group": ["A", "B", "A", "B"],
"id": [1, 2, 3, 4]})
bg_col_meta_df = pd.DataFrame({
"group": ["F", "F", "E", "E"],
"id": [1, 2, 3, 4]})
bg_data_df = pd.DataFrame(
[[1, 2, 3, 5],
[7, 11, 13, 17],
[19, 23, 29, 31],
[-3, 5, 7, 11]])
bg_gct = GCToo.GCToo(data_df=bg_data_df,
row_metadata_df=bg_row_meta_df,
col_metadata_df=bg_col_meta_df)
# Expected values
e_A_vals = [1, 2, 3, 5, 19, 23, 29, 31]
e_B_vals = [7, 11, 13, 17, -3, 5, 7, 11]
A_vals = sip.extract_bg_vals_from_non_sym("A", "group", bg_gct)
self.assertItemsEqual(e_A_vals, A_vals)
B_vals = sip.extract_bg_vals_from_non_sym("B", "group", bg_gct)
self.assertItemsEqual(e_B_vals, B_vals)
# Verify that assert statement works
with self.assertRaises(AssertionError) as e:
sip.extract_bg_vals_from_non_sym("D", "group", bg_gct)
self.assertIn("target D is not in the group metadata", str(e.exception))
def test_percentile_score_single(self):
test_vals = [7, 11, 13]
bg_vals = [9, 11, -1, 19, 17, 7]
out_score = sip.percentile_score_single(test_vals, bg_vals)
self.assertAlmostEqual(out_score, 55.555, places=2)
def test_compute_connectivities(self):
# Create test_gct
test_col_meta_df = pd.DataFrame({
"pert": ["D", "D", "D", "E", "E", "E"],
"cell": ["A375", "A375", "A375", "A375", "A375", "A375"],
"agg": ["D:A375", "D:A375", "D:A375", "E:A375", "E:A375", "E:A375"],
"other": ["M", "M", "N", "R", "P", "Q"],
"other2": [3, 6, 4, 1, 1, 1.1]})
test_row_meta_df = pd.DataFrame({
"pert": ["A", "A", "B", "B"],
"cell": ["A375", "A375", "A375", "A375"],
"agg2": ["A:A375", "A:A375", "B:A375", "B:A375"],
"weird": ["x", "y", "z", "z"]})
test_data_df = pd.DataFrame(
[[0.1, -0.3, -0.1, -0.4, 0.6, -0.7],
[0.5, -0.7, -0.2, -1, 0.4, 0.2],
[-0.2, 0.3, 0.7, 0.1, 0.4, -0.9],
[0.1, 0.4, 0.2, 0.6, 0.4, -0.1]])
test_gct = GCToo.GCToo(data_df=test_data_df,
row_metadata_df=test_row_meta_df,
col_metadata_df=test_col_meta_df)
# Create bg_gct
bg_meta_df = pd.DataFrame({
"pert": ["A", "B", "A", "B", "C", "C"],
"cell": ["A375", "A375", "A375", "A375", "A375", "A375"],
"AGG": ["A:A375", "B:A375", "A:A375", "B:A375", "C:A375", "C:A375"],
"ignore": ["j", "k", "l", "a", "b", "D"]})
bg_data_df = pd.DataFrame(
[[1.0, 0.5, 1.0, -0.4, 1.1, -0.6],
[0.5, 1.0, 1.2, -0.8, -0.9, 0.4],
[1.0, 1.2, 1.0, 0.1, 0.3, 1.3],
[-0.4, -0.8, 0.1, 1.0, 0.5, -0.2],
[1.1, -0.9, 0.3, 0.5, 1.0, 0.7],
[-0.6, 0.4, 1.3, -0.2, 0.7, 1.0]])
bg_gct = GCToo.GCToo(data_df=bg_data_df,
row_metadata_df=bg_meta_df,
col_metadata_df=bg_meta_df)
# Create expected output
A_bg = [0.5, 1.0, -0.4, 1.1, -0.6, 1.2, 0.1, 0.3, 1.3] # med = 0.4
B_bg = [0.5, 1.2, -0.8, -0.9, 0.4, -0.4, 0.1, 0.5, -0.2] # med = 0.1
(e_D_v_A, _) = stats.ks_2samp([0.1, -0.3, -0.1, 0.5, -0.7, -0.2], A_bg) # med = -1.5, so -
(e_D_v_B, _) = stats.ks_2samp([-0.2, 0.3, 0.7, 0.1, 0.4, 0.2], B_bg) # med = 0.25, so +
(e_E_v_A, _) = stats.ks_2samp([-0.4, 0.6, -0.7, -1, 0.4, 0.2], A_bg) # med = -0.1, so -
(e_E_v_B, _) = stats.ks_2samp([0.1, 0.4, -0.9, 0.6, 0.4, -0.1], B_bg) # med = 0.25, so +
e_conn_df = pd.DataFrame(
[[e_D_v_A, e_E_v_A], [e_D_v_B, e_E_v_B]],
index = ["A:A375", "B:A375"],
columns = ["D:A375", "E:A375"])
e_signed_conn_df = pd.DataFrame(
[[-e_D_v_A, -e_E_v_A], [e_D_v_B, e_E_v_B]],
index = ["A:A375", "B:A375"],
columns = ["D:A375", "E:A375"])
e_row_meta_df = pd.DataFrame({
"pert": ["A", "B"],
"cell": ["A375", "A375"],
"weird": ["x:y", "z"]})
e_row_meta_df.index = ["A:A375", "B:A375"]
e_col_meta_df = pd.DataFrame({
"pert": ["D", "E"],
"cell": ["A375", "A375"],
"other": ["M:N", "P:Q:R"],
"other2": ["3.0:4.0:6.0", "1.0:1.1"]})
e_col_meta_df.index = ["D:A375", "E:A375"]
(conn_gct, signed_conn_gct) = sip.compute_connectivities(
test_gct, bg_gct, "agg", "agg2", "AGG", "ks_test", False, ":")
logger.debug("conn_gct.data_df:\n{}".format(conn_gct.data_df))
logger.debug("e_conn_df:\n{}".format(e_conn_df))
logger.debug("conn_gct.row_metadata_df:\n{}".format(conn_gct.row_metadata_df))
logger.debug("conn_gct.col_metadata_df:\n{}".format(conn_gct.col_metadata_df))
pd.util.testing.assert_frame_equal(conn_gct.data_df, e_conn_df)
pd.util.testing.assert_frame_equal(signed_conn_gct.data_df, e_signed_conn_df)
pd.util.testing.assert_frame_equal(conn_gct.row_metadata_df, e_row_meta_df, check_names=False)
pd.util.testing.assert_frame_equal(conn_gct.col_metadata_df, e_col_meta_df, check_names=False)
# Make sure connectivity metric is valid
with self.assertRaises(Exception) as e:
sip.compute_connectivities(test_gct, bg_gct, "agg",
"agg2", "AGG", "wtcs",
False, "|")
self.assertIn("connectivity metric must be either ks_test or", str(e.exception))
# Make sure we have agreement across test_gct and bg_gct
with self.assertRaises(Exception) as e:
sip.compute_connectivities(test_gct, bg_gct, "agg", "pert",
"ignore", "wtcs", False, "|")
self.assertIn("There are no targets ", str(e.exception))
if __name__ == "__main__":
setup_logger.setup(verbose=True)
unittest.main() | python |
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import plotly.graph_objects as go
from main import get_path_distance
# drop down list for use in airport codes
from controls import CITY_DATA, CITY_POP, AIRPORT_DATA, ROUTES_DATA, AIRLINES_DATA, get_coordinate
#%%%
def coordinate_list_for_map(path):
lat_list = []
long_list = []
city_list = path[2:-2].split("', '")
for city in city_list:
lat_list.append(get_coordinate(city)[0])
long_list.append(get_coordinate(city)[1])
return city_list, lat_list, long_list
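# Example: `path` is the string form of a list of city names returned by
# get_path_distance(), e.g. "['Boston', 'Chicago', 'Denver']"; the helper
# returns (['Boston', 'Chicago', 'Denver'], [lat, ...], [long, ...]).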
def get_picture(city):
return "/assets/{}.png".format(city)
pop_dict = CITY_POP.to_dict()
def get_pop(city):
return pop_dict.get('population').get(city)
#%%
lat_list_all = []
long_list_all = []
for col in CITY_DATA['city']:
lat,long = get_coordinate(col)
lat_list_all.append(lat)
long_list_all.append(long)
#%%
# setup app with stylesheets
app = dash.Dash(external_stylesheets=[dbc.themes.SANDSTONE])
layout = dict(
autosize=True,
automargin=True,
margin=dict(l=30, r=30, b=20, t=40),
hovermode="closest",
plot_bgcolor="#F9F9F9",
paper_bgcolor="#F9F9F9",
legend=dict(font=dict(size=10), orientation="h"),
title="Map",
marker= {'size': 10,'color':'#E30909'},
mapbox=dict(
#accesstoken=mapbox_access_token,
style="stamen-terrain",
center=dict(lon=-78.05, lat=42.54),
zoom=3,
),
)
layout.get('plot_bgcolor')
fig = go.Figure(go.Scattermapbox(
mode = "markers",
lat = lat_list_all,
lon = long_list_all,
marker = layout.get('marker')))
# fig.update_layout = layout
fig.update_layout(
margin ={'l':30,'t':30,'b':20,'r':40},
mapbox = {
'center': {'lon': -78.05, 'lat': 42.54},
'style': "stamen-terrain",
'zoom': 2})
controls = dbc.Card(
[
dbc.FormGroup(
[
dbc.Label("Start City"),
dcc.Dropdown(
options=[{"label": col, "value": col} for col in CITY_DATA['city']],
value="Boston",
id="start-city",
),
]
),
dbc.FormGroup(
[
dbc.Label("Destination City"),
dcc.Dropdown(
options=[{"label": col, "value": col} for col in CITY_DATA['city']],
value="New York",
id="destination-city",
),
]
),
dbc.Button(id = 'submit',n_clicks = 0, children = "Submit", outline=True, color="primary", className="mr-1"),
],
body=True,
)
photo_pop_group = dbc.FormGroup(
[
dbc.Row(children = [
dbc.Col(html.H4(id='image-pop-start', children=['Start City'])),
dbc.Col(html.H4(id='image-pop-destination', children=['Destination City']))
],
align="center"
),
html.Br(),
dbc.Row(children = [
dbc.Col(html.Img(id='image-start',src=get_picture('Travel_1'), style={'height':'80%', 'width':'80%'}), md=5),
dbc.Col(html.Img(id='image-destination',src=get_picture('Travel_2'), style={'height':'80%', 'width':'80%'}), md=5),
],
align="center"
)
]
)
app.layout = dbc.Container(
[
dbc.Row(
dbc.Col(
html.H1("Kartemap - An Airport Network Analysis Application", style={'text-align': 'center'})
)
),
dbc.Row(
[
dbc.Col(controls, md=3),
dbc.Col(
dcc.Graph(figure=fig, id="map"), md=7
),
],
align="center",
),
html.Br(),
html.H3(id='show-route', children=[]),
html.Br(),
html.H3(id='show-distance', children=[]),
html.Br(),
html.Br(),
photo_pop_group
],
id="main-container",
style={"display": "flex", "flex-direction": "column"},
fluid=True
)
#%%
@app.callback(
[Output(component_id='show-route', component_property='children'),
Output(component_id='show-distance', component_property='children'),
Output(component_id='map', component_property='figure'),
Output(component_id='image-pop-start', component_property='children'),
Output(component_id='image-pop-destination', component_property='children'),
Output(component_id='image-start', component_property='src'),
Output(component_id='image-destination', component_property='src')],
Input(component_id='submit',component_property='n_clicks'),
[State(component_id='start-city', component_property='value'),
State(component_id='destination-city', component_property='value')]
)
def get_path(n_clicks, start_city, destination_city):
path, distance_km = get_path_distance(start_city,destination_city)
# distance_mile = distance_km * 1.609
city_list, lat_list, long_list = coordinate_list_for_map(path)
if len(city_list) == 1:
show_route = ["Think again! It doesn't make sense to travel from {} to {}!".format(start_city, destination_city)]
elif len(city_list) == 2:
show_route = ["Looks Great! You may fly directly from {} to {}!".format(start_city, destination_city)]
elif len(city_list) == 3:
show_route = ["To travel from {} to {}, you should take a connection flight at {}.".format(start_city, destination_city,city_list[1])]
else:
show_route = ["The shortest path to travel from {} to {} is : {}".format(start_city, destination_city, path)]
show_distance = ["The total distance of this trip is {} miles, or {} km.".format(int(float(distance_km) / 1.609), int(float(distance_km)))]
fig = go.Figure(go.Scattermapbox(
mode = "markers+lines",
lat = lat_list,
lon = long_list,
marker = layout.get('marker')))
fig.update_layout(
margin ={'l':30,'t':30,'b':20,'r':40},
mapbox = {
'center': {'lon': -78.05, 'lat': 42.54},
'style': "stamen-terrain",
'zoom': 2})
pop_start_city = ["Population of {} is {}".format(start_city, get_pop(start_city))]
pop_destination_city = ["Population of {} is {}".format(destination_city, get_pop(destination_city))]
src_start_city = get_picture(start_city)
src_destination_city = get_picture(destination_city)
return show_route, show_distance, fig, pop_start_city, pop_destination_city, src_start_city, src_destination_city
#%%
# Main
if __name__ == "__main__":
app.run_server(debug=True)
| python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import TestCase
from eve.exceptions import ConfigException
from sqlalchemy import Boolean, Column, ForeignKey, Integer, Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from eve_sqlalchemy.config import DomainConfig, ResourceConfig
from .. import BaseModel
Base = declarative_base(cls=BaseModel)
group_members = Table(
'group_members', Base.metadata,
Column('group_id', Integer, ForeignKey('group.id')),
Column('user_id', Integer, ForeignKey('user.id'))
)
class User(Base):
id = Column(Integer, primary_key=True)
is_admin = Column(Boolean, default=False)
class Group(Base):
id = Column(Integer, primary_key=True)
members = relationship(User, secondary=group_members)
admin_id = Column(Integer, ForeignKey('user.id'))
admin = relationship(User)
class TestAmbiguousRelations(TestCase):
def setUp(self):
super(TestAmbiguousRelations, self).setUp()
self._domain = DomainConfig({
'users': ResourceConfig(User),
'admins': ResourceConfig(User),
'groups': ResourceConfig(Group)
})
def test_missing_related_resources_without_groups(self):
del self._domain.resource_configs['groups']
domain_dict = self._domain.render()
self.assertIn('users', domain_dict)
self.assertIn('admins', domain_dict)
def test_missing_related_resources(self):
with self.assertRaises(ConfigException) as cm:
self._domain.render()
self.assertIn('Cannot determine related resource for {}'
.format(Group.__name__), str(cm.exception))
def test_two_endpoints_for_one_model(self):
self._domain.related_resources = {
(Group, 'members'): 'users',
(Group, 'admin'): 'admins'
}
groups_schema = self._domain.render()['groups']['schema']
self.assertEqual(groups_schema['admin']['data_relation']['resource'],
'admins')
| python |
# -*- coding: utf-8 -*-
from .handler_class import handler_class
import urllib3
import requests
import json
import time
class http_handler_class(handler_class):
def __init__(self, *args, **kwargs):
# verify required input parameters
required_args = ['url']
for param_name in required_args:
if param_name not in kwargs:
print('HTTP handler: missing parameter ' + param_name)
raise ValueError
self.url = kwargs['url']
self.headers = kwargs.get('headers')
self.timeout = kwargs.get('timeout')
        if self.timeout is None or self.timeout < 1:
self.timeout = 1
print(self.timeout)
def _workout_messages(self, msgs_bunch):
""" retranslate every messages bunch in HTTP body to url specified """
if msgs_bunch != []:
while True:
r = requests.post(self.url, headers = self.headers, data = json.dumps(msgs_bunch))
# request success condition below - to end the handler
if r.status_code == 200:
break
print('http_handler: failed to retranslate messages, try again in ' + str(self.timeout) + ' sec')
time.sleep(self.timeout)
# next bunch of messages will not be read until this function ends
            # current bunch of messages will be deleted in next request if delete_flag = True is set
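# Minimal usage sketch (hypothetical endpoint; the base handler_class is
# expected to drive _workout_messages with batches of parsed messages):
#   handler = http_handler_class(url='https://collector.example.com/ingest',
#                                headers={'Content-Type': 'application/json'},
#                                timeout=5)
#   handler._workout_messages([{'msg': 'hello'}])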
| python |
from setuptools import find_packages, setup
from netbox_nagios.version import VERSION
setup(
name="netbox-nagios",
version=VERSION,
author="Gabriel KAHLOUCHE",
author_email="[email protected]",
description="Netbox Plugin to show centreon device state in Netbox.",
url="https://github.com/jessux/netbox-nagios",
license="",
install_requires=[],
packages=find_packages(),
include_package_data=True,
)
| python |
from django.db import models
from django.utils.translation import gettext_lazy
from cradmin_legacy.superuserui.views import mixins
from cradmin_legacy.viewhelpers import listbuilder
from cradmin_legacy.viewhelpers import listbuilderview
from cradmin_legacy.viewhelpers import listfilter
from cradmin_legacy.viewhelpers import multiselect2
class BaseView(mixins.ListFilterQuerySetForRoleMixin,
listbuilderview.FilterListMixin,
listbuilderview.View):
paginate_by = 50
def get_search_fields(self):
"""
Get a list with the names of the fields to use while searching.
Defaults to the ``id`` field and all CharField and TextField on the model.
"""
fields = ['id']
for field in self.get_model_class()._meta.get_fields():
if isinstance(field, (models.CharField, models.TextField)):
fields.append(field.name)
return fields
def add_filterlist_items(self, filterlist):
super(BaseView, self).add_filterlist_items(filterlist=filterlist)
search_fields = self.get_search_fields()
if search_fields:
filterlist.append(listfilter.django.single.textinput.Search(
slug='search',
label=gettext_lazy('Search'),
label_is_screenreader_only=True,
modelfields=search_fields))
class View(listbuilderview.ViewCreateButtonMixin,
BaseView):
value_renderer_class = listbuilder.itemvalue.EditDelete
def get_filterlist_url(self, filters_string):
return self.request.cradmin_app.reverse_appurl(
'filter', kwargs={'filters_string': filters_string})
def get_datetime_filter_fields(self):
return [
field for field in self.get_model_class()._meta.get_fields()
if isinstance(field, models.DateTimeField)]
def add_datetime_filters(self, filterlist):
datetime_filter_fields = self.get_datetime_filter_fields()
for field in datetime_filter_fields:
filterlist.append(listfilter.django.single.select.DateTime(
slug=field.name, label=field.verbose_name))
def add_filterlist_items(self, filterlist):
super(View, self).add_filterlist_items(filterlist=filterlist)
self.add_datetime_filters(filterlist=filterlist)
class ForeignKeySelectView(BaseView):
value_renderer_class = listbuilder.itemvalue.UseThis
hide_menu = True
def get_filterlist_url(self, filters_string):
return self.request.cradmin_app.reverse_appurl(
'foreignkeyselect-filter', kwargs={'filters_string': filters_string})
class ManyToManySelectView(multiselect2.manytomanyview.ListBuilderFilterListViewMixin,
BaseView):
pass
| python |
#!/home/schamblee/projects/django-oidc-provider/project_env/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| python |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import os
from yolox.exp import Exp as MyExp
class Exp(MyExp):
def __init__(self):
super(Exp, self).__init__()
#### s
self.depth = 0.33
self.width = 0.50
# #### m
# self.depth = 0.67
# self.width = 0.75
#### l
# self.depth = 1.0
# self.width = 1.0
#### x
# self.depth = 1.33
# self.width = 1.25
self.adam = True
self.enable_mixup = False # must be False for segmentation
self.multiscale_range = 3 # randomly varied scale; 320:5 -> 32*5~32*15
self.mosaic_scale = (0.1, 2)
#### two different segmentation output sizes
# self.in_channels = [256, 512, 1024]
# self.in_features = ("dark3", "dark4", "dark5")
self.in_channels = [128, 256, 512, 1024]
self.in_features = ('dark2', "dark3", "dark4", "dark5")
####
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
self.data_num_workers = 0
self.pin_memory = False
self.mosaic_prob = 1
self.num_classes = 35 # 35
self.segcls = self.num_classes+1
self.input_size = (320, 320) # (height, width)
self.test_size = (320, 320)
self.data_dir = 'datasets/plate_seg'
# self.backbone_name = 'CoAtNet'
# if self.backbone_name == 'CoAtNet':
# self.multiscale_range = 0 | python |
#!/usr/bin/env python3
""" Update Rancher app answers using API """
import os
import requests
class RancherAPI: # pylint: disable=too-few-public-methods
""" Make calls to Rancher API """
_CALLER = {
'GET': requests.get,
'PUT': requests.put,
'POST': requests.post,
}
def __init__(self, api, token, check_ssl=True):
self.api = api
self.token = token
self.headers = {
'Authorization': "Bearer %s" % token,
'Accept': 'application/json',
}
self.verify = check_ssl
@staticmethod
def _url_join(*args):
return "/".join([a.strip('/') for a in args])
def call(self, url='', method='get', data=None):
""" Make an API call """
method = method.upper()
req = self._CALLER.get(method)
url = url.replace(self.api, '')
return req(
self._url_join(self.api, url),
headers=self.headers,
json=data,
verify=self.verify
)
def __call__(self, *args, **kwargs):
return self.call(*args, **kwargs)
class App:
""" Represents an application installed inside Rancher """
def __init__(self):
self.ressource_id = ""
self.data = {}
self.name = ""
self.answers = {}
self.links = {}
self.revisionId = ''
self.api: RancherAPI
def update(self):
""" Update the application with new answers """
self.data['answers'] = self.answers
res = self.api(
self.links.get('update'),
method='put',
data=self.data,
)
return res
def merge_answers(self, answers):
""" Merge answers block with that new one """
self.answers.update(answers)
class Project: # pylint: disable=too-few-public-methods
""" Represents a project in Rancher """
def __init__(self):
self.ressource_id = None
self.links = []
self.api: RancherAPI
def app(self, name) -> App:
""" Return Application that have this name """
res = self.api(self.links.get('apps') + '?name=%s' % name)
data = res.json().get('data')[0]
app = App()
app.data = data
app.api = self.api
app.ressource_id = data.get('id')
app.name = data.get('name')
app.answers = data.get('answers')
app.revisionId = data.get('appRevisionId')
app.links = data.get('links')
return app
class Rancher: # pylint: disable=too-few-public-methods
""" Initial Rancher API class to get projects """
def __init__(self, api='', token='', check_ssl='', cluster=''):
self.ressource_id = None
self.links = {}
self.name = cluster
self.api: RancherAPI = RancherAPI(api, token, check_ssl)
self._init_links()
def _init_links(self):
cluster_url = self.api().json().get('links').get('clusters')
print(cluster_url)
res = self.api.call(cluster_url + '?name=' + self.name)
data = res.json().get('data')[0]
self.links = data.get('links')
self.ressource_id = data.get('id')
def project(self, name) -> Project:
""" Return a Project having that name """
call = self.links.get('projects') + '?name=%s' % name
res = self.api.call(call)
data = res.json().get('data')[0]
prj = Project()
prj.ressource_id = data.get('id')
prj.links = data.get('links')
prj.api = self.api
return prj
def __main():
api_url = os.environ.get('PLUGIN_API')
check_ssl = os.environ.get('PLUGIN_VERIFY', 'true') != 'false'
project_name = os.environ.get('PLUGIN_PROJECT', 'Default')
app_name = os.environ.get('PLUGIN_APP')
cluster_name = os.environ.get('PLUGIN_CLUSTER')
token = os.environ.get('PLUGIN_TOKEN', None)
answer_keys = os.environ.get('PLUGIN_KEYS', None).split(',')
answer_values = os.environ.get('PLUGIN_VALUES', None).split(',')
rancher = Rancher(
cluster=cluster_name,
api=api_url,
token=token,
check_ssl=check_ssl
)
project = rancher.project(project_name)
app = project.app(app_name)
answers = dict(zip(answer_keys, answer_values))
app.merge_answers(answers)
print(app.answers)
print("Changing answers to", app.answers)
res = app.update()
print(res.json())
if __name__ == '__main__':
__main()
| python |
from __future__ import absolute_import
__author__ = 'katharine'
from enum import IntEnum
from .base import PebblePacket
from .base.types import *
__all__ = ["MusicControlPlayPause", "MusicControlPause", "MusicControlPlay", "MusicControlNextTrack",
"MusicControlPreviousTrack", "MusicControlVolumeUp", "MusicControlVolumeDown", "MusicControlGetCurrentTrack",
"MusicControlUpdateCurrentTrack", "MusicControl"]
class MusicControlPlayPause(PebblePacket):
pass
class MusicControlPlay(PebblePacket):
pass
class MusicControlPause(PebblePacket):
pass
class MusicControlNextTrack(PebblePacket):
pass
class MusicControlPreviousTrack(PebblePacket):
pass
class MusicControlVolumeUp(PebblePacket):
pass
class MusicControlVolumeDown(PebblePacket):
pass
class MusicControlGetCurrentTrack(PebblePacket):
pass
class MusicControlUpdateCurrentTrack(PebblePacket):
artist = PascalString()
album = PascalString()
title = PascalString()
track_length = Optional(Uint32())
track_count = Optional(Uint16())
current_track = Optional(Uint16())
class MusicControlUpdatePlayStateInfo(PebblePacket):
class State(IntEnum):
Paused = 0x00
Playing = 0x01
Rewinding = 0x02
Fastforwarding = 0x03
Unknown = 0x04
class Shuffle(IntEnum):
Unknown = 0x00
Off = 0x01
On = 0x02
class Repeat(IntEnum):
Unknown = 0x00
Off = 0x01
One = 0x02
All = 0x03
state = Uint8(enum=State)
track_position = Uint32()
play_rate = Uint32()
shuffle = Uint8(enum=Shuffle)
repeat = Uint8(enum=Repeat)
class MusicControlUpdateVolumeInfo(PebblePacket):
volume_percent = Uint8()
class MusicControlUpdatePlayerInfo(PebblePacket):
package = PascalString()
name = PascalString()
class MusicControl(PebblePacket):
class Meta:
endpoint = 0x20
endianness = '<'
command = Uint8()
data = Union(command, {
0x01: MusicControlPlayPause,
0x02: MusicControlPause,
0x03: MusicControlPlay,
0x04: MusicControlNextTrack,
0x05: MusicControlPreviousTrack,
0x06: MusicControlVolumeUp,
0x07: MusicControlVolumeDown,
0x08: MusicControlGetCurrentTrack,
0x10: MusicControlUpdateCurrentTrack,
0x11: MusicControlUpdatePlayStateInfo,
0x12: MusicControlUpdateVolumeInfo,
0x13: MusicControlUpdatePlayerInfo,
})
| python |
# Authors: Sylvain MARIE <[email protected]>
# + All contributors to <https://github.com/smarie/python-pytest-cases>
#
# License: 3-clause BSD, <https://github.com/smarie/python-pytest-cases/blob/master/LICENSE>
from .common_pytest_lazy_values import lazy_value, is_lazy
from .common_others import unfold_expected_err, assert_exception, AUTO
AUTO2 = AUTO
"""Deprecated symbol, for retrocompatibility. Will be dropped soon."""
from .fixture_core1_unions import fixture_union, NOT_USED, unpack_fixture, ignore_unused
from .fixture_core2 import pytest_fixture_plus, fixture_plus, param_fixtures, param_fixture
from .fixture_parametrize_plus import pytest_parametrize_plus, parametrize_plus, fixture_ref
# additional symbols without the 'plus' suffix
parametrize = parametrize_plus
fixture = fixture_plus
from .case_funcs_legacy import case_name, test_target, case_tags, cases_generator
from .case_parametrizer_legacy import cases_data, CaseDataGetter, get_all_cases_legacy, \
get_pytest_parametrize_args_legacy, cases_fixture
from .case_funcs_new import case, copy_case_info, set_case_id, get_case_id, get_case_marks, \
get_case_tags, matches_tag_query, is_case_class, is_case_function
from .case_parametrizer_new import parametrize_with_cases, THIS_MODULE, get_all_cases, get_parametrize_args
try:
# -- Distribution mode --
# import from _version.py generated by setuptools_scm during release
from ._version import version as __version__
except ImportError:
# -- Source mode --
# use setuptools_scm to get the current version from src using git
from setuptools_scm import get_version as _gv
from os import path as _path
__version__ = _gv(_path.join(_path.dirname(__file__), _path.pardir))
__all__ = [
'__version__',
# the submodules
'common_pytest_lazy_values', 'common_pytest', 'common_others', 'common_mini_six',
'case_funcs_legacy', 'case_funcs_new', 'case_parametrizer_legacy', 'case_parametrizer_new',
'fixture_core1_unions', 'fixture_core2', 'fixture_parametrize_plus',
# all symbols imported above
'unfold_expected_err', 'assert_exception',
# --fixture core1
'fixture_union', 'NOT_USED', 'unpack_fixture', 'ignore_unused',
# -- fixture core2
'pytest_fixture_plus', 'fixture_plus', 'fixture', 'param_fixtures', 'param_fixture',
# -- fixture parametrize plus
'pytest_parametrize_plus', 'parametrize_plus', 'parametrize', 'fixture_ref', 'lazy_value', 'is_lazy',
# V1 - DEPRECATED symbols
# --cases_funcs
'case_name', 'test_target', 'case_tags', 'cases_generator',
# --main params
'cases_data', 'CaseDataGetter', 'get_all_cases_legacy',
'get_pytest_parametrize_args_legacy', 'cases_fixture',
# V2 symbols
'AUTO', 'AUTO2',
# case functions
'case', 'copy_case_info', 'set_case_id', 'get_case_id', 'get_case_marks',
'get_case_tags', 'matches_tag_query', 'is_case_class', 'is_case_function',
# test functions
'get_all_cases', 'parametrize_with_cases', 'THIS_MODULE', 'get_parametrize_args'
]
try: # python 3.5+ type hints
from pytest_cases.case_funcs_legacy import CaseData, Given, ExpectedNormal, ExpectedError, MultipleStepsCaseData
__all__ += ['CaseData', 'Given', 'ExpectedNormal', 'ExpectedError', 'MultipleStepsCaseData']
except ImportError:
pass
| python |
#!/usr/bin/python
'''
(C) Copyright 2020 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
'''
from data_mover_test_base import DataMoverTestBase
from os.path import join, sep
class CopyProcsTest(DataMoverTestBase):
# pylint: disable=too-many-ancestors
"""Test class for Datamover multiple processes.
Test Class Description:
Tests multi-process (rank) copying of the datamover utility.
Tests the following cases:
Copying with varying numbers of processes (ranks).
:avocado: recursive
"""
def __init__(self, *args, **kwargs):
"""Initialize a CopyProcsTest object."""
super(CopyProcsTest, self).__init__(*args, **kwargs)
def setUp(self):
"""Set up each test case."""
# Start the servers and agents
super(CopyProcsTest, self).setUp()
# Get the parameters
self.test_file = self.params.get(
"test_file", "/run/ior/*")
self.flags_write = self.params.get(
"flags_write", "/run/ior/copy_procs/*")
self.flags_read = self.params.get(
"flags_read", "/run/ior/copy_procs/*")
# Setup the directory structures
self.posix_test_path = join(self.workdir, "posix_test") + sep
self.posix_test_path2 = join(self.workdir, "posix_test2") + sep
self.posix_test_file = join(self.posix_test_path, self.test_file)
self.posix_test_file2 = join(self.posix_test_path2, self.test_file)
self.daos_test_file = join("/", self.test_file)
# Create the directories
cmd = "mkdir -p '{}' '{}'".format(
self.posix_test_path,
self.posix_test_path2)
self.execute_cmd(cmd)
def tearDown(self):
"""Tear down each test case."""
# Remove the created directories
cmd = "rm -rf '{}' '{}'".format(
self.posix_test_path,
self.posix_test_path2)
self.execute_cmd(cmd)
# Stop the servers and agents
super(CopyProcsTest, self).tearDown()
def test_copy_procs(self):
"""
Test Description:
DAOS-5659: Verify multi-process (rank) copying.
Use Cases:
Create pool.
Create POSIX container1 and container2 in pool.
Create a single 100M file in container1 using ior.
:avocado: tags=all,datamover,pr
:avocado: tags=copy_procs
"""
# Create pool and containers
pool1 = self.create_pool()
container1 = self.create_cont(pool1)
container2 = self.create_cont(pool1)
# Get the varying number of processes
procs_list = self.params.get(
"processes", "/run/datamover/copy_procs/*")
# Create the test files
self.set_ior_location_and_run("DAOS_UUID", self.daos_test_file,
pool1, container1,
flags=self.flags_write)
self.set_ior_location_and_run("POSIX", self.posix_test_file,
flags=self.flags_write)
# DAOS -> POSIX
# Run with varying number of processes
self.set_src_location("DAOS_UUID", "/", pool1, container1)
self.set_dst_location("POSIX", self.posix_test_path2)
for num_procs in procs_list:
test_desc = "copy_procs (DAOS->POSIX with {} procs)".format(
num_procs)
self.run_datamover(
test_desc=test_desc,
processes=num_procs)
self.set_ior_location_and_run("POSIX", self.posix_test_file2,
flags=self.flags_read)
# POSIX -> DAOS
# Run with varying number of processes
self.set_src_location("POSIX", self.posix_test_path)
self.set_dst_location("DAOS_UUID", "/", pool1, container2)
for num_procs in procs_list:
test_desc = "copy_procs (POSIX->DAOS with {} processes)".format(
num_procs)
self.run_datamover(
test_desc=test_desc,
processes=num_procs)
self.set_ior_location_and_run("DAOS_UUID", self.daos_test_file,
pool1, container2,
flags=self.flags_read)
| python |
# -*- coding: utf-8 -*-
ZFILL = 3
| python |
"""Config flow for DSMR integration."""
import logging
from typing import Any, Dict, Optional
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_PORT
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
class DSMRFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for DSMR."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def _abort_if_host_port_configured(
self,
port: str,
host: str = None,
updates: Optional[Dict[Any, Any]] = None,
reload_on_update: bool = True,
):
"""Test if host and port are already configured."""
for entry in self.hass.config_entries.async_entries(DOMAIN):
if entry.data.get(CONF_HOST) == host and entry.data[CONF_PORT] == port:
if updates is not None:
changed = self.hass.config_entries.async_update_entry(
entry, data={**entry.data, **updates}
)
if (
changed
and reload_on_update
and entry.state
in (
config_entries.ENTRY_STATE_LOADED,
config_entries.ENTRY_STATE_SETUP_RETRY,
)
):
self.hass.async_create_task(
self.hass.config_entries.async_reload(entry.entry_id)
)
return self.async_abort(reason="already_configured")
async def async_step_import(self, import_config=None):
"""Handle the initial step."""
host = import_config.get(CONF_HOST)
port = import_config[CONF_PORT]
status = self._abort_if_host_port_configured(port, host, import_config)
if status is not None:
return status
if host is not None:
name = f"{host}:{port}"
else:
name = port
return self.async_create_entry(title=name, data=import_config)
| python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Card',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=250, verbose_name="Card's Name")),
('description', models.TextField(verbose_name='Description')),
('life', models.PositiveIntegerField(default=0, verbose_name='Life')),
('damage', models.PositiveIntegerField(default=0, verbose_name='Damage')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CardType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=250, verbose_name='Type of Card')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='card',
name='card_type',
field=models.ForeignKey(verbose_name='Type of Card', to='cardsgame.CardType'),
preserve_default=True,
),
]
| python |
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Kumagai group.
import os
from pathlib import Path
from monty.serialization import loadfn
from pydefect.analyzer.calc_results import CalcResults
from pydefect.analyzer.grids import Grids
from pydefect.analyzer.refine_defect_structure import refine_defect_structure
from pydefect.cli.vasp.make_defect_charge_info import make_defect_charge_info
from pydefect.cli.vasp.get_defect_charge_state import get_defect_charge_state
from pydefect.input_maker.defect_entry import make_defect_entry
from pymatgen.core import Structure
from pymatgen.io.vasp import Chgcar
from vise.input_set.incar import ViseIncar
from vise.util.file_transfer import FileLink
from vise.util.logger import get_logger
from pymatgen.io.vasp.inputs import Poscar, Incar, Potcar
logger = get_logger(__name__)
def is_file(filename):
return Path(filename).is_file() and os.stat(filename).st_size != 0
def calc_charge_state(args):
poscar = Poscar.from_file(args.dir / "POSCAR")
potcar = Potcar.from_file(args.dir / "POTCAR")
incar = Incar.from_file(args.dir / "INCAR")
charge_state = get_defect_charge_state(poscar, potcar, incar)
logger.info(f"Charge state in {args.dir} is {charge_state}.")
return charge_state
def make_defect_entry_main(args):
charge_state = calc_charge_state(args)
structure = Structure.from_file(args.dir / "POSCAR")
defect_entry = make_defect_entry(name=args.name,
charge=charge_state,
perfect_structure=args.perfect,
defect_structure=structure)
defect_entry.to_json_file()
def make_parchg_dir(args):
os.chdir(args.dir)
if not is_file("WAVECAR"):
raise FileNotFoundError("WAVECAR does not exist or is empty.")
try:
calc_results: CalcResults = loadfn("calc_results.json")
except FileNotFoundError:
logger.info("Need to create calc_results.json beforehand.")
raise
calc_results.show_convergence_warning()
# Increment index by 1 as VASP band index begins from 1.
incar = ViseIncar.from_file("INCAR")
band_edge_states = loadfn("band_edge_states.json")
iband = [i + 1 for i in band_edge_states.band_indices_from_vbm_to_cbm]
incar.update({"LPARD": True, "LSEPB": True, "KPAR": 1, "IBAND": iband})
parchg = Path("parchg")
parchg.mkdir()
os.chdir("parchg")
incar.write_file("INCAR")
FileLink(Path("../WAVECAR")).transfer(Path.cwd())
FileLink(Path("../POSCAR")).transfer(Path.cwd())
FileLink(Path("../POTCAR")).transfer(Path.cwd())
FileLink(Path("../KPOINTS")).transfer(Path.cwd())
os.chdir("..")
def make_refine_defect_poscar(args):
structure = refine_defect_structure(args.structure,
args.defect_entry.anchor_atom_index,
args.defect_entry.anchor_atom_coords)
if structure:
print(structure.to(fmt="poscar", filename=args.poscar_name))
def calc_grids(args):
grids = Grids.from_chgcar(args.chgcar)
grids.dump()
def make_defect_charge_info_main(args):
band_idxs = [int(parchg.split(".")[-2]) - 1 for parchg in args.parchgs]
parchgs = [Chgcar.from_file(parchg) for parchg in args.parchgs]
defect_charge_info = make_defect_charge_info(
parchgs, band_idxs, args.bin_interval, args.grids)
defect_charge_info.to_json_file()
plt = defect_charge_info.show_dist()
plt.savefig("dist.pdf")
| python |
"""
These constants provide well-known strings that are used for identifiers,
etc... for widgets that are commonly sub-classed by Manager implementations.
"""
kUIIdBase = "uk.co.foundry.asset.api.ui."
kParameterDelegateId = kUIIdBase + "parameterdelegate"
kParameterDelegateName = "Asset Parameter UI"
kInfoWidgetId = kUIIdBase + "info"
kInfoWidgetName = "Asset Info"
kBrowserWidgetId = kUIIdBase + "browser"
kBrowserWidgetName = "Asset Browser"
kInlinePickerWidgetId = kUIIdBase + "inlinepicker"
kInlinePickerWidgetName = "Asset Picker"
kMultiPickerWidgetId = kUIIdBase + "multipicker"
kMultiPickerWidgetName = "Asset Switcher"
kWorkflowRelationshipWidgetId = kUIIdBase + "workflowrelationship"
kWorkflowRelationshipWidgetName = "Workflow Relationship"
kManagerOptionsWidgetId = kUIIdBase + "manageroptionswidget"
kManagerOptionsWidgetName = "Asset Manager Options"
kRegistrationManagerOptionsWidgetId = kUIIdBase + "registrationmanageroptionswidget"
kRegistrationManagerOptionsWidgetName = kManagerOptionsWidgetName
| python |
import matplotlib.pyplot as plt
def plot_creater(history, bin, modelname):
"""[Creates accuracy/loss charts of the training progress for the deep-learning approaches and saves them to the corresponding model directory]
Args:
history (keras.callbacks.History object): [Contains values accuracy, validation-accuracy, validation-loss and loss values during the training of the model]
bin (String): [shows if binary ("True") or multilabel ("False") classification is active]
modelname (String): [Name of Model]
"""
if (modelname=="CNN" or modelname=="LSTM"):
if (bin=="True"):
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./CNN_bin/acc_val_bin.png')
plt.savefig('./CNN_bin/acc_val_bin.pdf')
plt.close()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./CNN_bin/loss_val_bin.png')
plt.savefig('./CNN_bin/loss_val_bin.pdf')
plt.close()
elif (bin=="False"):
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./CNN_multi/acc_val_multi.png')
plt.savefig('./CNN_multi/acc_val_multi.pdf')
plt.close()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./CNN_multi/loss_val_multi.png')
plt.savefig('./CNN_multi/loss_val_multi.pdf')
plt.close()
elif (modelname == "Resnet"):
if (bin == "True"):
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./resnet_bin/acc_val_bin.png')
plt.savefig('./resnet_bin/acc_val_bin.pdf')
plt.close()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./resnet_bin/loss_val_bin.png')
plt.savefig('./resnet_bin/loss_val_bin.pdf')
plt.close()
elif (bin == "False"):
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./resnet_multi/acc_val_multi.png')
plt.savefig('./resnet_multi/acc_val_multi.pdf')
plt.close()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig('./resnet_multi/loss_val_multi.png')
plt.savefig('./resnet_multi/loss_val_multi.pdf')
plt.close()
else:
print("No Plot available")
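# Example call (a sketch; `history` is the History object returned by keras
# `model.fit`, and the ./CNN_bin output directory is assumed to exist):
#
#   history = model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=10)
#   plot_creater(history, bin="True", modelname="CNN")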
| python |
import GrossSalary, SalaryDeductions, NetSalary
print("Salary Computation App")
while True:
action = str(input("\nWhat would you like to do? \n[A] Calculate Salary\n[B] Exit Application: ")).lower()
if(action == 'a'):
try:
name = str(input("\nEnter Name: "))
rendered_hours = float(input("Enter rendered Hours: "))
loan = float(input("Enter Loan Amount: "))
health_insurance = float(input("Enter Health Insurance: "))
gross = GrossSalary.calculate(rendered_hours)
total_deductions, tax = SalaryDeductions.calculate(gross, loan, health_insurance)
net_salary = NetSalary.calculate(total_deductions, gross)
if gross and total_deductions and net_salary:
print("\nName: {}\nHour: {}\n".format(name, rendered_hours))
print("Gross Salary: Php {}\n".format(gross))
print("Tax: Php {}\nLoan: Php {}\nInsurance: Php {}\n".format(tax, loan, health_insurance))
print("Total Deductions: Php {}\n".format(total_deductions))
print("Net Salary: Php {}".format(net_salary))
except Exception:
print("Something went wrong processing your inputs")
else:
continue
elif(action == 'b'):
print("Application Exited")
break
else:
continue
| python |
from src.libs.CrabadaWeb2Client.CrabadaWeb2Client import CrabadaWeb2Client
from pprint import pprint
from src.libs.CrabadaWeb2Client.types import CrabForLending
# VARS
client = CrabadaWeb2Client()
# TEST FUNCTIONS
def test() -> None:
pprint(client.getCheapestCrabForLending())
# EXECUTE
test()
| python |
# coding: utf-8
import requests
from bs4 import BeautifulSoup
import re
import json
import os
from xml.etree import ElementTree
import time
import io
import pandas as pd
from gotoeat_map.module import getLatLng, checkRemovedMerchant
def main():
merchantFilePath = os.path.dirname(
os.path.abspath(__file__)) + "/merchants.json"
if os.path.exists(merchantFilePath):
json_open = open(merchantFilePath, "r", encoding="utf8")
merchants = json.load(json_open)
else:
merchants = {
"data": [],
"names": []
}
findMerchants = []
page = 0
while True:
page += 1
print("----- Page {page} -----".format(page=page))
html = requests.get(
"https://gotoeat-kumamoto.jp/shop/page/{page}/".format(page=page))
html.encoding = html.apparent_encoding
soup = BeautifulSoup(html.content, "html.parser")
lists = soup.findChildren("article", {"class": "shop"})
if (len(lists) == 0):
break
for merchant in lists:
merchant_name = merchant.find("h3").text.strip()
merchant_area = merchant.find(
"p", {"class": "cat"}).find("a").text.strip()
_merchant_address = merchant.find("p").text.strip()
merchant_postal_code = re.sub(
r"〒([0-9\-]+) (.+)", r"\1", _merchant_address)
merchant_address = re.sub(
r"〒([0-9\-]+) (.+)", r"\2", _merchant_address).replace(" ", "").strip()
print(merchant_name + " - " + merchant_address)
findMerchants.append(merchant_name)
if merchant_name in merchants["names"]:
continue
lat, lng = getLatLng(merchant_address)
print(str(lat) + " " + str(lng))
merchants["data"].append({
"name": merchant_name,
"area": merchant_area,
"address": merchant_address,
"postal_code": merchant_postal_code,
"lat": lat,
"lng": lng
})
merchants["names"].append(merchant_name)
with open(merchantFilePath, mode="w", encoding="utf8") as f:
f.write(json.dumps(merchants, indent=4, ensure_ascii=False))
if soup.find("a", {"class": "next"}) is None:
break
else:
time.sleep(1)
merchants = checkRemovedMerchant(merchants, findMerchants)
with open(merchantFilePath, mode="w", encoding="utf8") as f:
f.write(json.dumps(merchants, indent=4, ensure_ascii=False))
main()
| python |
#!/usr/bin/env python
# coding: utf-8
# ## Full Run
# In[1]:
import os
# In[2]:
Xtrain_dir = 'solar/data/kaggle_solar/train/'
Xtest_dir = 'solar/data/kaggle_solar/test'
ytrain_file = 'solar/data/kaggle_solar/train.csv'
station_file = 'solar/data/kaggle_solar/station_info.csv'
import solar.wrangle.wrangle
import solar.wrangle.subset
import solar.wrangle.engineer
import solar.analyze.model
import solar.report.submission
import numpy as np
# In[14]:
# Choose up to 98 stations; not specifying a station means to use all that fall within the given lats and longs. If the
# parameter 'all' is given, then it will use all stations no matter the provided lats and longs
station = ['all']
# Determine which dates will be used to train the model. No specified date means use the entire set from 1994-01-01
# until 2007-12-31.
train_dates = ['1994-01-01', '2007-12-31']
#2008-01-01 until 2012-11-30
test_dates = ['2008-01-01', '2012-11-30']
station_layout = True
# Use all variables
var = ['all']
# Keep model 0 (the default model) as a column for each of the variables (aggregated over other dimensions)
model = [0, 1]
# Aggregate over all times
times = ['all']
default_grid = {'type':'relative', 'axes':{'var':var, 'models':model, 'times':times,
'station':station}}
# This just uses the station_names as another feature
stat_names = {'type':'station_names'}
frac_dist = {'type':'frac_dist'}
days_solstice = {'type':'days_from_solstice'}
days_cold = {'type':'days_from_coldest'}
all_feats = [stat_names, default_grid, frac_dist, days_solstice, days_cold]
#all_feats = [stat_names, days_solstice, days_cold]
# In[4]:
import solar.report.submission
import solar.wrangle.wrangle
import solar.wrangle.subset
import solar.wrangle.engineer
import solar.analyze.model
# In[15]:
# test combination of station names and grid
reload(solar.wrangle.wrangle)
reload(solar.wrangle.subset)
reload(solar.wrangle.engineer)
from solar.wrangle.wrangle import SolarData
# input_data = SolarData.load(Xtrain_dir, ytrain_file, Xtest_dir,
# station_file, train_dates, test_dates, station,
# station_layout, all_feats, write)
reload(solar.analyze.model)
import numpy as np
from solar.analyze.model import Model
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import metrics
error_formula = 'mean_absolute_error'
cv_splits = 3
jobs = 20
write = 's3'
model = Model.model_from_pickle(
'input_2016-02-21-20-46-17.p', GradientBoostingRegressor,
{'n_estimators': [300], 'max_depth': range(1, 4),
'learning_rate': [0.01, 0.1, 1]}, cv_splits,
error_formula, jobs, write, loss='ls', random_state=0, verbose=10)
| python |
from typing import Tuple, AnyStr
from lib.ui import BasePage
from lib.log import Loggers
from utils.Files import read_page_elements
log = Loggers(__name__)
class Baidu(BasePage):
def open_index(self):
self.get_url("https://www.baidu.com")
def login(self, locator: Tuple[AnyStr]):
self.click(locator)
| python |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 30 10:22:30 2020
@author: NN133
"""
import sys
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
sys.path.append("C:/Users/NN133/Documents/libsvm-3.22/python")
from svmutil import *
#%matplotlib inline
from util_ker import *
#Import data
path = 'C:/Users/NN133/Documents/GitHub/GaussianKernelTest/data/breast-cancer-wisconsin.data.txt'
col_names = ['id','Clump_Thick','U_Cell_Size', 'U_Cell_Shape','Marg_Adh','Epith_Cell_Size','Bare_Nuclei',
'Bland_Chrom','Norm_Nucle','Mitoses','Class']
df = pd.read_csv(path,header=None, names = col_names)
df.info() #Check the data types
#Extract the index for Bare_Neclei values '?'
ind = df.query("Bare_Nuclei=='?'").index
#drop the rows with values '?'
data = df.drop(ind, axis ='index')
#Convert the Bare_Nuclei datatype from Object to int64
data['Bare_Nuclei'] = data.Bare_Nuclei.astype('int64')
#Check for null values
data.isnull().sum()
#Look up Summary statistics of the data
Summary_Stats = data.iloc[:,:-1].describe()
#plot the mean values from the summary stats bar
fig = plt.figure(figsize=(6,6))
Summary_Stats.loc['mean',:].plot(kind='barh', xerr=Summary_Stats.loc['std',:]);
plt.title('Bar chart showing the mean and std of variables')
plt.xlabel('Mean')
#plot the mean values from the summary stats line
fig = plt.figure(figsize=(9,4))
Summary_Stats.loc['mean',:].plot(kind='line', color='blue', linewidth=3);
Summary_Stats.loc['std',:].plot(kind='line', color='lightgreen', linewidth=2)
plt.legend
#Plot the class distribution
fig = plt.figure(figsize=(15,4))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.bar(['neg','pos'], data.Class.value_counts().values, color=('grey','maroon'))
ax1.legend(['neg','pos'])
ax1.set_xlabel('Class Labels')
ax1.set_ylabel('Examples')
Explode=[0,0.2] #Separates the section of the pie chart specified
ax2.pie(data.Class.value_counts().values,explode=Explode, shadow=True,startangle=45)
ax2.legend(['neg','pos'],title ="Classes")
#Replace class labels from [benign, malignant]=(2,4) to (-1,1)
data.Class.replace({2:-1,4:1}, inplace=True)
data.Class.value_counts()
#Drop the id column
data.drop("id", axis=1, inplace=True)
#Extract Variables X and Label y from the data
X = data.iloc[:,:-1].values.reshape(data.shape[0],data.shape[1]-1)
y = data.iloc[:,-1].values.reshape(data.shape[0],1)
#SplitData into train, validation and Test data sets
xtr, xva, xte, ytr, yva, yte = splitdata(X, y, 25, 0.9)
#Choose Kernel
kernel = ['linear','H_poly','poly','rbf','erbf'] #['laplace','sqrexp','sigmoid']
#Set Kernel parameter
params = {}
params['linear'] = []
params['H_poly'] = [2,3,4]
params['poly'] = [2,3,4]
params['rbf'] = [ 0.001,1.0,100.0]
params['erbf'] = [ 0.001,1.0,100.0]
#Set Kernel parameter
TrainKernel = {}
TestKernel = {}
TrainKernelTime = {}
TestKernelTime = {}
PSDCheck = {}
Perf_eva = {}
AucRoc = {}
Result = {}
#Construct Kernel
for ker in kernel:
for par in range(len(params[ker])):
k_param = params[ker][par]
start_time=time.time()
TrainKernel[ker] = kernelfun(xtr, xtr, ker, k_param)
end_time=time.time()
TrainKernelTime[ker] = end_time - start_time
print('{} minutes to construct Training kernel'.format(TrainKernelTime[ker]/60))
PSDCheck[ker] = checkPSD(TrainKernel[ker])
plt.imshow(TrainKernel[ker]) #Any other kernel analysis can be inserted here
TrainKernel[ker] = np.multiply(np.matmul(ytr,ytr.T),TrainKernel[ker])
TrainKernel[ker] = addIndexToKernel(TrainKernel[ker])
start_time=time.time()
TestKernel[ker] = kernelfun(xtr, xte, ker, k_param)
end_time=time.time()
TestKernelTime[ker] = end_time - start_time
print('{} minutes to construct Test kernel'.format(TestKernelTime[ker]/60))
TestKernel[ker] = addIndexToKernel(TestKernel[ker])
model = svm_train(list(ytr), [list(r) for r in TrainKernel[ker]], ('-b 1 -c 4 -t 4'))
p_label, p_acc, p_val = svm_predict(list(yte),[list(row) for row in TestKernel[ker]], model, ('-b 1'))
Perf_eva[ker] = EvaluateTest(np.asarray(yte/1.),np.asarray(p_label))
print("--> {} F1 Score achieved".format(Perf_eva[ker]["Fscore"]))
AucRoc[ker] = computeRoc(yte, p_val)
Result[ker+'_'+ str(par)] = (TrainKernel,TrainKernelTime,PSDCheck,
TestKernel,TestKernelTime,model,p_label, p_acc, p_val,Perf_eva,AucRoc)
print('-' * 6)
print(' Done ')
print('=' * 6)
print("K_tr_" + ker)
#initialize the kernel matrix
K_tr,K_te = intitializeKernels(m,n)
#Append an index column to the kernel matrix
H2 = addIndexToKernel(K_te)
RecordTime = {}
x=X[:10,:]
#Choose Parameter
params=[ 0.001, 0.01, 0.1, 1.0, 10.0, 100.0 ]
#Use Single Kernel
#Kernel = ['rbf']
#ker = Kernel[0]
#####
start_time2 = time.time()
H1 = kernelfun(xtr,xte, ker, params)
end_time2 = time.time()
####
for i in range(0,n):
for j in range(0,m):
u = K_tr[i,:]
print(u)
v = K_tr[j,:]
print(v)
K_tr[i,j] = np.exp(-(np.dot((u-v),(u-v).T)/(2 * (1.25**2))))
#Check if Kernel is PSD
checkPSD(K_tr)
#plot kernel with plt.imshow()
plt.imshow(K_tr)
#Multiply kernel by label
K_tr = np.multiply(np.matmul(ytr,ytr.T),K_tr)
#Append index column to the kernel matrix
K_tr = addIndexToKernel(K_tr)
#Evaluation = EvaluateTest(np.asarray(p_label),yte)
Evaluation = EvaluateTest(np.asarray(yte/1.),np.asarray(p_label))
print("--> {} F1 Score achieved".format(Evaluation["Fscore"])) | python |
# In a metaclass, use __new__ rather than __init__.
# It is used as follows:
# __new__(<the class itself>, <class name>, (parent classes of the class,), {dictionary of class attributes})
# __init__ runs after __new__ has finished.
class Meta(type):
def __new__(cls, name, bases, attrs):
print("__new__ method!")
print(cls, name, bases, attrs)
return type.__new__(cls, name, bases, attrs)
def __init__(cls, name, bases, attrs):
print("__init__ method")
type.__init__(cls, name, bases, attrs)
print("=================================")
print("<The metaclass is being initialized.>")
class MyClass(metaclass=Meta):
pass
print("=================================")
# Looking at the printed output, you can see that the metaclass ran
# somewhere just by defining the class.
| python |
default_app_config = 'action_notifications.apps.ActionNotificationsConfig'
| python |
from __future__ import division, unicode_literals
import codecs
from bs4 import BeautifulSoup
import urllib
from logzero import logger as LOGGER
import re
import codecs
from w3lib.html import replace_entities
import os
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from PIL import Image
from wordcloud import WordCloud, ImageColorGenerator
import pandas as pd
import scattertext as st
import spacy
from fsa_utils.commons import get_asset_root, get_file_content
class Scatter_french_text(object):
def __init__(self, list_directory, list_author, language:str='fr', encoding = 'utf-8'):
self.list_text = self.read_directory(list_directory, encoding)
self.list_author = list_author
self.df = pd.DataFrame()
self.df["text"] = self.list_text
self.df["author"] = self.list_author
self.language = language
self.nlp = spacy.load(language)
self.corpus = st.CorpusFromPandas(self.df, category_col='author', text_col='text', nlp=self.nlp).build()
def explorer(self, category, not_category, metadata):
html = st.produce_scattertext_explorer(self.corpus, category=category, not_category_name=not_category, metadata=metadata)
open("Corpus-Visualization.html", 'wb').write(html.encode('utf-8'))
@staticmethod
def read_directory(list_directory, encoding):
cfg = get_asset_root()
list_text= []
for i in list_directory:
director = get_file_content(cfg, i)
with open(director, encoding=encoding) as f:
list_text.append(f.read())
return list_text
if __name__ == '__main__':
g = Scatter_french_text(["french_books_no_meta/Hugo_Miserables1","french_books_no_meta/Zola_assommoir"], ['Hugo', "Zola"])
g.explorer("Zola", "Hugo",None) | python |
from setuptools import setup, find_packages
with open("README.md") as f:
long_description = f.read()
setup(
name="BindsNET",
version="0.2.9",
description="Spiking neural networks for ML in Python",
license="AGPL-3.0",
long_description=long_description,
long_description_content_type="text/markdown", # This is important!
url="http://github.com/Hananel-Hazan/bindsnet",
author="Hananel Hazan, Daniel Saunders, Darpan Sanghavi, Hassaan Khan",
author_email="[email protected]",
packages=find_packages(),
zip_safe=False,
install_requires=[
"numpy>=1.14.2",
"torch>=1.5.1",
"torchvision>=0.6.1",
"tensorboardX>=1.7",
"tqdm>=4.19.9",
"matplotlib>=2.1.0",
"gym>=0.10.4",
"scikit-build>=0.11.1",
"scikit_image>=0.13.1",
"scikit_learn>=0.19.1",
"opencv-python>=3.4.0.12",
"pytest>=3.4.0",
"scipy>=1.1.0",
"cython>=0.28.5",
"pandas>=0.23.4",
],
)
| python |
class Queue(object):
def __init__(self, queue):
self._queue = queue
self.name = None
def delete(self):
raise NotImplementedError()
class BrokerBackend(object):
def __init__(self):
self._queues = None
@property
def queues(self):
if self._queues is None:
self._queues = self._get_queues()
return self._queues
def _get_queues(self):
raise NotImplementedError()
def filter_queues(self, prefix=None):
def queue_filter(queue):
skip = False
if prefix:
skip = skip or queue.name.startswith(prefix)
return skip
return filter(queue_filter, self.queues)
| python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.keras.layers as tfkl
from veqtor_keras.util import localized_attention
class LocalizedAttentionLayer1D(tfkl.Layer):
def __init__(self,
patch_size=3,
num_heads=1,
stride=1,
dilation=1,
padding='same',
preshaped_q=True, **kwargs):
"""
Args:
patch_size: size of patches to perform localized attention within
num_heads: number of attention heads
stride: the stride of the patch window, stride 2 halves output
dilation: the dilation of the patch window
padding: one of 'same' or 'valid'
preshaped_q: True if q matches strided and padded kv
ex: kv: [B, 4, C]
stride = 2
q must be [B,2,C]
"""
super(LocalizedAttentionLayer1D, self).__init__(**kwargs)
self.patch_size = patch_size
self.num_heads = num_heads
self.stride = stride
self.dilation = dilation
self.padding = padding
self.preshaped_q = preshaped_q
def call(self, q, k, v):
if type(q) == list:
if len(q) == 3:
q, k, v = q
elif len(q) == 4:
q, k, v, mask = q
else:
raise SyntaxError
return localized_attention.localized_attention_1d(q=q, k=k, v=v,
num_heads=self.num_heads,
stride=self.stride,
dilation=self.dilation,
padding=self.padding,
preshaped_q=self.preshaped_q)
def get_config(self):
config = {'patch_size': self.patch_size,
'num_heads': self.num_heads,
'stride': self.stride,
'dilation': self.dilation,
'padding': self.padding,
'preshaped_q': self.preshaped_q}
base_config = super(LocalizedAttentionLayer1D, self).get_config()
return {**base_config, **config}
class LocalizedAttentionLayer2D(tfkl.Layer):
def __init__(self,
patch_size=(3, 3),
num_heads=1,
strides=(1, 1),
dilations=(1, 1),
padding='same',
preshaped_q=True, **kwargs):
"""
Args:
patch_size: size of patches to perform localized attention within
num_heads: number of attention heads
strides: the stride of the patch window, stride 2 halves output
dilations: the dilation of the patch window
padding: one of 'same' or 'valid'
preshaped_q: True if q matches strided and padded kv
ex: kv: [B, 4, 4, C]
strides = (2,2)
q must be [B,2,2,C]
"""
super(LocalizedAttentionLayer2D, self).__init__(**kwargs)
self.patch_size = patch_size
self.num_heads = num_heads
self.strides = strides
self.dilations = dilations
self.padding = padding
self.preshaped_q = preshaped_q
def call(self, q, k, v):
if type(q) == list:
if len(q) == 3:
q, k, v = q
elif len(q) == 4:
q, k, v, mask = q
else:
raise SyntaxError
return localized_attention.localized_attention_2d(q=q, k=k, v=v,
num_heads=self.num_heads,
strides=self.strides,
dilations=self.dilations,
padding=self.padding,
preshaped_q=self.preshaped_q)
def get_config(self):
config = {'patch_size': self.patch_size,
'num_heads': self.num_heads,
'strides': self.strides,
'dilations': self.dilations,
'padding': self.padding,
'preshaped_q': self.preshaped_q}
base_config = super(LocalizedAttentionLayer2D, self).get_config()
return {**base_config, **config}
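# Usage sketch (assumes TensorFlow 2.x and the localized_attention util imported
# above; shapes follow the docstring -- q/k/v are [B, H, W, C] for the 2D layer):
#
#   import tensorflow as tf
#   layer = LocalizedAttentionLayer2D(patch_size=(3, 3), num_heads=2)
#   q = k = v = tf.random.normal([1, 8, 8, 16])
#   out = layer(q, k, v)  # spatial shape matches q with strides=(1, 1), padding='same'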
| python |
"""
https://adventofcode.com/2018/day/2
"""
from collections import Counter
from itertools import product
from pathlib import Path
def solve_a(codes):
pairs = triplets = 0
for code in codes:
occurrences = Counter(code).values()
pairs += any(count == 2 for count in occurrences)
triplets += any(count == 3 for count in occurrences)
return pairs * triplets
def solve_b(codes):
for code_a, code_b in product(codes, codes):
diff = sum(c != c2 for c, c2 in zip(code_a, code_b))
if diff == 1:
common = ''.join(c for c, c2 in zip(code_a, code_b) if c == c2)
return common
if __name__ == '__main__':
assert 12 == solve_a([
'abcdef',
'bababc',
'abbcde',
'abcccd',
'aabcdd',
'abcdee',
'ababab',
])
assert 'fgij' == solve_b([
'abcde',
'fghij',
'klmno',
'pqrst',
'fguij',
'axcye',
'wvxyz',
])
codes = Path('day02.txt').read_text().strip().splitlines()
print('A:', solve_a(codes))
print('B:', solve_b(codes))
| python |
import hashlib
def hash_uid(uid, truncate=6):
"""Hash a UID and truncate it
Args:
uid (str): The UID to hash
truncate (int, optional): The number of the leading characters to keep. Defaults to 6.
Returns:
str: The hashed and truncated UID
"""
hash_sha = hashlib.sha256()
hash_sha.update(uid.encode("UTF-8"))
return hash_sha.hexdigest()[:truncate]
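# Example (illustrative): the result is deterministic for a given input --
# hash_uid("1.2.840.113619") returns the first 6 hex characters of the SHA-256
# digest of that string, and hash_uid(uid, truncate=8) keeps 8 instead.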
| python |
from lib.interface import *
from lib.arquivo import *
from time import sleep
arq = './Ex115/cadastro.txt'
if not arquivoExiste(arq):
criarArquivo(arq)
while True:
cor(2)
opcao = menu(['Cadastrar', 'Listar', 'Sair'])
if opcao == 1:
#Option to register a new person in the file
cabecalho('Novo cadastro')
nome = str(input('Nome: '))
idade = leiaInt('Idade: ')
cadastrar(arq, nome, idade)
elif opcao == 2:
#Option to access and read the contents of the file
lerArquivo(arq)
elif opcao == 3:
cor(11)
print()
print(linha())
print('Volte sempre!')
print(linha())
cor(7)
break
else:
cor(4)
print('Digite uma opção entre 1 e 3')
sleep(1)
| python |
from datetime import datetime
import json
import platform
import socket
import sys
from collections.abc import Iterable
import os
import inspect
import types
import pickle
import base64
import re
import subprocess
import io
import threading
import signal
try:
import pkg_resources
except ImportError:
pkg_resources = None
try:
import line_profiler
except ImportError:
line_profiler = None
try:
import psutil
except ImportError:
psutil = None
try:
import conda
import conda.cli.python_api
except ImportError:
conda = None
try:
import numpy
except ImportError:
numpy = None
from .diff import envdiff
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
class JSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, datetime):
return o.isoformat()
if numpy:
if isinstance(o, numpy.integer):
return int(o)
elif isinstance(o, numpy.floating):
return float(o)
elif isinstance(o, numpy.ndarray):
return o.tolist()
return super().default(o)
class MicroBench(object):
def __init__(self, outfile=None, json_encoder=JSONEncoder,
*args, **kwargs):
self._capture_before = []
if args:
raise ValueError('Only keyword arguments are allowed')
self._bm_static = kwargs
if outfile is not None:
self.outfile = outfile
elif not hasattr(self, 'outfile'):
self.outfile = io.StringIO()
self._json_encoder = json_encoder
def pre_run_triggers(self, bm_data):
# Capture environment variables
if hasattr(self, 'env_vars'):
if not isinstance(self.env_vars, Iterable):
raise ValueError('env_vars should be a tuple of environment '
'variable names')
for env_var in self.env_vars:
bm_data['env_{}'.format(env_var)] = os.environ.get(env_var)
# Capture package versions
if hasattr(self, 'capture_versions'):
if not isinstance(self.capture_versions, Iterable):
raise ValueError('capture_versions is reserved for a tuple of'
'package names - please rename this method')
for pkg in self.capture_versions:
self._capture_package_version(bm_data, pkg)
# Run capture triggers
for method_name in dir(self):
if method_name.startswith('capture_'):
method = getattr(self, method_name)
if callable(method) and method not in self._capture_before:
method(bm_data)
# Initialise telemetry thread
if hasattr(self, 'telemetry'):
interval = getattr(self, 'telemetry_interval', 60)
bm_data['telemetry'] = []
self._telemetry_thread = TelemetryThread(
self.telemetry, interval, bm_data['telemetry'])
self._telemetry_thread.start()
# Special case, as we want this to run immediately before run
bm_data['start_time'] = datetime.now()
def post_run_triggers(self, bm_data):
# Special case, as we want this to run immediately after run
bm_data['finish_time'] = datetime.now()
# Terminate telemetry thread and gather results
if hasattr(self, '_telemetry_thread'):
self._telemetry_thread.terminate()
timeout = getattr(self, 'telemetry_timeout', 30)
self._telemetry_thread.join(timeout)
def capture_function_name(self, bm_data):
bm_data['function_name'] = bm_data['_func'].__name__
def _capture_package_version(self, bm_data, pkg, skip_if_none=False):
bm_data.setdefault('package_versions', {})
try:
ver = pkg.__version__
except AttributeError:
if skip_if_none:
return
ver = None
bm_data['package_versions'][pkg.__name__] = ver
def to_json(self, bm_data):
bm_str = '{}'.format(json.dumps(bm_data,
cls=self._json_encoder))
return bm_str
def output_result(self, bm_data):
""" Output result to self.outfile as a line in JSON format """
bm_str = self.to_json(bm_data) + '\n'
# This should guarantee atomic writes on POSIX by setting O_APPEND
if isinstance(self.outfile, str):
with open(self.outfile, 'a') as f:
f.write(bm_str)
else:
# Assume file-like object
self.outfile.write(bm_str)
def __call__(self, func):
def inner(*args, **kwargs):
bm_data = dict()
bm_data.update(self._bm_static)
bm_data['_func'] = func
bm_data['_args'] = args
bm_data['_kwargs'] = kwargs
if isinstance(self, MBLineProfiler):
if not line_profiler:
raise ImportError('This functionality requires the '
'"line_profiler" package')
self._line_profiler = line_profiler.LineProfiler(func)
self.pre_run_triggers(bm_data)
if isinstance(self, MBLineProfiler):
res = self._line_profiler.runcall(func, *args, **kwargs)
else:
res = func(*args, **kwargs)
self.post_run_triggers(bm_data)
if isinstance(self, MBReturnValue):
bm_data['return_value'] = res
# Delete any underscore-prefixed keys
bm_data = {k: v for k, v in bm_data.items()
if not k.startswith('_')}
self.output_result(bm_data)
return res
return inner
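# Usage sketch (an assumption about how callers compose these mixins; the output
# path and extra keyword are illustrative): subclass MicroBench together with the
# capture mixins you want, then decorate the function to benchmark.
#
#   class MyBench(MicroBench, MBFunctionCall, MBPythonVersion, MBHostInfo):
#       outfile = '/tmp/my_benchmarks.jsonl'
#
#   benchmark = MyBench(experiment='demo')
#
#   @benchmark
#   def my_function(n):
#       return sum(range(n))
#
#   my_function(10_000)  # appends one JSON line of benchmark data to outfile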
class MBFunctionCall(object):
""" Capture function arguments and keyword arguments """
def capture_function_args_and_kwargs(self, bm_data):
bm_data['args'] = bm_data['_args']
bm_data['kwargs'] = bm_data['_kwargs']
class MBReturnValue(object):
""" Capture the decorated function's return value """
pass
class MBPythonVersion(object):
""" Capture the Python version and location of the Python executable """
def capture_python_version(self, bm_data):
bm_data['python_version'] = platform.python_version()
def capture_python_executable(self, bm_data):
bm_data['python_executable'] = sys.executable
class MBHostInfo(object):
""" Capture the hostname and operating system """
def capture_hostname(self, bm_data):
bm_data['hostname'] = socket.gethostname()
def capture_os(self, bm_data):
bm_data['operating_system'] = sys.platform
class MBGlobalPackages(object):
""" Capture Python packages imported in global environment """
def capture_functions(self, bm_data):
# Get globals of caller
caller_frame = inspect.currentframe().f_back.f_back.f_back
caller_globals = caller_frame.f_globals
for g in caller_globals.values():
if isinstance(g, types.ModuleType):
self._capture_package_version(bm_data, g, skip_if_none=True)
else:
try:
module_name = g.__module__
except AttributeError:
continue
self._capture_package_version(
bm_data,
sys.modules[module_name.split('.')[0]],
skip_if_none=True
)
class MBCondaPackages(object):
""" Capture conda packages; requires 'conda' package (pip install conda) """
include_builds = True
include_channels = False
def capture_conda_packages(self, bm_data):
if conda is None:
# Use subprocess
pkg_list = subprocess.check_output(['conda', 'list']).decode('utf8')
else:
# Use conda Python API
pkg_list, stderr, ret_code = conda.cli.python_api.run_command(
conda.cli.python_api.Commands.LIST)
if ret_code != 0 or stderr:
raise RuntimeError('Error running conda list: {}'.format(
stderr))
bm_data['conda_versions'] = {}
for pkg in pkg_list.splitlines():
if pkg.startswith('#') or not pkg.strip():
continue
pkg_data = pkg.split()
pkg_name = pkg_data[0]
pkg_version = pkg_data[1]
if self.include_builds:
pkg_version += pkg_data[2]
if self.include_channels and len(pkg_data) == 4:
pkg_version += ' (' + pkg_data[3] + ')'
bm_data['conda_versions'][pkg_name] = pkg_version
class MBInstalledPackages(object):
""" Capture installed Python packages using pkg_resources """
capture_paths = False
def capture_packages(self, bm_data):
if not pkg_resources:
raise ImportError(
'pkg_resources is required to capture package names, which is '
'provided with the "setuptools" package')
bm_data['package_versions'] = {}
if self.capture_paths:
bm_data['package_paths'] = {}
for pkg in pkg_resources.working_set:
bm_data['package_versions'][pkg.project_name] = pkg.version
if self.capture_paths:
bm_data['package_paths'][pkg.project_name] = pkg.location
class MBLineProfiler(object):
"""
Run the line profiler on the selected function
Requires the line_profiler package. This will generate a benchmark which
times the execution of each line of Python code in your function. This will
slightly slow down the execution of your function, so it's not recommended
in production.
"""
def capture_line_profile(self, bm_data):
bm_data['line_profiler'] = base64.encodebytes(
pickle.dumps(self._line_profiler.get_stats())
).decode('utf8')
@staticmethod
def decode_line_profile(line_profile_pickled):
return pickle.loads(base64.decodebytes(line_profile_pickled.encode()))
@classmethod
def print_line_profile(cls, line_profile_pickled, **kwargs):
lp_data = cls.decode_line_profile(line_profile_pickled)
line_profiler.show_text(lp_data.timings, lp_data.unit, **kwargs)
class _NeedsPsUtil(object):
@classmethod
def _check_psutil(cls):
if not psutil:
raise ImportError('psutil library needed')
class MBHostCpuCores(_NeedsPsUtil):
""" Capture the number of logical CPU cores """
def capture_cpu_cores(self, bm_data):
self._check_psutil()
bm_data['cpu_cores_logical'] = psutil.cpu_count()
class MBHostRamTotal(_NeedsPsUtil):
""" Capture the total host RAM in bytes """
def capture_total_ram(self, bm_data):
self._check_psutil()
bm_data['ram_total'] = psutil.virtual_memory().total
class MBNvidiaSmi(object):
"""
Capture attributes on installed NVIDIA GPUs using nvidia-smi
Requires the nvidia-smi utility to be available in the current PATH.
By default, the gpu_name and memory.total attributes are captured. Extra
attributes can be specified using the class or object-level variable
nvidia_attributes.
By default, all installed GPUs will be polled. To limit to a specific GPU,
specify the nvidia_gpus attribute as a tuple of GPU IDs, which can be
zero-based GPU indexes (can change between reboots, not recommended),
GPU UUIDs, or PCI bus IDs.
"""
_nvidia_attributes_available = ('gpu_name', 'memory.total')
_nvidia_gpu_regex = re.compile(r'^[0-9A-Za-z\-:]+$')
def capture_nvidia(self, bm_data):
if hasattr(self, 'nvidia_attributes'):
nvidia_attributes = self.nvidia_attributes
unknown_attrs = set(nvidia_attributes).difference(
self._nvidia_attributes_available
)
if unknown_attrs:
raise ValueError("Unknown nvidia_attributes: {}".format(
', '.join(unknown_attrs)
))
else:
nvidia_attributes = self._nvidia_attributes_available
if hasattr(self, 'nvidia_gpus'):
gpus = self.nvidia_gpus
if not gpus:
raise ValueError('nvidia_gpus cannot be empty. Leave the '
'attribute out to capture data for all GPUs')
for gpu in gpus:
if not self._nvidia_gpu_regex.match(gpu):
raise ValueError('nvidia_gpus must be a list of GPU indexes'
'(zero-based), UUIDs, or PCI bus IDs')
else:
gpus = None
# Construct the command
cmd = ['nvidia-smi', '--format=csv,noheader',
'--query-gpu=uuid,{}'.format(','.join(nvidia_attributes))]
if gpus:
cmd += ['-i', ','.join(gpus)]
# Execute the command
res = subprocess.check_output(cmd).decode('utf8')
# Process results
for gpu_line in res.split('\n'):
if not gpu_line:
continue
gpu_res = gpu_line.split(', ')
for attr_idx, attr in enumerate(nvidia_attributes):
gpu_uuid = gpu_res[0]
bm_data.setdefault('nvidia_{}'.format(attr), {})[gpu_uuid] = \
gpu_res[attr_idx + 1]
class MicroBenchRedis(MicroBench):
def __init__(self, *args, **kwargs):
super(MicroBenchRedis, self).__init__(*args, **kwargs)
import redis
self.rclient = redis.StrictRedis(**self.redis_connection)
def output_result(self, bm_data):
self.rclient.rpush(self.redis_key, self.to_json(bm_data))
class TelemetryThread(threading.Thread):
def __init__(self, telem_fn, interval, slot, *args, **kwargs):
super(TelemetryThread, self).__init__(*args, **kwargs)
self._terminate = threading.Event()
signal.signal(signal.SIGINT, self.terminate)
signal.signal(signal.SIGTERM, self.terminate)
self._interval = interval
self._telemetry = slot
self._telem_fn = telem_fn
if not psutil:
raise ImportError('Telemetry requires the "psutil" package')
self.process = psutil.Process()
def terminate(self, signum=None, frame=None):
self._terminate.set()
def _get_telemetry(self):
telem = {'timestamp': datetime.now()}
telem.update(self._telem_fn(self.process))
self._telemetry.append(telem)
def run(self):
self._get_telemetry()
while not self._terminate.wait(self._interval):
self._get_telemetry()
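# Hedged usage sketch (assumed names, not part of the original module): the
# telemetry function receives the psutil.Process for the current interpreter
# and returns a dict of metrics; `slot` is any list that collects one sample
# per interval.
if __name__ == '__main__':
    import time
    def _sample_rss(process):
        return {'rss_bytes': process.memory_info().rss}
    _samples = []
    _thread = TelemetryThread(_sample_rss, interval=0.5, slot=_samples)
    _thread.start()
    time.sleep(2)
    _thread.terminate()
    _thread.join()
    print(_samples)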
| python |
import logging
from tqdm import tqdm
import tmdb
from page import blocked_qids
from sparql import sparql
def main():
"""
    Find Wikidata items that are missing a TMDb TV series ID (P4983) but have an
    IMDb ID (P345) or a TheTVDB.com series ID (P4835). Attempt to look up the
TV show via the TMDb API. If there's a match, create a new statement.
Outputs QuickStatements CSV commands.
"""
query = """
SELECT ?item ?imdb ?tvdb ?random WHERE {
# Items with either IMDb or TVDB IDs
{ ?item wdt:P4835 []. }
UNION
{ ?item wdt:P345 []. }
VALUES ?classes {
wd:Q15416
}
?item (wdt:P31/(wdt:P279*)) ?classes.
# Get IMDb and TVDB IDs
OPTIONAL { ?item wdt:P345 ?imdb. }
OPTIONAL { ?item wdt:P4835 ?tvdb. }
# Exclude items that already have a TMDB TV ID
OPTIONAL { ?item wdt:P4983 ?tmdb. }
FILTER(!(BOUND(?tmdb)))
# Generate random sorting key
BIND(MD5(CONCAT(STR(?item), STR(RAND()))) AS ?random)
}
ORDER BY ?random
LIMIT 5000
"""
items = {}
for result in sparql(query):
qid = result["item"]
if qid in blocked_qids():
logging.debug("{} is blocked".format(qid))
continue
if qid not in items:
items[qid] = {"imdb": set(), "tvdb": set()}
item = items[qid]
if result["imdb"]:
item["imdb"].add(result["imdb"])
if result["tvdb"]:
item["tvdb"].add(result["tvdb"])
print("qid,P4983")
for qid in tqdm(items):
item = items[qid]
tmdb_ids = set()
for imdb_id in item["imdb"]:
tv = tmdb.find(id=imdb_id, source="imdb_id", type="tv")
if tv:
tmdb_ids.add(tv["id"])
for tvdb_id in item["tvdb"]:
tv = tmdb.find(id=tvdb_id, source="tvdb_id", type="tv")
if tv:
tmdb_ids.add(tv["id"])
for tmdb_id in tmdb_ids:
print('{},"""{}"""'.format(qid, tmdb_id))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
| python |
import sys
total = 0
for i in range(1, len(sys.argv)):
    total += int(sys.argv[i])
print(total)
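# Example invocation (script name is illustrative): `python sum_args.py 1 2 3`
# prints 6.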
from .normalize import *
from .logarithmic import *
from .exponential import *
from .gamma import *
from .tumblin import *
from .reinhard import *
from .durand import *
from .drago import *
from .fattal import *
from .lischinski import *
| python |
__author__ = 'xf'
| python |
# -*- coding: utf-8 -*-
import pytest
from django.conf import settings
from django.http import HttpResponse
from mock import Mock, PropertyMock, patch
from django_toolkit import middlewares
@pytest.fixture
def http_request(rf):
return rf.get('/')
@pytest.fixture
def http_response():
return HttpResponse()
class TestVersionHeaderMiddleware(object):
@pytest.fixture(autouse=True)
def settings(self, settings):
settings.TOOLKIT = {
'API_VERSION': '1.2.3',
}
return settings
@pytest.fixture
def middleware(self):
return middlewares.VersionHeaderMiddleware()
def test_should_return_a_response(
self,
middleware,
http_request,
http_response
):
response = middleware.process_response(http_request, http_response)
assert isinstance(response, HttpResponse)
def test_should_add_a_version_header_to_the_response(
self,
middleware,
http_request,
http_response
):
response = middleware.process_response(http_request, http_response)
assert 'X-API-Version' in response
assert response['X-API-Version'] == settings.TOOLKIT['API_VERSION']
@pytest.mark.django_db
class TestAccessLogMiddleware(object):
@pytest.fixture
def middleware(self):
return middlewares.AccessLogMiddleware()
@pytest.fixture
def patched_logger(self):
return patch('django_toolkit.middlewares.logger')
@pytest.fixture
def patched_format(self):
return patch(
'django_toolkit.middlewares.AccessLogMiddleware.LOG_FORMAT',
new_callable=PropertyMock
)
@pytest.fixture
def authenticated_http_request(self, http_request):
http_request.user = u'jovem'
http_request.auth = Mock(application=Mock(name='myapp'))
return http_request
def test_should_return_a_response(
self,
middleware,
http_request,
http_response
):
response = middleware.process_response(http_request, http_response)
assert isinstance(response, HttpResponse)
def test_should_log_responses(
self,
middleware,
http_request,
http_response,
patched_logger,
patched_format
):
with patched_logger as mock_logger:
middleware.process_response(http_request, http_response)
assert mock_logger.info.called
def test_should_include_request_and_response_in_the_message(
self,
middleware,
http_request,
http_response,
patched_logger,
patched_format
):
with patched_logger as mock_logger:
with patched_format as mock_format_property:
middleware.process_response(http_request, http_response)
mock_format_string = mock_format_property.return_value
assert mock_format_string.format.called
mock_format_string.format.assert_called_once_with(
app_name=middleware.UNKNOWN_APP_NAME,
request=http_request,
response=http_response
)
mock_logger.info.assert_called_once_with(
mock_format_string.format.return_value
)
def test_should_include_the_authenticated_app_in_the_message(
self,
middleware,
authenticated_http_request,
http_response,
patched_logger,
patched_format
):
with patched_format as mock_format_property:
middleware.process_response(
authenticated_http_request,
http_response
)
mock_format_string = mock_format_property.return_value
assert mock_format_string.format.called
mock_format_string.format.assert_called_once_with(
app_name=authenticated_http_request.auth.application.name,
request=authenticated_http_request,
response=http_response
)
| python |
__version__ = 0.6 | python |
import boto3
import json
import string
from time import asctime
from urllib.request import Request, urlopen
import yaml
def get_API_key() -> str:
"""Grab QnAMaker API key from encrypted s3 object.
"""
s3_client = boto3.client('s3')
response = s3_client.get_object(
Bucket='octochat-processor',
Key='secrets.yml'
)
    data = yaml.safe_load(response['Body'])
return data['qnamaker_api_key']
def create_knowledge_base(faq_url: str, QNAMAKER_API_KEY: str) -> str:
"""Creates knowledge base from FAQ URL using Azure QnAMaker at
https://qnamaker.ai/.
Args:
faq_url: A well-formed URL of a page containing an FAQ section.
QNAMAKER_API_KEY: The API key for QnAMaker.
Returns:
The knowledge base ID.
"""
create_request_endpoint = 'https://westus.api.cognitive.microsoft.com/qnamaker/v2.0/knowledgebases/create'
create_request = Request(create_request_endpoint)
create_request.add_header('Ocp-Apim-Subscription-Key', QNAMAKER_API_KEY)
create_request.add_header('Content-Type', 'application/json')
# TODO: call crawler to get all faq urls if the user wants it to
input_data = str.encode(str({
# include the time of creation in the bot title for logging
'name': 'CAKB_' + asctime(),
'urls': [
faq_url
]
}))
create_response = urlopen(
create_request, data=input_data, timeout=15).read().decode('utf-8')
kbId = json.loads(create_response)['kbId']
return kbId
def remove_invalid_punctuation(s: str) -> str:
"""Removes punctuation invalid by Lex intent rules, specifically any
punctuation except apostrophes, underscores, and hyphens.
Args:
s: any string, usually name of intent.
Returns:
The input string without invalid punctuation.
"""
# Create string of invalid punctuation
invalid_punctuation = ''.join(
[ch for ch in string.punctuation if ch not in '-_\''])
# Remove punctuation from string
s = s.translate(s.maketrans('', '', invalid_punctuation))
s = s.strip()
return s
def get_stopwords() -> list:
"""Retrieve list of stopwords.
Returns:
A list of stopwords retrieved from stopwords.txt.
"""
with open('stopwords.txt', 'r') as f:
return f.read().split('\n')
def question_to_intent_name(s: str, stopwords: list) -> str:
"""Converts a question string to an intent name.
Args:
s: The question string.
stopwords: The list of stopwords to remove from the string.
Returns:
A condensed version of the question text as an intent name.
"""
tokens = s.split(' ')
tokens = [t for t in tokens if t.lower() not in stopwords]
filtered_question = ''.join(tokens)
whitelist = set(string.ascii_lowercase + string.ascii_uppercase)
return ''.join(filter(whitelist.__contains__, filtered_question))
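# Illustrative example (assumes "how", "do" and "i" appear in stopwords.txt):
# question_to_intent_name("How do I reset my password?", ["how", "do", "i"])
# returns "resetmypassword", since only ASCII letters survive the whitelist.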
def generate_intents_from_knowledge_base(kb_tab_separated: str) -> list:
"""Generates a list of intent objects from knowledge base as a tab-separated
string.
Args:
kb_tab_separated: A knowledge base as a tab-separated string.
Returns:
A list of intent objects that each contain an intent name, a list of
sample utterances, and a response.
"""
lines = kb_tab_separated.split('\r')
    # the first line is just headers; the last line is empty
lines = lines[1:-1]
lines = [line.split('\t') for line in lines]
stopwords = get_stopwords()
intents = [{
# only take first 65 characters, full intent name <100 characters
'name': question_to_intent_name(question, stopwords)[:65],
'sample_utterances': [remove_invalid_punctuation(question)],
'response': answer
} for question, answer, source in lines]
return intents
def download_knowledge_base(kbId: str, QNAMAKER_API_KEY: str) -> str:
"""Downloads knowledge base from Azure QnAMaker at https://qnamaker.ai/.
Args:
kbId: The id of a knowledge base in Azure QnAMaker.
QNAMAKER_API_KEY: The API key from QnAMaker.
Returns:
        The knowledge base as a list of intents.
"""
download_kb_request_endpoint = 'https://westus.api.cognitive.microsoft.com/qnamaker/v2.0/knowledgebases/' + kbId
download_kb_request = Request(download_kb_request_endpoint)
download_kb_request.add_header(
'Ocp-Apim-Subscription-Key', QNAMAKER_API_KEY)
download_kb_response = urlopen(download_kb_request, timeout=15).read().decode(
'utf-8') # returns an address from which to download kb
# [1:-1] removes quotation marks from url
download_kb_link = download_kb_response[1:-1]
kb_response = urlopen(download_kb_link).read().decode(
'utf-8-sig') # must be utf-8-sig to remove BOM characters
intents = generate_intents_from_knowledge_base(kb_response)
return intents
def delete_knowledge_base(kbId: str, QNAMAKER_API_KEY: str) -> None:
"""Deletes knowledge base from Azure QnAMaker at https://qnamaker.ai/.
Args:
kbId: The id of a knowledge base in Azure QnAMaker.
QNAMAKER_API_KEY: The API key for QnAMaker.
"""
delete_request_endpoint = 'https://westus.api.cognitive.microsoft.com/qnamaker/v2.0/knowledgebases/' + kbId
delete_request = Request(delete_request_endpoint, method='DELETE')
delete_request.add_header('Ocp-Apim-Subscription-Key', QNAMAKER_API_KEY)
delete_response = urlopen(
delete_request, timeout=15).read().decode('utf-8')
| python |
import warnings
from collections import Counter
from itertools import chain
from typing import Tuple, Type
import strawberry
def merge_types(name: str, types: Tuple[Type, ...]) -> Type:
"""Merge multiple Strawberry types into one
For example, given two queries `A` and `B`, one can merge them into a
super type as follows:
merge_types("SuperQuery", (B, A))
This is essentially the same as:
class SuperQuery(B, A):
...
"""
if not types:
raise ValueError("Can't merge types if none are supplied")
fields = chain(*(t._type_definition.fields for t in types))
counter = Counter(f.name for f in fields)
dupes = [f for f, c in counter.most_common() if c > 1]
if dupes:
warnings.warn("{} has overridden fields: {}".format(name, ", ".join(dupes)))
return strawberry.type(type(name, types, {}))
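# Hedged usage sketch (comment only; assumes strawberry's standard decorator
# and Schema APIs, with illustrative field defaults):
#
#   @strawberry.type
#   class QueryA:
#       hello: str = "world"
#
#   @strawberry.type
#   class QueryB:
#       answer: int = 42
#
#   SuperQuery = merge_types("SuperQuery", (QueryB, QueryA))
#   schema = strawberry.Schema(query=SuperQuery)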
| python |
#!/usr/bin/env python3
from matplotlib import pyplot as plt
import numpy as np
with plt.xkcd():
# Based on "Stove Ownership" from XKCD by Randall Munroe
# https://xkcd.com/418/
fig = plt.figure(figsize=(6,4))
ax = fig.add_axes((0.1, 0.2, 0.8, 0.7))
ax.set_xticks([])
ax.set_yticks([])
# ax.set_ylim([-30, 10])
def f_sigmoid(x):
return 1 / (1 + np.exp(-x))
def f_foo(x):
if x < -1.0:
return -1.0
if x > 1.0:
return 1.0
return x
f = f_sigmoid
x = np.arange(-10, 10, step=0.1)
y = [f(xp) for xp in x]
ax.annotate(
"absolutelty worth it",
xy=(-1, f(-1)),
arrowprops=dict(arrowstyle="->"),
xytext=(-10, f(3) - 0.5),
)
ax.annotate(
"absolutelty not worth it",
xy=(5, f(5)),
arrowprops=dict(arrowstyle="->"),
xytext=(1, f(5) - 0.5),
)
ax.plot(x, y)
ax.set_xlabel("effort put into visualizations")
ax.set_ylabel("number of people \nunderstanding my visualizations")
# fig.text(0.5, 0.05, '"Stove Ownership" from xkcd by Randall Munroe', ha="center")
plt.savefig("featured.png",dpi=240)
plt.savefig("featured.svg",dpi=240)
| python |
import collections
import itertools
import json
import os
import operator
import attr
import torch
import torchtext
import numpy as np
from seq2struct.models import abstract_preproc
try:
from seq2struct.models import lstm
except ImportError:
pass
from seq2struct.models import spider_enc_modules
from seq2struct.utils import registry, batched_sequence
from seq2struct.utils import vocab
from seq2struct.utils import serialization
from seq2struct import resources
@attr.s
class SpiderEncoderState:
state = attr.ib()
memory = attr.ib()
question_memory = attr.ib()
schema_memory = attr.ib()
words = attr.ib()
pointer_memories = attr.ib()
pointer_maps = attr.ib()
def find_word_occurrences(self, word):
return [i for i, w in enumerate(self.words) if w == word]
@attr.s
class PreprocessedSchema:
column_names = attr.ib(factory=list)
table_names = attr.ib(factory=list)
table_bounds = attr.ib(factory=list)
column_to_table = attr.ib(factory=dict)
table_to_columns = attr.ib(factory=dict)
foreign_keys = attr.ib(factory=dict)
foreign_keys_tables = attr.ib(factory=lambda: collections.defaultdict(set))
primary_keys = attr.ib(factory=list)
class AlFu(torch.nn.Module):
def __init__(self, in_size=1024, out_size=256):
super().__init__()
self.fc1 = torch.nn.Linear(in_size, out_size)
self.fc2 = torch.nn.Linear(in_size, out_size)
def align_fusion(self, V_q, H_c):
fusion = torch.softmax(H_c.mm(torch.transpose(V_q, 0, 1)) /
np.sqrt(H_c.shape[1]), 0).mm(V_q)
input_tens = torch.cat([fusion, H_c, fusion * H_c, fusion - H_c], 1)
return input_tens
def forward(self, question, columns):
input_tens = self.align_fusion(question, columns)
x_bar = torch.relu(self.fc1(input_tens))
g = torch.sigmoid(self.fc2(input_tens))
return (g * x_bar) + (1 - g) * columns
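# Shape sketch inferred from the defaults above (out_size=256, in_size=1024):
# with V_q of shape [q_len, 256] and H_c of shape [c_len, 256], `fusion` is
# [c_len, 256], the concatenated `input_tens` is [c_len, 1024], and the gated
# output keeps H_c's shape of [c_len, 256].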
#
# class BiLSTM_SIM(torch.nn.Module):
# def __init__(self, input_size, output_size, dropout, summarize, use_native=False):
# # input_size: dimensionality of input
# # output_size: dimensionality of output
# # dropout
# # summarize:
# # - True: return Tensor of 1 x batch x emb size
# # - False: return Tensor of seq len x batch x emb size
# super().__init__()
#
# if use_native:
# self.lstm = torch.nn.LSTM(
# input_size=input_size,
# hidden_size=output_size // 2,
# bidirectional=True,
# dropout=dropout)
# self.dropout = torch.nn.Dropout(dropout)
# else:
# self.lstm = lstm.LSTM(
# input_size=input_size,
# hidden_size=output_size // 2,
# bidirectional=True,
# dropout=dropout)
# self.summarize = summarize
# self.use_native = use_native
#
#
# def forward(self, all_embs, boundaries):
# for left, right in zip(boundaries, boundaries[1:]):
# # state shape:
# # - h: num_layers (=1) * num_directions (=2) x batch (=1) x recurrent_size / 2
# # - c: num_layers (=1) * num_directions (=2) x batch (=1) x recurrent_size / 2
# # output shape: seq len x batch size x output_size
# # self.lstm(torch.nn.utils.rnn.pack_sequence(all_embs.select(0).unsqueeze(0)))
# output, (h, c) = self.lstm(self.lstm(torch.nn.utils.rnn.pack_sequence(all_embs.unsqueeze(0)))[0])
# # if self.summarize:
# # seq_emb = torch.cat((h[0], h[1]), dim=-1)
# # else:
# seq_emb = output.data
#
# return seq_emb
class SpiderEncoderV2Preproc(abstract_preproc.AbstractPreproc):
def __init__(
self,
save_path,
min_freq=3,
max_count=5000,
include_table_name_in_column=True,
word_emb=None,
count_tokens_in_word_emb_for_vocab=False):
if word_emb is None:
self.word_emb = None
else:
self.word_emb = registry.construct('word_emb', word_emb)
self.data_dir = os.path.join(save_path, 'enc')
self.include_table_name_in_column = include_table_name_in_column
self.count_tokens_in_word_emb_for_vocab = count_tokens_in_word_emb_for_vocab
self.init_texts()
self.vocab_builder = vocab.VocabBuilder(min_freq, max_count)
self.vocab_path = os.path.join(save_path, 'enc_vocab.json')
self.vocab = None
self.counted_db_ids = set()
self.preprocessed_schemas = {}
def init_texts(self):
# TODO: Write 'train', 'val', 'test' somewhere else
self.texts = {'train': [], 'val': [], 'test': []}
def validate_item(self, item, section):
return True, None
def add_item(self, item, section, validation_info):
preprocessed = self.preprocess_item(item, validation_info)
self.texts[section].append(preprocessed)
if section == 'train':
if item.schema.db_id in self.counted_db_ids:
to_count = preprocessed['question']
else:
self.counted_db_ids.add(item.schema.db_id)
to_count = itertools.chain(
preprocessed['question'],
*preprocessed['columns'],
*preprocessed['tables'])
for token in to_count:
count_token = (
self.word_emb is None or
self.count_tokens_in_word_emb_for_vocab or
self.word_emb.lookup(token) is None)
if count_token:
self.vocab_builder.add_word(token)
def clear_items(self):
self.init_texts()
def preprocess_item(self, item, validation_info):
if self.word_emb:
question = self.word_emb.tokenize(item.orig['question'])
else:
question = item.text
preproc_schema = self._preprocess_schema(item.schema)
return {
'question': question,
'db_id': item.schema.db_id,
'columns': preproc_schema.column_names,
'tables': preproc_schema.table_names,
'table_bounds': preproc_schema.table_bounds,
'column_to_table': preproc_schema.column_to_table,
'table_to_columns': preproc_schema.table_to_columns,
'foreign_keys': preproc_schema.foreign_keys,
'foreign_keys_tables': preproc_schema.foreign_keys_tables,
'primary_keys': preproc_schema.primary_keys,
}
def _preprocess_schema(self, schema):
if schema.db_id in self.preprocessed_schemas:
return self.preprocessed_schemas[schema.db_id]
result = self._preprocess_schema_uncached(schema)
self.preprocessed_schemas[schema.db_id] = result
return result
def _preprocess_schema_uncached(self, schema):
r = PreprocessedSchema()
last_table_id = None
for i, column in enumerate(schema.columns):
column_name = ['<type: {}>'.format(column.type)] + self._tokenize(
column.name, column.unsplit_name)
if self.include_table_name_in_column:
if column.table is None:
table_name = ['<any-table>']
else:
table_name = self._tokenize(
column.table.name, column.table.unsplit_name)
column_name += ['<table-sep>'] + table_name
r.column_names.append(column_name)
table_id = None if column.table is None else column.table.id
r.column_to_table[str(i)] = table_id
if table_id is not None:
columns = r.table_to_columns.setdefault(str(table_id), [])
columns.append(i)
if last_table_id != table_id:
r.table_bounds.append(i)
last_table_id = table_id
if column.foreign_key_for is not None:
r.foreign_keys[str(column.id)] = column.foreign_key_for.id
r.foreign_keys_tables[str(column.table.id)].add(column.foreign_key_for.table.id)
r.table_bounds.append(len(schema.columns))
assert len(r.table_bounds) == len(schema.tables) + 1
for i, table in enumerate(schema.tables):
r.table_names.append(self._tokenize(
table.name, table.unsplit_name))
r.foreign_keys_tables = serialization.to_dict_with_sorted_values(r.foreign_keys_tables)
        r.primary_keys = [
            column.id
            for table in schema.tables
            for column in table.primary_keys
        ]
return r
def _tokenize(self, presplit, unsplit):
if self.word_emb:
return self.word_emb.tokenize(unsplit)
return presplit
def save(self):
os.makedirs(self.data_dir, exist_ok=True)
self.vocab = self.vocab_builder.finish()
self.vocab.save(self.vocab_path)
for section, texts in self.texts.items():
with open(os.path.join(self.data_dir, section + '.jsonl'), 'w') as f:
for text in texts:
f.write(json.dumps(text) + '\n')
def load(self):
self.vocab = vocab.Vocab.load(self.vocab_path)
def dataset(self, section):
return [
json.loads(line)
for line in open(os.path.join(self.data_dir, section + '.jsonl'))]
@registry.register('encoder', 'spiderv2')
class SpiderEncoderV2(torch.nn.Module):
batched = True
Preproc = SpiderEncoderV2Preproc
def __init__(
self,
device,
preproc,
word_emb_size=128,
recurrent_size=256,
dropout=0.,
question_encoder=('emb', 'bilstm'),
column_encoder=('emb', 'bilstm'),
table_encoder=('emb', 'bilstm'),
update_config={},
include_in_memory=('question', 'column', 'table'),
batch_encs_update=True,
):
super().__init__()
self._device = device
self.preproc = preproc
self.vocab = preproc.vocab
self.word_emb_size = word_emb_size
self.recurrent_size = recurrent_size
assert self.recurrent_size % 2 == 0
self.include_in_memory = set(include_in_memory)
self.dropout = dropout
self.question_encoder = self._build_modules(question_encoder)
self.column_encoder = self._build_modules(column_encoder)
self.table_encoder = self._build_modules(table_encoder)
self.additional_enc = AlFu()
# 'bilstm': lambda: spider_enc_modules.BiLSTM(
# input_size=self.word_emb_size,
# output_size=self.recurrent_size,
# dropout=self.dropout,
# summarize=False),
# self.additional_lstm_question = BiLSTM_SIM(
# input_size=256,
# output_size=self.recurrent_size,
# dropout=dropout,
# summarize=False)
# self.additional_lstm_columns = BiLSTM_SIM(
# input_size=256,
# output_size=self.recurrent_size,
# dropout=dropout,
# summarize=True)
# self.additional_lstm_tables = BiLSTM_SIM(
# input_size=256,
# output_size=self.recurrent_size,
# dropout=dropout,
# summarize=True)
#
update_modules = {
'relational_transformer':
spider_enc_modules.RelationalTransformerUpdate#,
# 'none':
# spider_enc_modules.NoOpUpdate,
}
self.encs_update = registry.instantiate(
update_modules[update_config['name']],
update_config,
device=self._device,
hidden_size=recurrent_size,
)
self.batch_encs_update = batch_encs_update
def _build_modules(self, module_types):
module_builder = {
'emb': lambda: spider_enc_modules.LookupEmbeddings(
self._device,
self.vocab,
self.preproc.word_emb,
self.word_emb_size),
'linear': lambda: spider_enc_modules.EmbLinear(
input_size=self.word_emb_size,
output_size=self.word_emb_size),
# batch_size, output_size, in_channels, out_channels, kernel_heights, stride, padding,
# keep_probab, vocab_size, embedding_length, weights
'bilstm': lambda: spider_enc_modules.BiLSTM(
input_size=self.word_emb_size,
output_size=self.recurrent_size,
dropout=self.dropout,
summarize=False),
'cnn': lambda: spider_enc_modules.CNN_L2(
# batch_size=50,
output_size=300,
in_channels=1,
out_channels=self.recurrent_size,
# kernel_heights=[1, 3, 5],
stride=1,
padding=1,
keep_probab=0.2,
vocab_size=len(self.vocab),
embedding_length=self.word_emb_size,
# weights=len(self.vocab),
embedder=self.preproc.word_emb,
device=self._device,
vocab = self.vocab,
preproc_word_emb=self.preproc.word_emb,
summarize=False
),
'cnn-summarize': lambda: spider_enc_modules.CNN_L2(
output_size=300,
in_channels=1,
out_channels=self.recurrent_size,
# kernel_heights=[1, 3, 5],
stride=1,
padding=1,
keep_probab=0.2,
vocab_size=len(self.vocab),
embedding_length=self.word_emb_size,
# weights=self.preproc.word_emb.vectors,
embedder=self.preproc.word_emb,
device=self._device,
vocab = self.vocab,
preproc_word_emb=self.preproc.word_emb,
summarize=True
),
# 'bilstm-native': lambda: spider_enc_modules.BiLSTM(
# input_size=self.word_emb_size,
# output_size=self.recurrent_size,
# dropout=self.dropout,
# summarize=False,
# use_native=True),
'bilstm-summarize': lambda: spider_enc_modules.BiLSTM(
input_size=self.word_emb_size,
output_size=self.recurrent_size,
dropout=self.dropout,
summarize=True),
# 'bilstm-native-summarize': lambda: spider_enc_modules.BiLSTM(
# input_size=self.word_emb_size,
# output_size=self.recurrent_size,
# dropout=self.dropout,
# summarize=True,
# use_native=True),
}
modules = []
for module_type in module_types:
modules.append(module_builder[module_type]())
return torch.nn.Sequential(*modules)
def forward_unbatched(self, desc):
# Encode the question
# - LookupEmbeddings
# - Transform embeddings wrt each other?
# q_enc: question len x batch (=1) x recurrent_size
q_enc, (_, _) = self.question_encoder([desc['question']])
# Encode the columns
# - LookupEmbeddings
# - Transform embeddings wrt each other?
# - Summarize each column into one?
# c_enc: sum of column lens x batch (=1) x recurrent_size
c_enc, c_boundaries = self.column_encoder(desc['columns'])
column_pointer_maps = {
i: list(range(left, right))
for i, (left, right) in enumerate(zip(c_boundaries, c_boundaries[1:]))
}
# Encode the tables
# - LookupEmbeddings
# - Transform embeddings wrt each other?
# - Summarize each table into one?
# t_enc: sum of table lens x batch (=1) x recurrent_size
t_enc, t_boundaries = self.table_encoder(desc['tables'])
c_enc_length = c_enc.shape[0]
table_pointer_maps = {
i: [
idx
for col in desc['table_to_columns'][str(i)]
for idx in column_pointer_maps[col]
] + list(range(left + c_enc_length, right + c_enc_length))
for i, (left, right) in enumerate(zip(t_boundaries, t_boundaries[1:]))
}
# Update each other using self-attention
# q_enc_new, c_enc_new, and t_enc_new now have shape
# batch (=1) x length x recurrent_size
q_enc_new, c_enc_new, t_enc_new = self.encs_update(
desc, q_enc, c_enc, c_boundaries, t_enc, t_boundaries)
memory = []
if 'question' in self.include_in_memory:
memory.append(q_enc_new)
if 'column' in self.include_in_memory:
memory.append(c_enc_new)
if 'table' in self.include_in_memory:
memory.append(t_enc_new)
memory = torch.cat(memory, dim=1)
        return SpiderEncoderState(
            state=None,
            memory=memory,
            question_memory=q_enc_new,
            schema_memory=torch.cat((c_enc_new, t_enc_new), dim=1),
            # TODO: words should match memory
            words=desc['question'],
pointer_memories={
'column': c_enc_new,
'table': torch.cat((c_enc_new, t_enc_new), dim=1),
},
pointer_maps={
'column': column_pointer_maps,
'table': table_pointer_maps,
}
)
def forward(self, descs):
# Encode the question
# - LookupEmbeddings
# - Transform embeddings wrt each other?
# q_enc: PackedSequencePlus, [batch, question len, recurrent_size]
q_enc, _ = self.question_encoder([[desc['question']] for desc in descs])
# Encode the columns
# - LookupEmbeddings
# - Transform embeddings wrt each other?
# - Summarize each column into one?
# c_enc: PackedSequencePlus, [batch, sum of column lens, recurrent_size]
c_enc, c_boundaries = self.column_encoder([desc['columns'] for desc in descs])
# ++
q_enc_rr, _rr = self.question_encoder([[desc['question']] for desc in descs])
# ++
column_pointer_maps = [
{
i: list(range(left, right))
for i, (left, right) in enumerate(zip(c_boundaries_for_item, c_boundaries_for_item[1:]))
}
for batch_idx, c_boundaries_for_item in enumerate(c_boundaries)
]
# Encode the tables
# - LookupEmbeddings
# - Transform embeddings wrt each other?
# - Summarize each table into one?
# t_enc: PackedSequencePlus, [batch, sum of table lens, recurrent_size]
t_enc, t_boundaries = self.table_encoder([desc['tables'] for desc in descs])
c_enc_lengths = list(c_enc.orig_lengths())
table_pointer_maps = [
{
i: [
idx
for col in desc['table_to_columns'][str(i)]
for idx in column_pointer_maps[batch_idx][col]
] + list(range(left + c_enc_lengths[batch_idx], right + c_enc_lengths[batch_idx]))
for i, (left, right) in enumerate(zip(t_boundaries_for_item, t_boundaries_for_item[1:]))
}
for batch_idx, (desc, t_boundaries_for_item) in enumerate(zip(descs, t_boundaries))
]
# Update each other using self-attention
# q_enc_new, c_enc_new, and t_enc_new are PackedSequencePlus with shape
# batch (=1) x length x recurrent_size
if self.batch_encs_update:
q_enc_new, c_enc_new, t_enc_new = self.encs_update(
descs, q_enc, c_enc, c_boundaries, t_enc, t_boundaries)
result = []
for batch_idx, desc in enumerate(descs):
if self.batch_encs_update:
q_enc_new_item = q_enc_new.select(batch_idx).unsqueeze(0)
c_enc_new_item = c_enc_new.select(batch_idx).unsqueeze(0)
t_enc_new_item = t_enc_new.select(batch_idx).unsqueeze(0)
else:
q_enc_selected = q_enc.select(batch_idx)
c_enc_selected = c_enc.select(batch_idx)
t_enc_selected = t_enc.select(batch_idx)
c_enc_selected = self.additional_enc(q_enc_selected, c_enc_selected)
t_enc_selected = self.additional_enc(q_enc_selected, t_enc_selected)
# q_lstmed = self.additional_lstm_question(q_enc_selected, _[batch_idx])
# c_lstmed = self.additional_lstm_columns(c_enc_selected, c_boundaries[batch_idx])
# t_lstmed = self.additional_lstm_tables(t_enc_selected, t_boundaries[batch_idx])
q_enc_new_item, c_enc_new_item, t_enc_new_item = \
self.encs_update.forward_unbatched(
desc,
q_enc_selected.unsqueeze(1),
c_enc_selected.unsqueeze(1),
c_boundaries[batch_idx],
t_enc_selected.unsqueeze(1),
t_boundaries[batch_idx])
memory = []
if 'question' in self.include_in_memory:
memory.append(q_enc_new_item)
if 'column' in self.include_in_memory:
memory.append(c_enc_new_item)
if 'table' in self.include_in_memory:
memory.append(t_enc_new_item)
memory = torch.cat(memory, dim=1)
result.append(SpiderEncoderState(
state=None,
memory=memory,
question_memory=q_enc_new_item,
schema_memory=torch.cat((c_enc_new_item, t_enc_new_item), dim=1),
# TODO: words should match memory
words=desc['question'],
pointer_memories={
'column': c_enc_new_item,
'table': torch.cat((c_enc_new_item, t_enc_new_item), dim=1),
},
pointer_maps={
'column': column_pointer_maps[batch_idx],
'table': table_pointer_maps[batch_idx],
}
))
return result
| python |
import logging
import numpy as np
from rasterio.dtypes import dtype_ranges
import warnings
logger = logging.getLogger(__name__)
def execute(
mp,
resampling="nearest",
band_indexes=None,
td_matching_method="gdal",
td_matching_max_zoom=None,
td_matching_precision=8,
td_fallback_to_higher_zoom=False,
clip_pixelbuffer=0,
scale_ratio=1.0,
scale_offset=0.0,
clip_to_output_dtype=None,
**kwargs,
):
"""
Convert and optionally clip input raster or vector data.
Inputs
------
inp
Raster or vector input.
clip (optional)
Vector data used to clip output.
Parameters
----------
resampling : str (default: 'nearest')
Resampling used when reading from TileDirectory.
band_indexes : list
Bands to be read.
td_matching_method : str ('gdal' or 'min') (default: 'gdal')
gdal: Uses GDAL's standard method. Here, the target resolution is
calculated by averaging the extent's pixel sizes over both x and y
axes. This approach returns a zoom level which may not have the
best quality but will speed up reading significantly.
min: Returns the zoom level which matches the minimum resolution of the
extents four corner pixels. This approach returns the zoom level
with the best possible quality but with low performance. If the
tile extent is outside of the destination pyramid, a
TopologicalError will be raised.
td_matching_max_zoom : int (optional, default: None)
If set, it will prevent reading from zoom levels above the maximum.
td_matching_precision : int (default: 8)
Round resolutions to n digits before comparing.
td_fallback_to_higher_zoom : bool (default: False)
In case no data is found at zoom level, try to read data from higher
zoom levels. Enabling this setting can lead to many IO requests in
areas with no data.
clip_pixelbuffer : int
Use pixelbuffer when clipping output by geometry. (default: 0)
scale_ratio : float
Scale factor for input values. (default: 1.0)
scale_offset : float
Offset to add to input values. (default: 0.0)
clip_to_output_dtype : str
Clip output values to range of given dtype. (default: None)
Output
------
np.ndarray
"""
# read clip geometry
if "clip" in mp.params["input"]:
clip_geom = mp.open("clip").read()
if not clip_geom:
logger.debug("no clip data over tile")
return "empty"
else:
clip_geom = []
if "raster" in mp.input: # pragma: no cover
warnings.warn(
UserWarning(
"'raster' input name in the mapchete configuration is deprecated and has to be named 'inp'"
)
)
inp_key = "raster"
else:
inp_key = "inp"
with mp.open(inp_key) as inp:
if inp.is_empty():
return "empty"
logger.debug("reading input data")
input_data = inp.read(
indexes=band_indexes,
resampling=resampling,
matching_method=td_matching_method,
matching_max_zoom=td_matching_max_zoom,
matching_precision=td_matching_precision,
fallback_to_higher_zoom=td_fallback_to_higher_zoom,
)
if isinstance(input_data, np.ndarray):
input_type = "raster"
elif isinstance(input_data, list):
input_type = "vector"
else: # pragma: no cover
raise TypeError(
"input data type for this process has to either be a raster or a vector "
"dataset"
)
if input_type == "raster":
if scale_offset != 0.0:
logger.debug("apply scale offset %s", scale_offset)
input_data = input_data.astype("float64", copy=False) + scale_offset
if scale_ratio != 1.0:
logger.debug("apply scale ratio %s", scale_ratio)
input_data = input_data.astype("float64", copy=False) * scale_ratio
if (
scale_offset != 0.0 or scale_ratio != 1.0
) and clip_to_output_dtype in dtype_ranges:
logger.debug("clip to output dtype ranges")
input_data.clip(*dtype_ranges[clip_to_output_dtype], out=input_data)
if clip_geom:
logger.debug("clipping output with geometry")
# apply original nodata mask and clip
return mp.clip(input_data, clip_geom, clip_buffer=clip_pixelbuffer)
else:
return input_data
elif input_type == "vector":
if clip_geom: # pragma: no cover
raise NotImplementedError("clipping vector data is not yet implemented")
else:
logger.debug(f"writing {len(input_data)} features")
return input_data
| python |
from Classes.Wrappers.PlayerDisplayData import PlayerDisplayData
class BattleLogPlayerEntry:
def encode(calling_instance, fields):
pass
def decode(calling_instance, fields):
fields["BattleLogEntry"] = {}
fields["BattleLogEntry"]["Unkown1"] = calling_instance.readVInt()
fields["BattleLogEntry"]["Unkown2"] = calling_instance.readLong()
fields["BattleLogEntry"]["Unkown3"] = calling_instance.readVInt()
fields["BattleLogEntry"]["Unkown4"] = calling_instance.readBoolean()
countVal = calling_instance.readVInt()
fields["BattleLogEntry"]["Unkown5"] = countVal
fields["BattleLogEntry"]["Entries"] = {}
for i in range(countVal):
fields["BattleLogEntry"]["Entries"][str(i)] = {}
fields["BattleLogEntry"]["Entries"][str(i)]["Unknown1"] = calling_instance.readDataReference()
fields["BattleLogEntry"]["Entries"][str(i)]["Unknown2"] = calling_instance.readVInt()
fields["BattleLogEntry"]["Entries"][str(i)]["Unknown3"] = calling_instance.readVInt()
fields["BattleLogEntry"]["Entries"][str(i)]["Unknown4"] = calling_instance.readVInt()
fields["BattleLogEntry"]["Unkown6"] = calling_instance.readVInt()
PlayerDisplayData.decode(calling_instance, fields) | python |
# coding: UTF-8
import numpy as np
import chainer
from chainer import Variable,Chain
import chainer.links as L
import chainer.functions as F
import chainer.optimizers as O
# model
class MyChain(Chain):
def __init__(self):
super().__init__(
l1 = L.Linear(1,2),
l2 = L.Linear(2,1),
)
def __call__(self, x):
h = F.sigmoid(self.l1(x))
return self.l2(h)
# Optimizer
model = MyChain()
optimizer = O.SGD()  # optimization algorithm: SGD (stochastic gradient descent)
# optimizer = O.Adam()  # optimization algorithm: Adam
optimizer.setup(model)
# execution
input_array = np.array([[1]], dtype=np.float32)
answer_array = np.array([[1]], dtype=np.float32)
x = Variable(input_array)
t = Variable(answer_array)
model.cleargrads()  # reset the model's gradients
y = model(x)
loss = F.mean_squared_error(y, t)  # mean squared error between y and t
loss.backward()  # backpropagate the error
# compare the l1 weights before and after the update
print(model.l1.W.data)
optimizer.update()
print(model.l1.W.data) | python |
# -*- coding: utf-8 -*-
"""
Script Name:
Author: Do Trinh/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
""" Import """
import argparse
from PLM.cores.Errors import VersionNotFoundException
from PLM import VERSION_LOG
from difflib import unified_diff
from pyPLM.loggers import DamgLogger
logger = DamgLogger(__name__, filepth=VERSION_LOG)
class DiscardDefaultIfSpecifiedAppendAction(argparse._AppendAction):
"""
Fixes bug http://bugs.python.org/issue16399 for 'append' action
"""
def __call__(self, parser, namespace, values, option_string=None):
if getattr(self, "_discarded_default", None) is None:
setattr(namespace, self.dest, [])
self._discarded_default = True # pylint: disable=attribute-defined-outside-init
super().__call__(parser, namespace, values, option_string=None)
class ConfiguredFile:
def __init__(self, path, versionconfig):
self.path = path
self._versionconfig = versionconfig
def should_contain_version(self, version, context):
"""
Raise VersionNotFound if the version number isn't present in this file.
Return normally if the version number is in fact present.
"""
context["current_version"] = self._versionconfig.serialize(version, context)
search_expression = self._versionconfig.search.format(**context)
if self.contains(search_expression):
return
# the `search` pattern did not match, but the original supplied
# version number (representing the same version part values) might
# match instead.
# check whether `search` isn't customized, i.e. should match only
# very specific parts of the file
search_pattern_is_default = self._versionconfig.search == "{current_version}"
if search_pattern_is_default and self.contains(version.original):
# original version is present and we're not looking for something
# more specific -> this is accepted as a match
return
# version not found
raise VersionNotFoundException("Did not find '{}' in file: '{}'".format(search_expression, self.path))
def contains(self, search):
if not search:
return False
with open(self.path, "rt", encoding="utf-8") as f:
search_lines = search.splitlines()
lookbehind = []
for lineno, line in enumerate(f.readlines()):
lookbehind.append(line.rstrip("\n"))
if len(lookbehind) > len(search_lines):
lookbehind = lookbehind[1:]
                if (
                    search_lines[0] in lookbehind[0]
                    and search_lines[-1] in lookbehind[-1]
                    and search_lines[1:-1] == lookbehind[1:-1]
                ):
logger.info("Found '%s' in %s at line %s: %s", search, self.path, lineno - (len(lookbehind) - 1),
line.rstrip(),)
return True
return False
def replace(self, current_version, new_version, context, dry_run):
with open(self.path, "rt", encoding="utf-8") as f:
file_content_before = f.read()
file_new_lines = f.newlines
context["current_version"] = self._versionconfig.serialize(current_version, context)
context["new_version"] = self._versionconfig.serialize(new_version, context)
search_for = self._versionconfig.search.format(**context)
replace_with = self._versionconfig.replace.format(**context)
file_content_after = file_content_before.replace(search_for, replace_with)
if file_content_before == file_content_after:
# TODO expose this to be configurable
file_content_after = file_content_before.replace(current_version.original, replace_with)
if file_content_before != file_content_after:
logger.info("%s file %s:", "Would change" if dry_run else "Changing", self.path)
logger.info("\n".join(list(unified_diff(file_content_before.splitlines(), file_content_after.splitlines(),
lineterm="", fromfile="a/" + self.path, tofile="b/" + self.path,))))
else:
logger.info("%s file %s", "Would not change" if dry_run else "Not changing", self.path)
if not dry_run:
with open(self.path, "wt", encoding="utf-8", newline=file_new_lines) as f:
f.write(file_content_after)
def __str__(self):
return self.path
def __repr__(self):
return "<bumpversion.ConfiguredFile:{}>".format(self.path)
# -------------------------------------------------------------------------------------------------------------
# Created by Trinh Do on 5/6/2020 - 3:13 AM
# © 2017 - 2020 DAMGteam. All rights reserved
| python |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Huawei.VRP config normalizer
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.core.confdb.normalizer.base import BaseNormalizer, match, ANY, REST
from noc.core.confdb.syntax.defs import DEF
from noc.core.confdb.syntax.patterns import IF_NAME, BOOL
class VRPNormalizer(BaseNormalizer):
SYNTAX = [
DEF(
"interfaces",
[
DEF(
IF_NAME,
[
DEF(
"bpdu",
[
DEF(
BOOL,
required=False,
name="enabled",
gen="make_interface_ethernet_bpdu",
)
],
)
],
multi=True,
name="interface",
)
],
)
]
@match("sysname", ANY)
def normalize_hostname(self, tokens):
yield self.make_hostname(tokens[1])
@match("undo", "http", "server", "enable")
def normalize_http_server(self, tokens):
yield self.make_protocols_http()
@match("undo", "http", "secure-server", "enable")
def normalize_https_server(self, tokens):
yield self.make_protocols_https()
@match("aaa", "local-user", ANY, "privilege", "level", ANY)
def normalize_username_access_level(self, tokens):
yield self.make_user_class(username=tokens[2], class_name="level-%s" % tokens[5])
@match("aaa", "local-user", ANY, "password", REST)
def normalize_username_password(self, tokens):
yield self.make_user_encrypted_password(username=tokens[2], password=" ".join(tokens[4:]))
@match("vlan", "batch", REST)
def normalize_vlan_id_batch(self, tokens):
for vlan in tokens[2:]:
yield self.make_vlan_id(vlan_id=vlan)
@match("vlan", ANY)
def normalize_vlan_id(self, tokens):
yield self.make_vlan_id(vlan_id=tokens[1])
@match("vlan", ANY, "description", REST)
def normalize_vlan_description(self, tokens):
yield self.make_vlan_description(vlan_id=tokens[1], description=" ".join(tokens[3:]))
@match("interface", ANY)
def normalize_interface(self, tokens):
if_name = self.interface_name(tokens[1])
yield self.make_interface(interface=if_name)
@match("interface", ANY, "description", REST)
def normalize_interface_description(self, tokens):
yield self.make_interface_description(
interface=self.interface_name(tokens[1]), description=" ".join(tokens[2:])
)
@match("interface", ANY, "port-security", "max-mac-num", ANY)
def normalize_port_security(self, tokens):
yield self.make_unit_port_security_max_mac(
interface=self.interface_name(tokens[1]), limit=tokens[4]
)
@match("interface", ANY, "broadcast-suppression", ANY)
def normalize_port_storm_control_broadcast(self, tokens):
yield self.make_interface_storm_control_broadcast_level(
interface=self.interface_name(tokens[1]), level=tokens[3]
)
@match("interface", ANY, "multicast-suppression", ANY)
def normalize_port_storm_control_multicast(self, tokens):
yield self.make_interface_storm_control_multicast_level(
interface=self.interface_name(tokens[1]), level=tokens[3]
)
@match("interface", ANY, "unicast-suppression", ANY)
def normalize_port_storm_control_unicast(self, tokens):
yield self.make_interface_storm_control_unicast_level(
interface=self.interface_name(tokens[1]), level=tokens[3]
)
@match("interface", ANY, "stp", "cost", ANY)
def normalize_stp_cost(self, tokens):
yield self.make_spanning_tree_interface_cost(
interface=self.interface_name(tokens[1]), cost=tokens[4]
)
@match("interface", ANY, "port", "hybrid", "pvid", "vlan", ANY)
def normalize_switchport_untagged(self, tokens):
if_name = self.interface_name(tokens[1])
yield self.make_switchport_untagged(interface=if_name, unit=if_name, vlan_filter=tokens[6])
@match("interface", ANY, "port", "trunk", "allow-pass", "vlan", REST)
def normalize_switchport_tagged(self, tokens):
if_name = self.interface_name(tokens[1])
yield self.make_switchport_tagged(
interface=if_name,
unit=if_name,
vlan_filter=" ".join(tokens[6:]).replace(" to ", "-").replace(" ", ","),
)
@match("interface", ANY, "undo", "negotiation", "auto")
def normalize_interface_negotiation(self, tokens):
yield self.make_interface_ethernet_autonegotiation(
interface=self.interface_name(tokens[1]), mode="manual"
)
@match("interface", ANY, "bpdu", "enable")
def normalize_interface_bpdu(self, tokens):
yield self.make_interface_ethernet_bpdu(
interface=self.interface_name(tokens[1]), enabled=True
)
@match("interface", ANY, "loopback-detect", "enable")
def normalize_interface_no_loop_detect(self, tokens):
if not self.get_context("loop_detect_disabled"):
if_name = self.interface_name(tokens[1])
yield self.make_loop_detect_interface(interface=if_name)
@match("enable", "lldp")
def normalize_enable_lldp(self, tokens):
self.set_context("lldp_disabled", False)
yield self.make_global_lldp_status(status=True)
@match("enable", "stp")
def normalize_enable_stp(self, tokens):
self.set_context("stp_disabled", False)
yield self.make_global_stp_status(status=True)
@match("interface", ANY, "undo", "lldp", "enable")
def normalize_interface_lldp_enable(self, tokens):
yield self.make_lldp_interface_disable(interface=self.interface_name(tokens[1]))
@match("interface", ANY, "stp", "disable")
def normalize_interface_stp_status(self, tokens):
yield self.make_spanning_tree_interface_disable(interface=self.interface_name(tokens[1]))
@match("interface", ANY, "stp", "bpdu-filter", "enable")
def normalize_interface_stp_bpdu_filter(self, tokens):
yield self.make_spanning_tree_interface_bpdu_filter(
interface=self.interface_name(tokens[1]), enabled=True
)
@match("interface", ANY, "ip", "address", ANY, ANY)
def normalize_vlan_ip(self, tokens):
if_name = self.interface_name(tokens[1])
yield self.make_unit_inet_address(
interface=if_name, unit=if_name, address=self.to_prefix(tokens[4], tokens[5])
)
@match("ip", "route-static", ANY, ANY, ANY)
def normalize_default_gateway(self, tokens):
yield self.make_inet_static_route_next_hop(
route=self.to_prefix(tokens[2], tokens[3]), next_hop=tokens[4]
)
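# Illustrative VRP configuration fragment (made up) of the kind these rules
# normalize; judging by the @match patterns above, the upstream tokenizer
# appears to prepend the enclosing "interface <name>" context to each
# sub-command before matching:
#
#   sysname SW-CORE-01
#   vlan batch 10 20 30
#   interface GigabitEthernet0/0/1
#    description uplink-to-agg
#    port hybrid pvid vlan 10
#    stp cost 2000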
| python |