from argparse import Namespace, ArgumentParser
import os
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import datasets
import torchvision.transforms as transforms
from utils import datautils
import models
from utils import utils
import numpy as np
import PIL
from tqdm import tqdm
import sklearn.model_selection  # train_test_split is used below; a bare "import sklearn" does not expose it
from utils.lars_optimizer import LARS
import scipy.stats  # the truncated-normal init below uses scipy.stats.truncnorm
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
import copy
class BaseSSL(nn.Module):
"""
Inspired by PyTorch Lightning (https://pytorch-lightning.readthedocs.io/en/latest/).
A similar but lighter and customized version.
"""
DATA_ROOT = os.environ.get('DATA_ROOT', os.path.dirname(os.path.abspath(__file__)) + '/data')
IMAGENET_PATH = os.environ.get('IMAGENET_PATH', '/home/aashukha/imagenet/raw-data/')
def __init__(self, hparams):
super().__init__()
self.hparams = hparams
if hparams.data == 'imagenet':
print(f"IMAGENET_PATH = {self.IMAGENET_PATH}")
def get_ckpt(self):
return {
'state_dict': self.state_dict(),
'hparams': self.hparams,
}
@classmethod
def load(cls, ckpt, device=None):
parser = ArgumentParser()
cls.add_model_hparams(parser)
hparams = parser.parse_args([], namespace=ckpt['hparams'])
res = cls(hparams, device=device)
res.load_state_dict(ckpt['state_dict'])
return res
@classmethod
def default(cls, device=None, **kwargs):
parser = ArgumentParser()
cls.add_model_hparams(parser)
hparams = parser.parse_args([], namespace=Namespace(**kwargs))
res = cls(hparams, device=device)
return res
def forward(self, x):
pass
def transforms(self):
pass
def samplers(self):
return None, None
def prepare_data(self):
train_transform, test_transform = self.transforms()
# print('The following train transform is used:\n', train_transform)
# print('The following test transform is used:\n', test_transform)
if self.hparams.data == 'cifar':
self.trainset = datasets.CIFAR10(root=self.DATA_ROOT, train=True, download=True, transform=train_transform)
self.testset = datasets.CIFAR10(root=self.DATA_ROOT, train=False, download=True, transform=test_transform)
elif self.hparams.data == 'imagenet':
traindir = os.path.join(self.IMAGENET_PATH, 'train')
valdir = os.path.join(self.IMAGENET_PATH, 'val')
self.trainset = datasets.ImageFolder(traindir, transform=train_transform)
self.testset = datasets.ImageFolder(valdir, transform=test_transform)
else:
raise NotImplementedError
def dataloaders(self, iters=None):
train_batch_sampler, test_batch_sampler = self.samplers()
if iters is not None:
train_batch_sampler = datautils.ContinousSampler(
train_batch_sampler,
iters
)
train_loader = torch.utils.data.DataLoader(
self.trainset,
num_workers=self.hparams.workers,
pin_memory=True,
batch_sampler=train_batch_sampler,
)
test_loader = torch.utils.data.DataLoader(
self.testset,
num_workers=self.hparams.workers,
pin_memory=True,
batch_sampler=test_batch_sampler,
)
return train_loader, test_loader
@staticmethod
def add_parent_hparams(add_model_hparams):
def foo(cls, parser):
for base in cls.__bases__:
base.add_model_hparams(parser)
add_model_hparams(cls, parser)
return foo
@classmethod
def add_model_hparams(cls, parser):
parser.add_argument('--data', help='Dataset to use', default='cifar')
parser.add_argument('--arch', default='ResNet50', help='Encoder architecture')
parser.add_argument('--batch_size', default=256, type=int, help='The number of unique images in the batch')
parser.add_argument('--aug', default=True, type=bool, help='Applies random augmentations if True')
class SimCLR(BaseSSL):
@classmethod
@BaseSSL.add_parent_hparams
def add_model_hparams(cls, parser):
# loss params
parser.add_argument('--temperature', default=0.1, type=float, help='Temperature in the NTXent loss')
# data params
parser.add_argument('--multiplier', default=2, type=int)
parser.add_argument('--color_dist_s', default=1., type=float, help='Color distortion strength')
parser.add_argument('--scale_lower', default=0.08, type=float, help='The minimum scale factor for RandomResizedCrop')
# ddp
parser.add_argument('--sync_bn', default=True, type=bool,
help='Synchronises BatchNorm layers between all processes if True'
)
def __init__(self, hparams, device=None):
super().__init__(hparams)
self.hparams.dist = getattr(self.hparams, 'dist', 'dp')
model = models.encoder.EncodeProject(hparams)
self.reset_parameters()
if device is not None:
model = model.to(device)
if self.hparams.dist == 'ddp':
if self.hparams.sync_bn:
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
dist.barrier()
if device is not None:
model = model.to(device)
self.model = DDP(model, [hparams.gpu], find_unused_parameters=True)
elif self.hparams.dist == 'dp':
self.model = nn.DataParallel(model)
else:
raise NotImplementedError
self.criterion = models.losses.NTXent(
tau=hparams.temperature,
multiplier=hparams.multiplier,
distributed=(hparams.dist == 'ddp'),
)
def reset_parameters(self):
def conv2d_weight_truncated_normal_init(p):
fan_in = p.shape[1]
stddev = np.sqrt(1. / fan_in) / .87962566103423978
r = scipy.stats.truncnorm.rvs(-2, 2, loc=0, scale=1., size=p.shape)
r = stddev * r
with torch.no_grad():
p.copy_(torch.FloatTensor(r))
def linear_normal_init(p):
with torch.no_grad():
p.normal_(std=0.01)
for m in self.modules():
if isinstance(m, nn.Conv2d):
conv2d_weight_truncated_normal_init(m.weight)
elif isinstance(m, nn.Linear):
linear_normal_init(m.weight)
def step(self, batch):
x, _ = batch
z = self.model(x)
loss, acc = self.criterion(z)
return {
'loss': loss,
'contrast_acc': acc,
}
def encode(self, x):
return self.model(x, out='h')
def forward(self, *args, **kwargs):
return self.model(*args, **kwargs)
def train_step(self, batch, it=None):
logs = self.step(batch)
if self.hparams.dist == 'ddp':
self.trainsampler.set_epoch(it)
if it is not None:
logs['epoch'] = it / len(self.batch_trainsampler)
return logs
def test_step(self, batch):
return self.step(batch)
def samplers(self):
if self.hparams.dist == 'ddp':
# trainsampler = torch.utils.data.distributed.DistributedSampler(self.trainset, num_replicas=1, rank=0)
trainsampler = torch.utils.data.distributed.DistributedSampler(self.trainset)
print(f'Process {dist.get_rank()}: {len(trainsampler)} training samples per epoch')
testsampler = torch.utils.data.distributed.DistributedSampler(self.testset)
print(f'Process {dist.get_rank()}: {len(testsampler)} test samples')
else:
trainsampler = torch.utils.data.sampler.RandomSampler(self.trainset)
testsampler = torch.utils.data.sampler.RandomSampler(self.testset)
batch_sampler = datautils.MultiplyBatchSampler
# batch_sampler.MULTILPLIER = self.hparams.multiplier if self.hparams.dist == 'dp' else 1
batch_sampler.MULTILPLIER = self.hparams.multiplier
# need for DDP to sync samplers between processes
self.trainsampler = trainsampler
self.batch_trainsampler = batch_sampler(trainsampler, self.hparams.batch_size, drop_last=True)
return (
self.batch_trainsampler,
batch_sampler(testsampler, self.hparams.batch_size, drop_last=True)
)
def transforms(self):
if self.hparams.data == 'cifar':
train_transform = transforms.Compose([
transforms.RandomResizedCrop(
32,
scale=(self.hparams.scale_lower, 1.0),
interpolation=PIL.Image.BICUBIC,
),
transforms.RandomHorizontalFlip(),
datautils.get_color_distortion(s=self.hparams.color_dist_s),
transforms.ToTensor(),
datautils.Clip(),
])
test_transform = train_transform
elif self.hparams.data == 'imagenet':
from utils.datautils import GaussianBlur
im_size = 224
train_transform = transforms.Compose([
transforms.RandomResizedCrop(
im_size,
scale=(self.hparams.scale_lower, 1.0),
interpolation=PIL.Image.BICUBIC,
),
transforms.RandomHorizontalFlip(0.5),
datautils.get_color_distortion(s=self.hparams.color_dist_s),
transforms.ToTensor(),
GaussianBlur(im_size // 10, 0.5),
datautils.Clip(),
])
test_transform = train_transform
return train_transform, test_transform
def get_ckpt(self):
return {
'state_dict': self.model.module.state_dict(),
'hparams': self.hparams,
}
def load_state_dict(self, state):
k = next(iter(state.keys()))
if k.startswith('model.module'):
super().load_state_dict(state)
else:
self.model.module.load_state_dict(state)
class SSLEval(BaseSSL):
@classmethod
@BaseSSL.add_parent_hparams
def add_model_hparams(cls, parser):
parser.add_argument('--test_bs', default=256, type=int)
parser.add_argument('--encoder_ckpt', default='', help='Path to the encoder checkpoint')
parser.add_argument('--precompute_emb_bs', default=-1, type=int,
help='If not -1, embeddings are precomputed with this batch size and kept fixed during training.'
)
parser.add_argument('--finetune', default=False, type=bool, help='Finetunes the encoder if True')
parser.add_argument('--augmentation', default='RandomResizedCrop', help='')
parser.add_argument('--scale_lower', default=0.08, type=float, help='The minimum scale factor for RandomResizedCrop')
def __init__(self, hparams, device=None):
super().__init__(hparams)
self.hparams.dist = getattr(self.hparams, 'dist', 'dp')
if hparams.encoder_ckpt != '':
ckpt = torch.load(hparams.encoder_ckpt, map_location=device)
if getattr(ckpt['hparams'], 'dist', 'dp') == 'ddp':
ckpt['hparams'].dist = 'dp'
if self.hparams.dist == 'ddp':
ckpt['hparams'].dist = 'gpu:%d' % hparams.gpu
self.encoder = models.REGISTERED_MODELS[ckpt['hparams'].problem].load(ckpt, device=device)
else:
print('===> Random encoder is used!!!')
self.encoder = SimCLR.default(device=device)
self.encoder.to(device)
if not hparams.finetune:
for p in self.encoder.parameters():
p.requires_grad = False
elif hparams.dist == 'ddp':
raise NotImplementedError
self.encoder.eval()
if hparams.data == 'cifar':
hdim = self.encode(torch.ones(10, 3, 32, 32).to(device)).shape[1]
n_classes = 10
elif hparams.data == 'imagenet':
hdim = self.encode(torch.ones(10, 3, 224, 224).to(device)).shape[1]
n_classes = 1000
if hparams.arch == 'linear':
model = nn.Linear(hdim, n_classes).to(device)
model.weight.data.zero_()
model.bias.data.zero_()
self.model = model
else:
raise NotImplementedError
if hparams.dist == 'ddp':
self.model = DDP(model, [hparams.gpu])
def encode(self, x):
return self.encoder.model(x, out='h')
def step(self, batch):
if self.hparams.problem == 'eval' and self.hparams.data == 'imagenet':
batch[0] = batch[0] / 255.
h, y = batch
if self.hparams.precompute_emb_bs == -1:
h = self.encode(h)
p = self.model(h)
loss = F.cross_entropy(p, y)
acc = (p.argmax(1) == y).float()
return {
'loss': loss,
'acc': acc,
}
def forward(self, *args, **kwargs):
return self.model(*args, **kwargs)
def train_step(self, batch, it=None):
logs = self.step(batch)
if it is not None:
iters_per_epoch = len(self.trainset) / self.hparams.batch_size
iters_per_epoch = max(1, int(np.around(iters_per_epoch)))
logs['epoch'] = it / iters_per_epoch
if self.hparams.dist == 'ddp' and self.hparams.precompute_emb_bs == -1:
self.object_trainsampler.set_epoch(it)
return logs
def test_step(self, batch):
logs = self.step(batch)
if self.hparams.dist == 'ddp':
utils.gather_metrics(logs)
return logs
def prepare_data(self):
super().prepare_data()
def create_emb_dataset(dataset):
embs, labels = [], []
loader = torch.utils.data.DataLoader(
dataset,
num_workers=self.hparams.workers,
pin_memory=True,
batch_size=self.hparams.precompute_emb_bs,
shuffle=False,
)
for x, y in tqdm(loader):
if self.hparams.data == 'imagenet':
x = x.to(torch.device('cuda'))
x = x / 255.
e = self.encode(x)
embs.append(utils.tonp(e))
labels.append(utils.tonp(y))
embs, labels = np.concatenate(embs), np.concatenate(labels)
dataset = torch.utils.data.TensorDataset(torch.FloatTensor(embs), torch.LongTensor(labels))
return dataset
if self.hparams.precompute_emb_bs != -1:
print('===> Precompute embeddings:')
assert not self.hparams.aug
with torch.no_grad():
self.encoder.eval()
self.testset = create_emb_dataset(self.testset)
self.trainset = create_emb_dataset(self.trainset)
print(f'Train size: {len(self.trainset)}')
print(f'Test size: {len(self.testset)}')
def dataloaders(self, iters=None):
if self.hparams.dist == 'ddp' and self.hparams.precompute_emb_bs == -1:
trainsampler = torch.utils.data.distributed.DistributedSampler(self.trainset)
testsampler = torch.utils.data.distributed.DistributedSampler(self.testset, shuffle=False)
else:
trainsampler = torch.utils.data.RandomSampler(self.trainset)
testsampler = torch.utils.data.SequentialSampler(self.testset)
self.object_trainsampler = trainsampler
trainsampler = torch.utils.data.BatchSampler(
self.object_trainsampler,
batch_size=self.hparams.batch_size, drop_last=False,
)
if iters is not None:
trainsampler = datautils.ContinousSampler(trainsampler, iters)
train_loader = torch.utils.data.DataLoader(
self.trainset,
num_workers=self.hparams.workers,
pin_memory=True,
batch_sampler=trainsampler,
)
test_loader = torch.utils.data.DataLoader(
self.testset,
num_workers=self.hparams.workers,
pin_memory=True,
sampler=testsampler,
batch_size=self.hparams.test_bs,
)
return train_loader, test_loader
def transforms(self):
if self.hparams.data == 'cifar':
trs = []
if 'RandomResizedCrop' in self.hparams.augmentation:
trs.append(
transforms.RandomResizedCrop(
32,
scale=(self.hparams.scale_lower, 1.0),
interpolation=PIL.Image.BICUBIC,
)
)
if 'RandomCrop' in self.hparams.augmentation:
trs.append(transforms.RandomCrop(32, padding=4, padding_mode='reflect'))
if 'color_distortion' in self.hparams.augmentation:
trs.append(datautils.get_color_distortion(self.encoder.hparams.color_dist_s))
train_transform = transforms.Compose(trs + [
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
datautils.Clip(),
])
test_transform = transforms.Compose([
transforms.ToTensor(),
])
elif self.hparams.data == 'imagenet':
train_transform = transforms.Compose([
transforms.RandomResizedCrop(
224,
scale=(self.hparams.scale_lower, 1.0),
interpolation=PIL.Image.BICUBIC,
),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
lambda x: (255*x).byte(),
])
test_transform = transforms.Compose([
datautils.CenterCropAndResize(proportion=0.875, size=224),
transforms.ToTensor(),
lambda x: (255 * x).byte(),
])
return train_transform if self.hparams.aug else test_transform, test_transform
def train(self, mode=True):
if self.hparams.finetune:
super().train(mode)
else:
self.model.train(mode)
def get_ckpt(self):
return {
'state_dict': self.state_dict() if self.hparams.finetune else self.model.state_dict(),
'hparams': self.hparams,
}
def load_state_dict(self, state):
if self.hparams.finetune:
super().load_state_dict(state)
else:
if hasattr(self.model, 'module'):
self.model.module.load_state_dict(state)
else:
self.model.load_state_dict(state)
class SemiSupervisedEval(SSLEval):
@classmethod
@BaseSSL.add_parent_hparams
def add_model_hparams(cls, parser):
parser.add_argument('--train_size', default=-1, type=int)
parser.add_argument('--data_split_seed', default=42, type=int)
parser.add_argument('--n_augs_train', default=-1, type=int)
parser.add_argument('--n_augs_test', default=-1, type=int)
parser.add_argument('--acc_on_unlabeled', default=False, type=bool)
def prepare_data(self):
super(SSLEval, self).prepare_data()
if len(self.trainset) != self.hparams.train_size:
idxs, unlabeled_idxs = sklearn.model_selection.train_test_split(
np.arange(len(self.trainset)),
train_size=self.hparams.train_size,
random_state=self.hparams.data_split_seed,
)
if self.hparams.data == 'cifar' or self.hparams.data == 'cifar100':
if self.hparams.acc_on_unlabeled:
self.trainset_unlabeled = copy.deepcopy(self.trainset)
self.trainset_unlabeled.data = self.trainset.data[unlabeled_idxs]
self.trainset_unlabeled.targets = np.array(self.trainset.targets)[unlabeled_idxs]
print(f'Test size (0): {len(self.testset)}')
print(f'Unlabeled train size (1): {len(self.trainset_unlabeled)}')
self.trainset.data = self.trainset.data[idxs]
self.trainset.targets = np.array(self.trainset.targets)[idxs]
print('Training dataset size:', len(self.trainset))
else:
assert not self.hparams.acc_on_unlabeled
if isinstance(self.trainset, torch.utils.data.TensorDataset):
self.trainset.tensors = [t[idxs] for t in self.trainset.tensors]
else:
self.trainset.samples = [self.trainset.samples[i] for i in idxs]
print('Training dataset size:', len(self.trainset))
self.encoder.eval()
with torch.no_grad():
if self.hparams.n_augs_train != -1:
self.trainset = EmbEnsEval.create_emb_dataset(self, self.trainset, n_augs=self.hparams.n_augs_train)
if self.hparams.n_augs_test != -1:
self.testset = EmbEnsEval.create_emb_dataset(self, self.testset, n_augs=self.hparams.n_augs_test)
if self.hparams.acc_on_unlabeled:
self.trainset_unlabeled = EmbEnsEval.create_emb_dataset(
self,
self.trainset_unlabeled,
n_augs=self.hparams.n_augs_test
)
if self.hparams.acc_on_unlabeled:
self.testset = torch.utils.data.ConcatDataset([
datautils.DummyOutputWrapper(self.testset, 0),
datautils.DummyOutputWrapper(self.trainset_unlabeled, 1)
])
def transforms(self):
ens_train_transfom, ens_test_transform = EmbEnsEval.transforms(self)
train_transform, test_transform = SSLEval.transforms(self)
return (
train_transform if self.hparams.n_augs_train == -1 else ens_train_transfom,
test_transform if self.hparams.n_augs_test == -1 else ens_test_transform
)
def step(self, batch, it=None):
if self.hparams.problem == 'eval' and self.hparams.data == 'imagenet':
batch[0] = batch[0] / 255.
h, y = batch
if len(h.shape) == 4:
h = self.encode(h)
p = self.model(h)
loss = F.cross_entropy(p, y)
acc = (p.argmax(1) == y).float()
return {
'loss': loss,
'acc': acc,
}
def test_step(self, batch):
if not self.hparams.acc_on_unlabeled:
return super().test_step(batch)
# TODO: refactor
x, y, d = batch
logs = {}
keys = set()
for didx in [0, 1]:
if torch.any(d == didx):
t = super().test_step([x[d == didx], y[d == didx]])
for k, v in t.items():
keys.add(k)
logs[k + f'_{didx}'] = v
for didx in [0, 1]:
for k in keys:
logs[k + f'_{didx}'] = logs.get(k + f'_{didx}', torch.tensor([]))
return logs
def configure_optimizers(args, model, cur_iter=-1):
iters = args.iters
def exclude_from_wd_and_adaptation(name):
if 'bn' in name:
return True
if args.opt == 'lars' and 'bias' in name:
return True
param_groups = [
{
'params': [p for name, p in model.named_parameters() if not exclude_from_wd_and_adaptation(name)],
'weight_decay': args.weight_decay,
'layer_adaptation': True,
},
{
'params': [p for name, p in model.named_parameters() if exclude_from_wd_and_adaptation(name)],
'weight_decay': 0.,
'layer_adaptation': False,
},
]
LR = args.lr
if args.opt == 'sgd':
optimizer = torch.optim.SGD(
param_groups,
lr=LR,
momentum=0.9,
)
elif args.opt == 'adam':
optimizer = torch.optim.Adam(
param_groups,
lr=LR,
)
elif args.opt == 'lars':
optimizer = torch.optim.SGD(
param_groups,
lr=LR,
momentum=0.9,
)
larc_optimizer = LARS(optimizer)
else:
raise NotImplementedError
if args.lr_schedule == 'warmup-anneal':
scheduler = utils.LinearWarmupAndCosineAnneal(
optimizer,
args.warmup,
iters,
last_epoch=cur_iter,
)
elif args.lr_schedule == 'linear':
scheduler = utils.LinearLR(optimizer, iters, last_epoch=cur_iter)
elif args.lr_schedule == 'const':
scheduler = None
else:
raise NotImplementedError
if args.opt == 'lars':
optimizer = larc_optimizer
# if args.verbose:
# print('Optimizer : ', optimizer)
# print('Scheduler : ', scheduler)
return optimizer, scheduler
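# --- Illustrative usage sketch (not part of the original module) ---
# A minimal single-process training loop wiring the classes above together.
# Hyperparameters the parsers above do not define (`workers`, `dist`) and the
# optimizer settings (`iters`, `opt`, `lr`, `weight_decay`, `lr_schedule`) are
# assumptions inferred from how they are accessed elsewhere in this module.
def _example_simclr_training(device='cuda', iters=1000):
    model = SimCLR.default(
        device=device,
        data='cifar', arch='ResNet50', batch_size=256, aug=True,
        temperature=0.1, multiplier=2, color_dist_s=1., scale_lower=0.08,
        sync_bn=False, dist='dp', workers=2,
    )
    model.prepare_data()
    train_loader, _ = model.dataloaders(iters=iters)
    opt_args = Namespace(iters=iters, opt='sgd', lr=0.1, weight_decay=1e-6,
                         lr_schedule='const')
    optimizer, scheduler = configure_optimizers(opt_args, model)
    for it, batch in enumerate(train_loader):
        batch = [batch[0].to(device), batch[1]]
        logs = model.train_step(batch, it=it)
        optimizer.zero_grad()
        logs['loss'].backward()
        optimizer.step()
        if scheduler is not None:
            scheduler.step()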
|
python
|
from flask import Flask , request , jsonify , make_response
from flask_cors import CORS
import pymysql
#import config
from config import db_host, db_user, db_passwrd, db_db, config_domain
from check_encode import random_token, check_prefix
from display_list import list_data
from auth import auth
shorty_api = Flask(__name__)
cors = CORS(shorty_api)
shorty_api.config.from_object('config')
shorty_host = config_domain
# api Block
@shorty_api.route('/v1/shorten' , methods= ['POST'])
@auth.login_required
def create_short_url():
'''
Takes a long URL as `url`, an optional custom string, and an optional tag
as input, and returns the shortened URL.
'''
if request.method == 'POST':
if 'url' in request.args :
og_url = request.args['url']
if check_prefix(og_url):
if 'custom' in request.args :
token_string = request.args['custom']
if 'tag' in request.args:
tag_url = request.args['tag']
else:
tag_url = ''
else:
token_string = random_token()
if 'tag' in request.args:
tag_url = request.args['tag']
else:
tag_url = ''
conn = pymysql.connect(db_host , db_user , db_passwrd , db_db)
cursor = conn.cursor()
check_row = "SELECT S_URL FROM WEB_URL WHERE S_URL = %s FOR UPDATE"
cursor.execute(check_row,(token_string,))
check_fetch = cursor.fetchone()
if (check_fetch is None):
insert_row = """
INSERT INTO WEB_URL(URL , S_URL , TAG) VALUES( %s, %s , %s)
"""
result_cur = cursor.execute(insert_row ,(og_url , token_string , tag_url,))
conn.commit()
conn.close()
short_url = shorty_host+token_string
long_url = og_url
data = jsonify({
'long_url' : og_url,
'short_url' : short_url,
'custom' : token_string,
'tag' : tag_url
})
return make_response(data , 200)
else:
data = jsonify({'error':'suffix already present'})
return make_response(data , 200)
else:
data = jsonify({'error':'URL given is not valid . Enter a valid URL.'})
return make_response(data , 200)
else:
data = jsonify({'error':'invalid request'})
return make_response(data , 405)
else:
data = jsonify({'error':'Invalid Method Used'})
return make_response(data , 405)
@shorty_api.route('/v1/expand' , methods= ['GET'])
def retrieve_short_url():
'''
Takes the short-URL token as input and returns the long URL
together with analytics: total clicks plus per-platform and
per-browser click counts.
'''
if request.method == 'GET':
if 'custom' in request.args:
token_string = request.args['custom']
conn = pymysql.connect(db_host , db_user , db_passwrd , db_db)
cursor = conn.cursor()
check_row = "SELECT S_URL FROM WEB_URL WHERE S_URL = %s FOR UPDATE"
cursor.execute(check_row,(token_string,))
check_fetch = cursor.fetchone()
if (check_fetch is None):
data = jsonify({
'error' : 'Custom string given not available as shortened url.'
})
return make_response(data,200)
else:
info , counter , browser , platform = list_data(token_string)
data = jsonify({
'clicks' : counter[0],
'custom' : info[1],
'long_url' : info[0],
'click_browser' : {
'chrome' : browser[0] ,
'firefox' : browser[1],
'safari' : browser[2],
'other_browser': browser[3]
},
'click_platform' : {
'android' : platform[0],
'ios' : platform[1],
'windows' : platform[2],
'linux' : platform[3],
'mac' : platform[4],
'other_platform' :platform[5]
},
'tag' : info[2]
})
return make_response(data,200)
else:
data = jsonify({'error' : 'Follow the API format ',
})
return make_response(data,405)
else:
data = jsonify({'error':'Invalid Method Used , Use GET .'})
return make_response(data , 405)
@shorty_api.route('/v1/all' , methods= ['GET'])
@auth.login_required
def all_url():
'''
Returns a paginated list of all shortened URLs.
Requires `offset` and `limit` query parameters.
'''
if request.method != 'GET':
data = jsonify({'error':'Invalid Method Used , Use GET .'})
return make_response(data , 405)
if 'offset' not in request.args or 'limit' not in request.args:
data = jsonify({'error': 'offset and limit must not be empty'})
return make_response(data,405)
try:
offset = int(request.args['offset'])
limit = int(request.args['limit'])
except (ValueError, KeyError):
data = jsonify({'error' : 'Bad Request'})
return make_response(data, 400)
conn = pymysql.connect(db_host , db_user , db_passwrd , db_db, cursorclass=pymysql.cursors.DictCursor)
try:
with conn.cursor() as cursor:
sql = 'SELECT * FROM WEB_URL LIMIT %s OFFSET %s;'
cursor.execute(sql, (limit, offset))
result = cursor.fetchall()
finally:
conn.close()
return jsonify(result)
# api error Handlers
@shorty_api.errorhandler(404)
def not_found(error):
data = jsonify({'error' : 'Not Found'})
return make_response(data,404)
@shorty_api.errorhandler(400)
def invalid_request(error):
data = jsonify({'error': 'Invalid Request'})
return make_response(data, 400)
@shorty_api.errorhandler(500)
def internal_error(error):
data = jsonify({'error': 'Internal error'})
return make_response(data, 500)
@shorty_api.errorhandler(405)
def method_not_allowed(error):
data = jsonify({'error': 'Follow the API format',
'Desc': 'Use POST for API requests. url=<your url>, custom=<custom url>, tag=<URL tag>'})
return make_response(data, 405)
# End API Block
if __name__ == '__main__':
shorty_api.run(host='0.0.0.0', port=8000 )
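# --- Illustrative client usage (not part of the original module) ---
# The credentials and the token below are hypothetical; the real values depend on
# the `auth` object in auth.py and on the tokens stored in the WEB_URL table.
#
#   import requests
#   resp = requests.post(
#       'http://localhost:8000/v1/shorten',
#       params={'url': 'https://example.com/some/long/path', 'tag': 'docs'},
#       auth=('api_user', 'api_password'),
#   )
#   print(resp.json())  # {'long_url': ..., 'short_url': ..., 'custom': ..., 'tag': ...}
#
#   resp = requests.get('http://localhost:8000/v1/expand', params={'custom': 'abc123'})
#   print(resp.json())  # clicks, browser/platform breakdown, tag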
|
python
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from beanmachine.ppl.experimental.gp.kernels import all_kernels
from beanmachine.ppl.experimental.gp.likelihoods import all_likelihoods
__all__ = all_likelihoods + all_kernels
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Base classes for prior classifiers."""
import math
from abc import ABC
from dataclasses import dataclass
from enum import Enum, auto
from pathlib import Path
from typing import Any, Callable, Iterable, List, Union
from .actions import Action
from .color import Color
class ScalingMode(Enum):
"""How raw classifier scores are rescaled to produce the final score."""
IDENTITY = auto()
"""No scaling at all, preserve the raw score"""
CLAMP = auto()
"""Any values less than 0 are set to 0, any values larger than 1 are set to 1"""
LINEAR = auto()
"""Map values to the [0,1] range"""
LOG = auto()
"""Map the logarithm of the values to the [0,1] range"""
def scale(self, values: List[float]) -> List[float]:
"""Scales a list of scores according to a given mode"""
if self == ScalingMode.IDENTITY:
return values[:]
if self == ScalingMode.CLAMP:
return [min(1, max(0, score)) for score in values]
minimum, maximum = min(values), max(values)
if minimum == maximum or len(values) <= 1:
return [1.0] * len(values)
if self == ScalingMode.LINEAR:
return [(score - minimum) / (maximum - minimum) for score in values]
if self == ScalingMode.LOG:
return [(math.log(score) - math.log(minimum)) / (math.log(maximum) - math.log(minimum)) for score in values]
raise ValueError("Invalid ScalingMode value!")
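# Illustrative example of the scaling modes above (values worked out by hand):
#   ScalingMode.CLAMP.scale([-0.5, 0.3, 1.7])  -> [0, 0.3, 1]
#   ScalingMode.LINEAR.scale([1.0, 2.0, 3.0])  -> [0.0, 0.5, 1.0]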
@dataclass
class Classifier(ABC):
"""
Base class for all prior classifiers. Do not use,
refer instead to :class:`ElementClassifier` or :class:`ViewClassifier`.
"""
name: str
enabled: bool = True
callback: Any = None
@dataclass
class ElementClassifier(Classifier):
"""
Classifies a set of elements. The callback will receive a list of
elements that have tags for all tags given in subset.
The callback either returns a sublist of elements, or a list of tuples
mapping element to a numeric score.
If the callback is doing multi-class prediction, then the output should be
a dictionary mapping class name to a sublist or list of tuples described above.
The prediction results would be stored as <classifier_name>__<class name>.
If highlight is True, highlight every element returned by this classifier.
If highlight is a float x, highlight every element with a score larger than x.
If highlight is an int N, highlight the top N scoring elements.
"""
action: Action = None
highlight: Union[float, bool] = False
mode: ScalingMode = ScalingMode.CLAMP
highlight_color: Color = Color.from_str("#5A1911")
subset: Union[str, Iterable[str]] = "all"
result_type: type = float
@dataclass
class ViewClassifier(Classifier):
"""
Classifies a given view. The callback will receive a view
and return an iterable of string tags.
"""
def _active_element_filter_func(elements, workflow):
actives = set(workflow.js.execute_file(Path("find_active_elements.js")))
return [elem for elem in elements if elem.wtl_uid in actives]
@dataclass
class ActiveElementFilter(ElementClassifier):
"""
Returns all elements that are considered active, i.e. interactable in some way.
Will also add a boolean `is_active` field to every element's metadata.
"""
name: str = "is_active"
callback: Callable = _active_element_filter_func
result_type: type = bool
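# --- Illustrative sketch (not part of the original module) ---
# A hypothetical scoring classifier built on the dataclasses above; the
# `element.metadata` access is an assumption about the element API.
def _text_length_score_func(elements, workflow):
    # Score each element by how much text it carries; CLAMP squashes scores into [0, 1].
    return [(element, len(element.metadata.get("text", "")) / 100.0) for element in elements]


text_length_classifier = ElementClassifier(
    name="text_length",
    callback=_text_length_score_func,
    mode=ScalingMode.CLAMP,   # squash raw scores into [0, 1]
    highlight=0.5,            # highlight elements scoring above 0.5
)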
|
python
|
from django.urls import path
from . import views
app_name = 'search'
urlpatterns = [
path('theorem', views.SearchTheoremView.as_view(), name='index'),
]
|
python
|
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.authentication import BasicAuthentication
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from database.models import Table, Column, ValueDistribution
from database.serializers import (
TableSerializer,
ColumnSerializer,
ValueDistributionSerializer
)
class CsrfExemptSessionAuthentication(SessionAuthentication):
def enforce_csrf(self, request):
return  # Skip the CSRF check that SessionAuthentication enforces by default
class TableViewSet(viewsets.ModelViewSet):
queryset = Table.objects.all()
serializer_class = TableSerializer
lookup_field = 'name'
authentication_classes = (
BasicAuthentication, CsrfExemptSessionAuthentication)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
queryset = super(TableViewSet, self).get_queryset()
query = self.request.query_params.get("q", None)
if query:
queryset = queryset.filter(name__icontains=query)
return queryset
@action(detail=True)
def columns(self, request, name=None):
table = self.get_object()
columns = table.columns.all()
column_query = request.query_params.get("cq", None)
if column_query:
columns = columns.filter(name__icontains=column_query)
page = self.paginate_queryset(columns)
if page is not None:
serializer = ColumnSerializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = ColumnSerializer(columns, many=True)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def update_use_for_bi(self, request, name=None):
table = self.get_object()
use_for_bi = request.data.get("use_for_bi")
table.use_for_bi = use_for_bi
table.save()
serializer = TableSerializer(table, many=False)
return Response(serializer.data)
class ColumnViewSet(viewsets.ModelViewSet):
queryset = Column.objects.all()
serializer_class = ColumnSerializer
authentication_classes = (
BasicAuthentication, CsrfExemptSessionAuthentication)
permission_classes = (IsAuthenticated,)
@action(methods=['post'], detail=True)
def update_has_duplicates(self, request, pk=None):
column = self.get_object()
has_duplicates = request.data.get("has_duplicates")
column.has_duplicates = has_duplicates
column.save()
serializer = ColumnSerializer(column, many=False)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def update_is_structured(self, request, pk=None):
column = self.get_object()
is_structured = request.data.get("is_structured")
column.is_structured = is_structured
column.save()
serializer = ColumnSerializer(column, many=False)
return Response(serializer.data)
@action(methods=['post'], detail=True)
def update_needs_index(self, request, pk=None):
column = self.get_object()
needs_index = request.data.get("needs_index")
column.needs_index = needs_index
column.save()
serializer = ColumnSerializer(column, many=False)
return Response(serializer.data)
@action(methods=['get'], detail=True)
def value_distributions(self, request, pk=None):
column = self.get_object()
value_distributions = column.value_distributions.all()
page = self.paginate_queryset(value_distributions)
if page is not None:
serializer = ValueDistributionSerializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = ValueDistributionSerializer(
value_distributions, many=True)
return Response(serializer.data)
class ValueDistributionViewSet(viewsets.ModelViewSet):
queryset = ValueDistribution.objects.all()
serializer_class = ValueDistributionSerializer
|
python
|
from django.urls import path
from . import views
urlpatterns = [
path('config-creator/', views.config_creator, name='config_creator'),
path('data-generator/', views.data_generator, name='data_generator'),
]
|
python
|
# Import the required packages
from flask import Blueprint, request
from flask_restx import Api, Resource
"""
Create a blueprint. A blueprint is one building block of the project and mainly makes
it easier to manage: if your project consists of many sub-projects, creating a
blueprint (and the corresponding endpoints) for each sub-project is better than
putting every endpoint into the same .py file!
The Blueprint class accepts several other optional parameters; see its source code,
which documents them clearly.
"""
app_one_api = Blueprint('app_one_api', __name__)
"""
Initialize the API on our blueprint. It lets us use the blueprint as a decorator on
classes and methods, so we can create one class and implement different request
methods under it: a get method to handle GET requests, a post method to handle POST
requests, each returning its own content. It may look like more code, but it actually
makes the project easier to manage.
"""
api_one = Api(app_one_api)
"""
Set up the route. With a route we can build the corresponding URL and send different
kinds of requests to the same URL, which saves creating extra views. This is the role
of the Api() class mentioned above: it creates the route decorator that wraps our
class, and when different requests arrive, the decorator dispatches them to the
matching method of the class.
@staticmethod marks a method as static, so it can be called directly on the class or
on an instance created from it. The API layer is the core of the project; with this
stack you can comfortably write endpoint code.
"""
@api_one.route('/')
class Wss(Resource):
# Handle GET requests
@staticmethod
def get():
print('hello world')
return 'hello world'
# Handle POST requests
@staticmethod
def post():
json_data = request.json
print(json_data)
return 'hello world'
# Handle PUT requests
@staticmethod
def put():
print('hello world')
return 'hello world'
if __name__ == '__main__':
import requests
import json
res_one = requests.get('http://127.0.0.1:5000/hello')
res_two = requests.post('http://127.0.0.1:5000/hello', json=json.dumps({"a": "1"}))
res_three = requests.put('http://127.0.0.1:5000/hello')
print(res_one)
print(res_two)
print(res_three)
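# --- Illustrative sketch: registering this blueprint on an application ---
# The url_prefix below is an assumption chosen to match the '/hello' URLs used above.
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(app_one_api, url_prefix='/hello')
#   app.run(host='127.0.0.1', port=5000)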
|
python
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ComponentsschemasmicrosoftGraphTrendingallof1(Model):
"""trending.
:param weight:
:type weight: float
:param resource_visualization:
:type resource_visualization:
~users.models.MicrosoftgraphresourceVisualization
:param resource_reference:
:type resource_reference: ~users.models.MicrosoftgraphresourceReference
:param last_modified_date_time:
:type last_modified_date_time: datetime
:param resource:
:type resource: ~users.models.Microsoftgraphentity
"""
_attribute_map = {
'weight': {'key': 'weight', 'type': 'float'},
'resource_visualization': {'key': 'resourceVisualization', 'type': 'MicrosoftgraphresourceVisualization'},
'resource_reference': {'key': 'resourceReference', 'type': 'MicrosoftgraphresourceReference'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'resource': {'key': 'resource', 'type': 'Microsoftgraphentity'},
}
def __init__(self, weight=None, resource_visualization=None, resource_reference=None, last_modified_date_time=None, resource=None):
super(ComponentsschemasmicrosoftGraphTrendingallof1, self).__init__()
self.weight = weight
self.resource_visualization = resource_visualization
self.resource_reference = resource_reference
self.last_modified_date_time = last_modified_date_time
self.resource = resource
|
python
|
import numpy as np
import statistics
import pandas as pd
import time
import os
from sklearn.metrics import f1_score, accuracy_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.model_selection import cross_val_predict
import joblib  # sklearn.externals.joblib has been removed from recent scikit-learn releases
def get_labels(label_string):
"""
This function converts label from string to array of labels
Input: "(1, 2, 3, 4, 5)"
Output: [1, 2, 3, 4, 5]
"""
label_array = label_string[1:-1]
label_array = label_array.split(',')
label_array = [int(label) for label in label_array if len(label) > 0]
return label_array
def get_features(feature_string):
"""
This function converts feature vector from string to array of features
Input: "(1.2, 3.4, ..., 9.10)"
Output: [1.2, 3.4, ..., 9.10]
"""
feature_array = feature_string[1:-1]
feature_array = feature_array.split(',')
feature_array = [float(label) for label in feature_array]
return feature_array
# Set home paths for data and features
DATA_HOME = "/content/drive/My Drive/Yelp-Restaurant-Classification/Model/data/"
FEATURES_HOME = '/content/drive/My Drive/Yelp-Restaurant-Classification/Model/features/'
MODELS_HOME = '/content/drive/My Drive/Yelp-Restaurant-Classification/Model/model/'
# Read training data and test data
train_data = pd.read_csv(FEATURES_HOME + 'train_aggregate_features.csv')
# Separate the labels from features in the training data
trainX = np.array([get_features(feature) for feature in train_data['feature']])
trainY = np.array([get_labels(label) for label in train_data['label']])
# Hold out validation data for evaluation; random_state makes the split reproducible
trainX, validationX, trainY, validationY = train_test_split(trainX, trainY, test_size=0.3, random_state=42)
# Binary representation (just like one-hot vector) (1, 3, 5, 9) -> (1, 0, 1, 0, 1, 0, 0, 0, 1)
mlb = MultiLabelBinarizer()
trainY = mlb.fit_transform(trainY)
# Do the same for validation labels
actual_labels = validationY
mlb = MultiLabelBinarizer()
validationY = mlb.fit_transform(validationY)
svc_clf = OneVsRestClassifier(SVC(kernel='linear', probability=True, verbose=True))
rf_clf = RandomForestClassifier(n_estimators=200, oob_score=True, n_jobs=-1, random_state=42)
knn_clf = KNeighborsClassifier()
extra_tree_clf = ExtraTreesClassifier(n_estimators=195, max_leaf_nodes=16, n_jobs=-1, random_state=42)
for clf in [svc_clf, rf_clf, knn_clf, extra_tree_clf]:
if not os.path.isfile(MODELS_HOME + f'{clf.__class__.__name__}.pkl'):
# Start time
start_time = time.time()
# Fit the classifier on the training data and labels
clf.fit(trainX, trainY)
cross_val = cross_val_predict(clf, validationX, validationY, cv=3)
print(f"{clf.__class__.__name__} trained.")
joblib.dump((mlb,clf), MODELS_HOME + f'{clf.__class__.__name__}.pkl')
print("Model saved.")
# End time
end_time = time.time()
print(f"Overall F1 Score for {clf.__class__.__name__}:", f1_score(cross_val, validationY, average='micro'))
print(f"Individual F1 Score for {clf.__class__.__name__}:", f1_score(cross_val, validationY, average=None))
print(f"Variance of {clf.__class__.__name__} is:", statistics.variance(f1_score(cross_val, validationY, average=None)))
print(f"Time taken for training the {clf.__class__.__name__}", end_time - start_time, "sec")
print("======================================================")
print("\n")
mlb,clf = joblib.load(MODELS_HOME + f'{clf.__class__.__name__}'+".pkl")
print(f"{clf.__class__.__name__} Model loaded.")
# Predict the labels for the validation data
preds_binary = clf.predict(validationX)
# Predicted labels are converted back
# (1, 0, 1, 0, 1, 0, 0, 0, 1) -> (1, 3, 5, 9)
predicted_labels = mlb.inverse_transform(preds_binary)
print("Validation Set Results:")
print(f"Overall F1 Score for {clf.__class__.__name__}:", f1_score(preds_binary, validationY, average='micro'))
print(f"Individual F1 Score for {clf.__class__.__name__}:", f1_score(preds_binary, validationY, average=None))
print(f"Variance of {clf.__class__.__name__} is:", statistics.variance(f1_score(preds_binary, validationY, average=None)))
print("======================================================")
X_train_1, X_train_2, y_train_1, y_train_2 = train_test_split(trainX, trainY, random_state=42)
svc_clf = OneVsRestClassifier(SVC(kernel='linear', probability=True, verbose=True))
rf_clf = RandomForestClassifier(n_estimators=200, oob_score=True, n_jobs=-1, random_state=42)
knn_clf = KNeighborsClassifier()
extra_tree_clf = ExtraTreesClassifier(n_estimators=195, max_leaf_nodes=16, n_jobs=-1, random_state=42)
start_time = time.time()
rnd_clf_2 = RandomForestClassifier(random_state=42)
for p in [svc_clf, rf_clf, knn_clf, extra_tree_clf]:
p.fit(X_train_1, y_train_1)
svc_clf_p = svc_clf.predict(X_train_2)
rf_clf_p = rf_clf.predict(X_train_2)
knn_clf_p = knn_clf.predict(X_train_2)
held_out = np.column_stack((svc_clf_p, rf_clf_p, knn_clf_p))
rnd_clf_2.fit(held_out, y_train_2)
result_1 = []
for p in [svc_clf, rf_clf, knn_clf]:
result_1.append(p.predict(validationX))
y_pred_s = rnd_clf_2.predict(np.column_stack(tuple(result_1)))
# End time
end_time = time.time()
print(f"Time taken for training the Stacked Model:", end_time - start_time, "sec")
print(f"Overall Stacked F1 Score for:", f1_score(y_pred_s, validationY, average='micro'))
print(f"Overall Stacked F1 Score for:", f1_score(y_pred_s, validationY, average=None))
print(f"Variance of Stacked Model is:", statistics.variance(f1_score(y_pred_s, validationY, average=None)))
|
python
|
#!python3
a = (x for x in range(3))
print(next(a))
print(next(a))
print(next(a))
try:
print(next(a)) # -> raises StopIteration
except StopIteration:
print("StopIteration raised")
|
python
|
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
class FFNet(nn.Module):
def __init__(self, input_size, output_size, hidden_size=64):
super(FFNet, self).__init__()
# three hidden layers followed by the output layer
self.fc1 = nn.Linear(input_size, 2*hidden_size)
self.fc2 = nn.Linear(2*hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, hidden_size)
self.fc4 = nn.Linear(hidden_size, output_size)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = self.fc4(x)
return x
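# --- Illustrative usage (shapes are assumptions) ---
# net = FFNet(input_size=10, output_size=2)
# out = net(torch.randn(32, 10))  # -> tensor of shape (32, 2)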
|
python
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Solution 1: 44 ms, 13.9 MB
from typing import List  # needed for the List[...] type hints below
import queue
class Solution:
def levelOrder(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
else:
flag = TreeNode("")
q = queue.Queue()
finalArr = []
tempArr = []
q.put(root)
q.put(flag)
while not q.empty():
temp = q.get()
if temp.val == "":
finalArr.append(tempArr)
tempArr = []
if q.empty():
break
q.put(flag)
else:
tempArr.append(temp.val)
if temp.left:
q.put(temp.left)
if temp.right:
q.put(temp.right)
return finalArr
# Solution 2: process one level per iteration using the queue size
import queue
class Solution:
def levelOrder(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
else:
q = queue.Queue()
finalArr = []
q.put(root)
while not q.empty():
tempArr = []
size = q.qsize()
for i in range(size):
temp = q.get()
if temp.left:
q.put(temp.left)
if temp.right:
q.put(temp.right)
tempArr.append(temp.val)
finalArr.append(tempArr)
return finalArr
# Solution 3: 28 ms, using a plain list as the queue
class Solution:
def levelOrder(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
queue = []
finalArr = []
queue.append(root)
while len(queue) > 0:
tempArr = []
l = len(queue)
for i in range(l):
temp = queue.pop(0)
if temp.left:
queue.append(temp.left)
if temp.right:
queue.append(temp.right)
tempArr.append(temp.val)
finalArr.append(tempArr)
return finalArr
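# --- Illustrative check (using the TreeNode definition from the comment above) ---
# root = TreeNode(3); root.left = TreeNode(9); root.right = TreeNode(20)
# root.right.left = TreeNode(15); root.right.right = TreeNode(7)
# Solution().levelOrder(root)  # -> [[3], [9, 20], [15, 7]]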
|
python
|
from __future__ import annotations
import typing as t
from functools import wraps
from graphql import GraphQLArgument
from graphql import GraphQLBoolean
from graphql import GraphQLEnumType
from graphql import GraphQLField
from graphql import GraphQLFloat
from graphql import GraphQLID
from graphql import GraphQLInputField
from graphql import GraphQLInputObjectType
from graphql import GraphQLInt
from graphql import GraphQLInterfaceType
from graphql import GraphQLList
from graphql import GraphQLNonNull
from graphql import GraphQLObjectType
from graphql import GraphQLScalarType
from graphql import GraphQLString
from graphql import GraphQLType
from graphql import GraphQLUnionType
from graphql import GraphQLWrappingType
from graphql.type.scalars import coerce_float
from graphql.type.scalars import coerce_int
from inflection import camelize
RESERVED = [
"JSON",
"JSONType",
"DateTime",
"Text",
"Date",
"UnicodeText",
"Unicode",
"UrlType",
"PhoneNumberType",
"EmailType",
"Time",
"String",
"VARCHAR",
"Float",
"Numeric",
"Boolean",
"ChoiceType",
]
class NamingConflictError(Exception):
def __init__(self, name: str, magql_type: str):
super().__init__((name, magql_type))
self.name = name
self.magql_type = magql_type
def __str__(self) -> str:
return f"{self.magql_type} instance cannot use reserved name {self.name}"
def check_name(init: t.Callable) -> t.Callable:
@wraps(init)
def wrapper(*args: str) -> None:
if args[1] in RESERVED:
raise NamingConflictError(args[1], args[0].__class__.__name__)
init(*args)
return wrapper
class MagqlObjectType:
@check_name
def __init__(
self,
name: str,
fields: t.Optional[t.Dict[str, t.Any]] = None,
description: t.Optional[str] = None,
):
self.name = name
# dict of field_name to MagqlField
self.fields = fields if fields is not None else {}
self.description = description
def field(
self, field_name: str, return_type: t.Any, args: t.Optional[t.Any] = None
) -> t.Callable:
def decorator(resolve: t.Callable) -> t.Callable:
self.fields[field_name] = MagqlField(return_type, args, resolve)
return resolve
return decorator
# Convert each value in fields to GQLField
def convert(self, type_map: t.Dict[str, GraphQLType]) -> GraphQLObjectType:
if self.name in type_map:
return t.cast(GraphQLObjectType, type_map[self.name])
type_map[self.name] = GraphQLObjectType(
self.name, {}, None, description=self.description
)
for field_name, field in self.fields.items():
t.cast(GraphQLObjectType, type_map[self.name]).fields[
field_name
] = field.convert(type_map)
return t.cast(GraphQLObjectType, type_map[self.name])
class MagqlField:
def __init__(
self,
type_name: t.Optional[t.Any] = None,
args: t.Optional[t.Dict[str, MagqlArgument]] = None,
resolve: t.Optional[t.Callable] = None,
description: t.Optional[str] = None,
deprecation_reason: t.Optional[str] = None,
):
self.description = description
self.deprecation_reason = deprecation_reason
# String name representing type
self.type_name = type_name
self.args = args if args is not None else {}
self.resolve = resolve
def convert(
self,
type_map: t.Mapping[
str,
t.Union[
GraphQLScalarType,
GraphQLObjectType,
GraphQLInterfaceType,
GraphQLUnionType,
GraphQLEnumType,
GraphQLWrappingType,
],
],
) -> GraphQLField:
gql_args = {}
for arg_name, arg in self.args.items():
gql_args[arg_name] = arg.convert(type_map)
if self.type_name in type_map:
field_type = type_map[t.cast(str, self.type_name)]
else:
field_type = t.cast(t.Any, self.type_name).convert(type_map)
return GraphQLField(field_type, gql_args, self.resolve)
def js_camelize(word: str) -> str:
# add config check
# disable while camelcasing resolvers aren't added
return camelize(word, False)
class MagqlArgument: # noqa: E501
def __init__(self, type_: t.Any, default_value: t.Optional[t.Any] = None):
self.type_ = type_
self.default_value = default_value
def convert(
self,
type_map: t.Mapping[
str,
t.Union[
GraphQLScalarType,
GraphQLObjectType,
GraphQLInterfaceType,
GraphQLUnionType,
GraphQLEnumType,
GraphQLWrappingType,
],
],
) -> GraphQLArgument:
if self.type_ in type_map:
converted_type = type_map[self.type_]
else:
converted_type = self.type_.convert(type_map)
return GraphQLArgument(
t.cast(
t.Union[
GraphQLScalarType,
GraphQLEnumType,
GraphQLInputObjectType,
GraphQLWrappingType,
],
converted_type,
),
self.default_value,
)
class MagqlInputObjectType:
@check_name
def __init__(
self,
name: str,
fields: t.Optional[t.Dict[str, t.Any]] = None,
description: t.Optional[str] = None,
):
self.name = name
self.fields: t.Dict[str, t.Any] = fields if fields is not None else {}
self.description = description
def convert(self, type_map: t.Dict[str, GraphQLType]) -> GraphQLInputObjectType:
if self.name in type_map:
return t.cast(GraphQLInputObjectType, type_map[self.name])
type_map[self.name] = GraphQLInputObjectType(self.name, {}, self.description)
for field_name, field in self.fields.items():
t.cast(GraphQLInputObjectType, type_map[self.name]).fields[
field_name
] = field.convert(type_map)
return t.cast(GraphQLInputObjectType, type_map[self.name])
class MagqlInputField:
def __init__(self, type_name: t.Any, description: t.Optional[str] = None):
self.type_name = type_name
self.description = description
def convert(self, type_map: t.Mapping[str, GraphQLType]) -> GraphQLInputField:
if self.type_name in type_map:
field_type = type_map[self.type_name]
else:
field_type = self.type_name.convert(type_map)
return GraphQLInputField(
t.cast(
t.Union[
GraphQLScalarType,
GraphQLEnumType,
GraphQLInputObjectType,
GraphQLWrappingType,
],
field_type,
)
)
class MagqlWrappingType:
pass
class MagqlNonNull(MagqlWrappingType): # noqa: E501
def __init__(self, type_: t.Any):
self.type_ = type_
def convert(self, type_map: t.Dict[str, GraphQLType]) -> GraphQLNonNull:
if self.type_ in type_map:
return GraphQLNonNull(type_map[self.type_])
return GraphQLNonNull(self.type_.convert(type_map))
class MagqlList(MagqlWrappingType): # noqa: E501
def __init__(self, type_: t.Any):
self.type_ = type_
def convert(self, type_map: t.Dict[str, GraphQLType]) -> GraphQLList:
if self.type_ in type_map:
converted_type = type_map[self.type_]
else:
converted_type = self.type_.convert(type_map)
return GraphQLList(converted_type)
class MagqlEnumType:
@check_name
def __init__(self, name: str, values: t.Optional[t.Dict[str, t.Any]] = None):
self.name = name
self.values = values if values else {}
def convert(self, type_map: t.Dict[str, GraphQLType]) -> GraphQLEnumType:
if self.name in type_map:
return t.cast(GraphQLEnumType, type_map[self.name])
type_map[self.name] = GraphQLEnumType(self.name, self.values)
return t.cast(GraphQLEnumType, type_map[self.name])
class MagqlUnionType: # noqa: B903
@check_name
def __init__(
self,
name: str,
types: t.List[t.Union[str, GraphQLObjectType]],
resolve_type: t.Optional[t.Callable],
):
self.name = name
# List of magql_types or magql_names
self.types = types
self.resolve_types = resolve_type
def convert(self, type_map: t.Dict[str, GraphQLType]) -> GraphQLUnionType:
if self.name in type_map:
return t.cast(GraphQLUnionType, type_map[self.name])
types: t.List[GraphQLObjectType] = []
for enum_type in self.types:
if isinstance(enum_type, str):
types.append(t.cast(GraphQLObjectType, type_map[enum_type]))
else:
types.append(enum_type)
type_map[self.name] = GraphQLUnionType(self.name, types, self.resolve_types)
return t.cast(GraphQLUnionType, type_map[self.name])
class MagqlInt:
def __init__(self, parse_value: t.Optional[t.Callable] = None):
self.parse_value = parse_value
@staticmethod
def parse_value_accepts_string(value: str) -> int:
try:
converted_value = int(value)
except ValueError:
converted_value = coerce_int(value)
return converted_value
def convert(self, type_map: t.Mapping[str, GraphQLType]) -> GraphQLScalarType:
gql_int = GraphQLInt
if self.parse_value:
gql_int.parse_value = self.parse_value # type: ignore
return gql_int
class MagqlFloat:
def __init__(self, parse_value: t.Optional[t.Callable] = None):
self.parse_value = parse_value
@staticmethod
def parse_value_accepts_string(value: str) -> float:
try:
converted_value = float(value)
except ValueError:
converted_value = coerce_float(value)
return converted_value
def convert(self, type_map: t.Mapping[str, GraphQLType]) -> GraphQLScalarType:
gql_float = GraphQLFloat
if self.parse_value:
gql_float.parse_value = self.parse_value # type: ignore
return gql_float
class MagqlFile:
pass
class MagqlBoolean:
def convert(self, type_map: t.Mapping[str, GraphQLType]) -> GraphQLScalarType:
return GraphQLBoolean
class MagqlString:
def convert(self, type_map: t.Mapping[str, GraphQLType]) -> GraphQLScalarType:
return GraphQLString
class MagqlID:
def convert(self, type_map: t.Mapping[str, GraphQLType]) -> GraphQLScalarType:
return GraphQLID
|
python
|
# this_dict={
# "brand":'Ford',
# "model":'Mutang',
# "year": 1964
# }
# print(this_dict)
# ####
# # Access items
# x=this_dict['model']
# print(x)
# ## Access items with get method
# x=this_dict.get('model')
# print(x)
# ###########
# # Change value in dictionary
# this_dict['year']=2019
# print(this_dict)
# ########
# # using loop in dictionary
# for x in this_dict:
# print(x) # looping over a dict yields only the keys.
# ############
# # Access value in loop
# for x in this_dict.values():
# print(x)
# ##########
# # To access keys and values together, use the items() method.
# for x , y in this_dict.items():
# print(x,y)
# ############
# # check if a key exists in the dictionary
# if 'model' in this_dict:
# print('model')
# ###########
# # add a key/value pair to the dictionary.
# this_dict['color']='red'
# print(this_dict)
# #############
# # removing items from the dictionary
# # using pop() method;
# this_dict.pop('model')
# print(this_dict)
# ####
# popitem() removes the last inserted item
# this_dict.popitem()
# print(this_dict)
###########
# del removes the item with the specified key.
# del this_dict['brand']
# print(this_dict)
############
# clear() empties the dictionary.
# this_dict.clear()
# print(this_dict)
###############################################################
# Nested Dictionary.
# family={
# "child1":{"name":"Roshan","year":2004},
# "child2":{"name":"Prabhakar","year":2009},
# "child3":{"name":"Himani","year":1993},
# }
# print(family)
########
# Access items in nested Dictionary.
###################################################################################
# Write a program to convert a number to Roman numerals using a dictionary.
# roman_no={1000:'M',900:'CM',500:'D',400:'CD',100:'C',90:'XC',50:'L',40:'XL',10:'X',9:'IX',5:'V',4:'IV',1:'I'}
# user_input=int(input("Enter a number for convert into roman No: "))
# sum=''
# for i in roman_no:
# r=user_input//i
# user_input=user_input%i
# sum=sum+roman_no[i]*r
# print(sum)
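# Worked example of the conversion above (computed by hand following the loop):
#   1994 -> 'MCMXCIV'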
####################################################################################
# # Write a program to evaluate an arithmetic expression (BODMAS order of operations).
# import string
# new=[]
# add=''
# rem=''
# k=0
# numList=input("Enter a no: ")
# for i in range(len(numList)):
# # if numList[i] in string.punctuation and numList[i+1]==numList[i]:
# # pass
# if numList[i] in string.punctuation and k==1:
# rem+=numList[i]
# new.append(int(add))
# new.append(rem)
# add=''
# rem=''
# k=0
# else:
# add+=numList[i]
# k=1
# new.append(int(add))
# print(new)
# while True:
# index=0
# calc=0
# if '/' in new:
# index=new.index('/')
# calc=new[index-1]/new[index+1]
# elif '*' in new:
# index=new.index('*')
# calc=new[index-1]*new[index+1]
# elif '+' in new:
# index=new.index('+')
# calc=new[index-1]+new[index+1]
# elif '-' in new:
# index=new.index('-')
# calc=new[index-1]-new[index+1]
# del new[index-1:index+2]
# new.insert(index-1,calc)
# if len(new)==1:
# break
# print(new)
###############################################################################
# Write a program to convert a number written in words to its numeric value.
# word={'Thousand':1000,'Hundred':100,"Ninty":90,"Eight":80,'Seventy':70,'Sixty':60,
# 'Fifty':50,'Forty':40,'Thirty':30,'Twenty':20,'Ten':10,'Nine':9,'Eight':8,'Seven':7,'Six':6,
# 'Five':5,'Four':4,'Three':3,'Two':2,'One':1
# }
# No=input('Enter no in word')
# new=No.split()
# list1=[]
# list2=[]
# sum=0
# for i in new:
# if i=='Thousand' or i=='Hundred':
# list1.append(word[i])
# else:
# list2.append(word[i])
# for i in range(len(list1)):
# if list1[i]==1000 or list1[i]==100:
# sum=sum+list1[i]*list2[i]
# list2.pop(i)
# list2.insert(i,0)
# for j in list2:
# sum+=j
# print(sum)
########################################################################################
# Write a program to extract the fractional part of a decimal number.
# Number=input('Enter a Decimal No:')
# no=float(Number)
# b=no-int(no)
# print(b)
################################################################################
# Write a program to build multiplication tables and store them in a dictionary.
# user_input=int(input('Enter a no by user: '))
# dict1={}
# for i in range (1,user_input+1):
# new=[]
# mult=0
# for j in range(1,10+1):
# mult=i*j
# new.append(mult)
# dict1[i]=new
# print(dict1)
#####################################################################################
'''A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
Find the largest palindrome made from the product of two 3-digit numbers.
'''
# i=100
# big=0
# first_value,mulitple_value,second_value=0,0,0
# while i<1000:
# j=100
# while j<1000:
# pro=str(i*j)
# # print(pro)
# rev=pro[::-1]
# # print(i,j,rev,pro)
# if pro==rev:
# if int(pro)>big:
# big=int(pro)
# first_value=i
# second_value=j
# j+=1
# i+=1
# print(first_value,'x',second_value,big)
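# For reference, the answer the commented-out search above converges to:
#   913 x 993 = 906609, the largest palindromic product of two 3-digit numbers.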
# Write a program to print the prime factors of a number.
number=int(input('Enter a number: '))
new=[]
i=2
# Collect every divisor of the number up to number//2.
while i<=number//2+1:
    if number%i==0:
        new.append(i)
    i+=1
print(new)
# Print only the divisors that are themselves prime.
for i in new:
    j=2
    while j<=i//2:
        if i%j==0:
            break
        j+=1
    else:
        print(i)
|
python
|
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import sys
from echomesh.util.registry.Registry import Registry
def register_module(class_path, *modules, **kwds):
module = sys.modules[class_path]
registry = Registry(class_path, class_path=class_path, **kwds)
for sub in modules:
if isinstance(sub, six.string_types):
registry.register(sub, sub.lower())
else:
registry.register(sub[1], sub[0].lower())
return registry
def register(class_path, *modules, **kwds):
return register_module(
'.'.join(class_path.split('.')[:-1]), *modules, **kwds)
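# Illustrative sketch (not part of the original module): a hypothetical module inside
# an echomesh package could register command names for its package with:
#
#   REGISTRY = register(__name__, 'Start', 'Stop', 'Restart')
#
# register() strips the final dotted component from class_path, so the registry is
# created for the enclosing package of the calling module.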
|
python
|
# Copyright 2020 Akamai Technologies, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from distimate.types import DistributionType
try:
import pandas as pd
except ImportError:
pd = None
def _format_number(v):
if round(v) == v:
return str(int(v))
return str(v)
class DistributionAccessor(object):
"""
Implements ``.dist`` accessor on :class:`pandas.Series`.
    Allows :class:`.Distribution` methods to be called easily
    on all instances in a Pandas Series:
.. code-block:: python
df[col] = pd.Series.dist.from_histogram(dist_type, histograms)
median = df[col].dist.quantile(0.5)
"""
def __init__(self, series):
self._series = series
@staticmethod
def from_histogram(dist_type, histograms, *, name=None):
"""
Construct a new :class:`pandas.Series` from histograms.
This is a static method that can be accessed
as ``pd.Series.dist.from_histogram()``.
:param dist_type: :class:`.DistributionType` or
1-D array-like with histogram edges
:param histograms: :class:`pandas.DataFrame` or 2-D array-like
:param name: optional name of the series.
:return: :class:`pandas.Series`
"""
if not isinstance(dist_type, DistributionType):
dist_type = DistributionType(dist_type)
index = None
if isinstance(histograms, pd.DataFrame):
index = histograms.index
histograms = histograms.values
dists = [dist_type.from_histogram(histogram) for histogram in histograms]
return pd.Series(dists, index=index, name=name)
@staticmethod
def from_cumulative(dist_type, cumulatives, *, name=None):
"""
Construct a new :class:`pandas.Series` from cumulative histograms.
This is a static method that can be accessed
as ``pd.Series.dist.from_cumulative()``.
:param dist_type: :class:`.DistributionType` or
1-D array-like with histogram edges
        :param cumulatives: :class:`pandas.DataFrame` or 2-D array-like
:param name: Optional name of the series.
:return: :class:`pandas.Series`
"""
if not isinstance(dist_type, DistributionType):
dist_type = DistributionType(dist_type)
index = None
if isinstance(cumulatives, pd.DataFrame):
index = cumulatives.index
cumulatives = cumulatives.values
histograms = np.diff(cumulatives, prepend=0)
dists = [dist_type.from_histogram(histogram) for histogram in histograms]
return pd.Series(dists, index=index, name=name)
def to_histogram(self):
"""
Convert :class:`pandas.Series` of :class:`.Distribution`
instances to histograms.
:return: :class:`pandas.DataFrame` with histogram values.
"""
data = self.values
columns = [self._get_name(f"histogram{i}") for i in range(data.shape[-1])]
return pd.DataFrame(data, index=self._series.index, columns=columns)
def to_cumulative(self):
"""
Convert :class:`pandas.Series` of :class:`.Distribution` instances
to cumulative histograms.
:return: :class:`pandas.DataFrame` with cumulative values
"""
data = np.cumsum(self.values, axis=1)
columns = [self._get_name(f"cumulative{i}") for i in range(data.shape[-1])]
return pd.DataFrame(data, index=self._series.index, columns=columns)
def pdf(self, v):
"""
Compute PDF for :class:`pandas.Series` of :class:`.Distribution` instances.
:param v: input value, or list of them
:return: :class:`pandas.Series`
"""
return self._compute(self._pdf, v)
def cdf(self, v):
"""
Compute CDF for series of distribution instances.
:param v: input value, or list of them
:return: :class:`pandas.Series`
"""
return self._compute(self._cdf, v)
def quantile(self, v):
"""
        Compute quantile function for :class:`pandas.Series`
        of :class:`.Distribution` instances.
:param v: input value, or list of them
:return: :class:`pandas.Series`
"""
return self._compute(self._quantile, v)
@property
def values(self):
"""
Values of the underlying histograms.
:return: 2-D :class:`numpy.array`
"""
if self._series.empty:
return np.zeros((0, 0))
return np.array([dist.values for dist in self._series])
def _compute(self, meth, v):
if isinstance(v, (tuple, list)):
columns = [meth(i) for i in v]
return pd.concat(columns, axis=1)
return meth(v)
def _pdf(self, v):
name = self._get_name(f"pdf{_format_number(v)}")
data = [dist.pdf(v) if pd.notna(dist) else np.nan for dist in self._series]
return pd.Series(data, index=self._series.index, name=name)
def _cdf(self, v):
name = self._get_name(f"cdf{_format_number(v)}")
data = [dist.cdf(v) if pd.notna(dist) else np.nan for dist in self._series]
return pd.Series(data, index=self._series.index, name=name)
def _quantile(self, v):
name = self._get_name(f"q{_format_number(100 * v)}")
data = [dist.quantile(v) if pd.notna(dist) else np.nan for dist in self._series]
return pd.Series(data, index=self._series.index, name=name)
def _get_name(self, name):
if self._series.name is None:
return name
return f"{self._series.name}_{name}"
def register_to_pandas():
if pd is None:
        return  # pandas is not installed
pd.api.extensions.register_series_accessor("dist")(DistributionAccessor)
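# Illustrative usage sketch (not part of the original module). The edge values below
# are made up; 'histograms' stands for a pandas.DataFrame or 2-D array-like with one
# histogram row per distribution.
#
#   import pandas as pd
#   from distimate.types import DistributionType
#
#   register_to_pandas()
#   dist_type = DistributionType([0, 1, 10, 100])
#   series = pd.Series.dist.from_histogram(dist_type, histograms, name="latency")
#   medians = series.dist.quantile(0.5)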
|
python
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# =========================================================================== #
# Project : MLStudio #
# Version : 0.1.0 #
# File : regression.py #
# Python : 3.8.2 #
# -------------------------------------------------------------------------- #
# Author : John James #
# Company : DecisionScients #
# Email : [email protected] #
# URL : https://github.com/decisionscients/MLStudio #
# -------------------------------------------------------------------------- #
# Created : Wednesday, March 18th 2020, 4:34:57 am #
# Last Modified : Monday, March 23rd 2020, 10:31:37 am #
# Modified By : John James ([email protected]) #
# -------------------------------------------------------------------------- #
# License : BSD #
# Copyright (c) 2020 DecisionScients #
# =========================================================================== #
"""Regression algorithms.
This module encapsulates the core behaviors for regression classes. Currently,
the following regression classes are supported.
* Linear Regression
* Lasso Regression
* Ridge Regression
* ElasticNet Regression
The core behaviors exposed for each class include:
* predict : Predicts outputs as linear combination of inputs and weights.
* compute_cost : Computes cost associated with predictions
* compute_gradient : Computes the derivative of loss w.r.t. to weights
"""
import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.utils.validation import check_array, check_is_fitted, check_X_y
from mlstudio.supervised.metrics.regression import R2
from mlstudio.utils.data_manager import StandardScaler
# --------------------------------------------------------------------------- #
# LINEAR REGRESSION (OLS) #
# --------------------------------------------------------------------------- #
class OLSRegression(BaseEstimator, RegressorMixin):
"""Ordinary least squares closed form linear regression."""
def __init__(self, metric=R2()):
self.metric_ = metric
def fit(self, X, y):
"""Fits the linear regression ordinary least squares solution.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
Returns
-------
        self : returns an instance of self.
"""
# Validate input and target
X, y = check_X_y(X, y)
# Add bias term of ones to feature matrix
X = np.insert(X, 0, 1.0, axis=1)
        # Record the number of columns (features plus bias) and the matrix rank
        n = X.shape[1]
        r = np.linalg.matrix_rank(X)
# Find matrix equivalent using singular value decomposition
U, S, V = np.linalg.svd(X)
# Derive D^+ from sigma
D_plus = np.zeros((X.shape[0], X.shape[1])).T
D_plus[:S.shape[0], :S.shape[0]] = np.linalg.inv(np.diag(S))
# Compute Moore-Penrose pseudoinverse of X
X_plus = V.T.dot(D_plus).dot(U.T)
# Weights are the dot product of X_plus and y
theta = X_plus.dot(y)
# Save solution in attributes
self.intercept_ = theta[0]
self.coef_ = theta[1:]
return self
def predict(self, X):
"""Computes prediction.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data
Returns
-------
y_pred : prediction
"""
# Check if fit had been called
check_is_fitted(self)
# Input validation
X = check_array(X)
# Create prediction as linear combination of inputs and parameters
y_pred = self.intercept_ + X.dot(self.coef_)
return y_pred
def score(self, X, y):
"""Computes scores using the metric parameter.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data
y : array_like of shape (n_samples,) or (n_samples, n_classes)
The target variable.
Returns
-------
score based upon the metric object.
"""
X, y = check_X_y(X,y)
y_pred = self.predict(X)
score = self.metric_(y, y_pred)
return score
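# Illustrative usage sketch (not part of the original module); the synthetic data
# below is made up purely to demonstrate the estimator API.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.rand(100, 3)
    true_coef = np.array([1.5, -2.0, 0.5])
    y_demo = 0.7 + X_demo.dot(true_coef) + 0.01 * rng.randn(100)
    model = OLSRegression().fit(X_demo, y_demo)
    print("Intercept:", model.intercept_)
    print("Coefficients:", model.coef_)
    print("R2 on training data:", model.score(X_demo, y_demo))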
|
python
|
from django import forms
from django.contrib.auth.models import User
class POSTForm(forms.Form):
username = forms.CharField(
label='Username',
max_length=30,
help_text='Enter a unique name for your login',
widget=forms.TextInput(attrs={
'required': 'required',
'title': 'Enter a unique name for your login',
'data-toggle': "tooltip",
'data-placement': "right",
})
)
first_name = forms.CharField(
label='First Name',
max_length=30,
widget=forms.TextInput(attrs={
'required': 'required',
'title': 'Enter your First Name',
'data-toggle': "tooltip",
'data-placement': "right",
})
)
last_name = forms.CharField(
label='Last Name',
max_length=30,
widget=forms.TextInput(attrs={
'required': 'required',
'title': 'Enter your Last Name',
'data-toggle': "tooltip",
'data-placement': "right",
})
)
email = forms.EmailField(
label='E-Mail Address',
max_length=75,
widget=forms.EmailInput(attrs={
'required': 'required',
'title': 'Enter your email address to receive verification of submissions and updates',
'data-toggle': "tooltip",
'data-placement': "right",
})
)
password = forms.CharField(
label="Password",
max_length=128,
widget=forms.PasswordInput(attrs={
'required': 'required',
})
)
password2 = forms.CharField(
label="Verify Password",
max_length=128,
widget=forms.PasswordInput(attrs={
'required': 'required',
'title': 'Passwords Must match',
'data-toggle': "tooltip",
'data-placement': "right",
})
)
    def clean(self):
        form_data = super(POSTForm, self).clean()
        # Use .get() so a field that already failed validation does not raise a KeyError here.
        if form_data.get('password') != form_data.get('password2'):
            msg = "Passwords do not match"
            self.add_error('password', msg)
        return form_data
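# Illustrative sketch (not part of the original module) of how this form might be used
# in a view; the view name and template path are hypothetical.
#
#   from django.shortcuts import render
#
#   def register_view(request):
#       form = POSTForm(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           data = form.cleaned_data
#           User.objects.create_user(
#               username=data['username'], email=data['email'],
#               password=data['password'], first_name=data['first_name'],
#               last_name=data['last_name'])
#       return render(request, 'registration/register.html', {'form': form})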
|
python
|
#!/usr/bin/env python
# encoding: utf-8
#
# Stylus, Copyright 2006-2009 Biologic Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
common.py
Stylus, Copyright 2006-2009 Biologic Institute.
'''
import functools
import os
import os.path
import re
import shutil
import sys
import time
import urllib.request, urllib.error, urllib.parse
import urllib.parse
from . import xmldict as XMLDict
#==============================================================================
# Global Constants and Variables
#==============================================================================
class Constants:
basenamePlan = 'plan.xml'
filenameInitial = 'initial'
filenameTrial = 'trial'
filenameFinal = 'final'
globNotHidden = '[!.]*'
globTrials = 'trial*.xml'
reTRIALFILE = re.compile(r'trial(\d+).*')
schemeFile = 'file://'
urlSeparator = '/'
extGene = '.gene'
extCodonTable = '.table'
extHan = '.han'
extHCF = '.hcf'
extXHTML = '.html'
extXML = '.xml'
reBASENAMEUNICODE = re.compile(r'([\dA-Fa-f]{4,5}).*\.(\w+)')
reUUID = re.compile(r'([A-F\d]{8}\-[A-F\d]{4}\-[A-F\d]{4}\-[A-F\d]{4}\-[A-F\d]{12})')
# strExec 'd:n:e:u:g:p:c:f:l:s:t:a:qvh'
# aryExec [ 'datapath=', 'name=', 'expand=', 'urls=', 'genome=', 'plan=', 'constants=', 'frequency=', 'log=', 'statistics=', 'trace=', 'author=', 'quiet', 'version', 'help' ]
# strRpt 'r:d:n:e:u:g:p:i:x:m:qh'
# aryRpt [ 'reportpath=', 'datapath=', 'name=', 'expand=', 'urls=', 'genome=', 'plan=', 'images=', 'xhtml=', 'mpdb', 'quiet', 'help' ]
strShortArgs = 'd:r:n:e:u:g:p:c:f:l:s:t:a:i:x:m:qvh'
aryLongArgs = [ 'datapath=', 'reportpath=', 'name=', 'expand=', 'urls=', 'genome=', 'plan=', 'constants=', 'frequency=', 'log=', 'statistics=', 'trace=', 'author=', 'images=', 'xhtml=', 'mpdb', 'quiet', 'version', 'help' ]
class Globals:
fQuiet = False
fEchoStderr = False
#==============================================================================
# Classes
#==============================================================================
#------------------------------------------------------------------------------
# Class: BiologicError
#
#------------------------------------------------------------------------------
class BiologicError(Exception):
def __init__(self, msg):
self.msg = ''
if msg and len(msg) > 0:
self.msg = 'Error: ' + msg
def __str__(self):
return self.msg
#------------------------------------------------------------------------------
# Class: URLs
#
#------------------------------------------------------------------------------
class URLs:
GENOME = 0x0001
HAN = 0x0002
HTML = 0x0004
PLAN = 0x0008
SCHEMA = 0x0010
urlGenome = ''
urlHan = ''
urlHTML = ''
urlPlan = ''
urlSchema = ''
def __init__(self):
self.urlGenome = readEnvironment('$STYLUS_GENOMEURL')
self.urlHan = readEnvironment('$STYLUS_HANURL')
self.urlHTML = readEnvironment('$STYLUS_HTMLURL')
self.urlPlan = readEnvironment('$STYLUS_PLANURL')
self.urlSchema = readEnvironment('$STYLUS_SCHEMAURL')
def set(self, strURLs):
aryArgs = strURLs.split(',')
if len(aryArgs) > 5:
            raise BiologicError('URLs requires no more than five arguments')
if len(aryArgs) > 0 and aryArgs[0]:
self.urlGenome = aryArgs[0]
if len(aryArgs) > 1 and aryArgs[1]:
self.urlHan = aryArgs[1]
if len(aryArgs) > 2 and aryArgs[2]:
self.urlHTML = aryArgs[2]
if len(aryArgs) > 3 and aryArgs[3]:
self.urlPlan = aryArgs[3]
if len(aryArgs) > 4 and aryArgs[4]:
self.urlSchema = aryArgs[4]
def validate(self, grf):
if grf & self.GENOME:
if not self.urlGenome:
raise BiologicError('Required Genome URL is missing')
self.urlGenome = self.__pathToURL__(self.urlGenome, Constants.schemeFile)
if grf & self.HAN:
if not self.urlHan:
raise BiologicError('Required Han URL is missing')
self.urlHan = self.__pathToURL__(self.urlHan, Constants.schemeFile)
self.__validateScheme__(self.urlHan)
if grf & self.HTML:
if not self.urlHTML:
raise BiologicError('Required HTML URL is missing')
self.urlHTML = self.__pathToURL__(self.urlHTML, Constants.schemeFile)
if grf & self.PLAN:
if not self.urlPlan:
raise BiologicError('Required Plan URL is missing')
self.urlPlan = self.__pathToURL__(self.urlPlan, Constants.schemeFile)
if grf & self.SCHEMA:
if not self.urlSchema:
raise BiologicError('Required Schema URL is missing')
self.urlSchema = self.__pathToURL__(self.urlSchema, Constants.schemeFile)
self.__validateScheme__(self.urlSchema)
def __pathToURL__(self, strPath, strURL):
strURL = pathToURL(strPath, strURL)
if strURL[len(strURL)-1] != Constants.urlSeparator:
strURL += Constants.urlSeparator
return strURL
def __validateScheme__(self, url):
aryURL = urllib.parse.urlsplit(url.lower())
if aryURL[0] != 'http' and aryURL[0] != 'file':
raise BiologicError(url + ' uses an unaccepted scheme - only http: or file: may be used')
#------------------------------------------------------------------------------
# Class: Names
#
#------------------------------------------------------------------------------
class Names:
DATA = 0x0001
REPORT = 0x0002
NAME = 0x0004
EXPAND = 0x0008
GENOME = 0x0010
PLAN = 0x0020
def __init__(self):
self.strDataPath = readEnvironment('$STYLUS_DATAPATH')
self.strReportPath = readEnvironment('$STYLUS_RPTPATH')
self.strName = ''
self.strNameType = 'file'
self.fAppendPlan = False
self.strExpand = ''
self.fExpandRepeat = False
self.strGenome = ''
self.strPlan = ''
self.pathData = ''
        self.pathReport = ''
self.pathExpandData = ''
self.pathExpandReport = ''
self.urlGenome = ''
self.idGenome = ''
self.urlPlan = ''
self.idPlan = ''
self.nFirstPlanTrial = 0
self.cPlanTrialsToExecute = 0
def set(self, grf, strValue):
if grf & self.DATA:
self.strDataPath = strValue
elif grf & self.REPORT:
self.strReportPath = strValue
elif grf & self.NAME:
aryArg = strValue.split(',')
self.strName = aryArg[0]
if len(aryArg) > 1 and aryArg[1]:
if not aryArg[1] in [ 'asis', 'dir', 'file', 'unicode', 'uuid' ]:
raise BiologicError(aryArg[1] + ' is an unknown option for the name argument')
self.strNameType = aryArg[1]
if len(aryArg) > 2 and aryArg[2]:
if aryArg[2] != 'plan':
raise BiologicError(aryArg[2] + ' is an unknown option for the name argument')
self.fAppendPlan = True
elif grf & self.EXPAND:
aryArgs = strValue.split(',')
aryArgs[0] = ensureInteger(aryArgs[0], 0, 'expand requires a positive integer value')
if aryArgs[0] <= 0:
                raise BiologicError('expand requires a positive integer value - rather than "%s"' % aryArgs[0])
self.strExpand = aryArgs[0]
if len(aryArgs) > 1:
if aryArgs[1] != 'repeat':
raise BiologicError(aryArgs[1] + ' is an unknown option for the expand argument')
self.fExpandRepeat = True
elif grf & self.GENOME:
self.strGenome = strValue
elif grf & self.PLAN:
aryArgs = strValue.split(',')
if len(aryArgs) < 1 or len(aryArgs) > 3:
raise BiologicError('plan requires between one and three arguments')
if aryArgs[0]:
self.strPlan = aryArgs[0]
if len(aryArgs) > 1:
self.cPlanTrialsToExecute = ensureInteger(aryArgs[1], self.cPlanTrialsToExecute, 'plan requires an integer for the number of trials to execute')
if len(aryArgs) > 2:
self.nFirstPlanTrial = ensureInteger(aryArgs[2], self.nFirstPlanTrial, 'plan requires an integer for the first trial')
def validate(self, urls, grf):
if self.strNameType == 'asis' and not self.strName:
raise BiologicError('asis requires specifying a name')
if (grf & self.GENOME) or self.strNameType != 'dir':
if not self.strGenome:
raise BiologicError('Required genome was not supplied')
urls.validate(URLs.GENOME)
self.urlGenome = pathToURL(makeHanPath(resolvePath(self.strGenome)), urls.urlGenome)
if grf & (self.DATA | self.REPORT):
if self.strNameType == 'asis' or self.strNameType == 'file':
self.idGenome = os.path.splitext(os.path.basename(self.urlGenome))[0]
elif self.strNameType == 'uuid':
if not Constants.reUUID.match(self.strGenome):
xmlDict = XMLDict.XMLDict({ XMLDict.XMLDict.ignore : [ 'seed', 'bases', 'statistics', 'lineage', 'genes' ] })
try: self.idGenome = str(xmlDict.load(self.urlGenome)['genome']['uuid']).upper()
except LookupError as err: raise BiologicError('Genome (%s) is missing a UUID' % self.urlGenome)
elif self.strNameType == 'unicode':
aryParts = os.path.split(self.urlGenome)
mo = Constants.reBASENAMEUNICODE.match(aryParts[1])
if mo and mo.groups():
self.idGenome = mo.groups()[0]
else:
raise BiologicError('unicode name patterns required a genome file whose name begins with a unicode value')
if grf & self.PLAN or self.fAppendPlan:
if not self.strPlan:
raise BiologicError('Required plan was not supplied')
urls.validate(URLs.PLAN)
self.urlPlan = pathToURL(resolvePath(self.strPlan), urls.urlPlan)
self.idPlan = os.path.splitext(os.path.basename(self.urlPlan))[0]
if grf & self.DATA:
if not self.strDataPath:
raise BiologicError('Required datapath was not supplied')
self.pathData = resolvePath(self.strDataPath)
if self.strNameType == 'asis':
self.pathData = os.path.join(self.pathData, self.strName)
else:
idGenome = self.fAppendPlan and (self.idGenome + '--' + self.idPlan) or self.idGenome
self.pathData = os.path.join(self.pathData, self.strName, idGenome)
# Expand the data path unless expanding reports (since report expansion will set the data path as needed)
if self.strExpand and not grf & self.REPORT:
self.urlGenome, self.pathExpandData = self.__expandGenome__(self.pathData, Constants.extXML)
self.pathData = os.path.join(self.pathData, self.pathExpandData)
if not self.nFirstPlanTrial:
self.nFirstPlanTrial = 1
self.pathData = os.path.normpath(os.path.normcase(self.pathData))
if grf & self.REPORT:
if not self.strReportPath:
raise BiologicError('Required reportpath was not supplied')
self.pathReport = resolvePath(self.strReportPath)
if self.strNameType == 'asis':
self.pathReport = os.path.join(self.pathReport, self.strName)
elif self.strNameType == 'dir':
strHead, strTail = os.path.split(self.strDataPath)
if not strTail:
strHead, strTail = os.path.split(strHead)
strTail, strExt = os.path.splitext(strTail)
self.pathReport = os.path.join(self.pathReport, strTail)
else:
idGenome = self.fAppendPlan and (self.idGenome + '--' + self.idPlan) or self.idGenome
self.pathReport = os.path.join(self.pathReport, self.strName, idGenome)
if self.strNameType != 'dir' and self.strExpand:
strNotUsed, self.pathExpandReport = self.__expandGenome__(self.pathReport, Constants.extXML)
self.pathReport = os.path.join(self.pathReport, self.pathExpandReport)
# Map the report path to the data path (since the data may have already been expanded once or more)
self.pathData = os.path.normpath(os.path.normcase(os.path.join(self.pathData, self.pathExpandReport)))
self.pathReport = os.path.normpath(os.path.normcase(self.pathReport))
def __expandGenome__(self, strPath, strExt):
urlParent = ''
pathExpand = ''
for root, dirs, files in os.walk(resolvePath(strPath)):
if self.fExpandRepeat or not dirs:
fileGenome = Constants.filenameInitial + strExt
                files.sort(key=functools.cmp_to_key(cmpDataFilenames))
for f in files:
mo = Constants.reTRIALFILE.match(f)
if mo and mo.groups():
iTrial = int(mo.groups()[0])
if self.strExpand == iTrial:
urlParent = pathToURL(os.path.join(root, fileGenome), Constants.schemeFile)
else:
fileGenome = f
if urlParent:
break
if dirs:
                dirs.sort(key=functools.cmp_to_key(cmpDataFilenames))
for d in dirs:
mo = Constants.reTRIALFILE.match(d)
if mo and mo.groups():
iTrial = int(mo.groups()[0])
if iTrial >= self.strExpand:
pathExpand = os.path.join(pathExpand, d)
dirs = [ d ]
break
if not urlParent:
raise BiologicError('Unable to find trial %ld within %s' % (self.strExpand, strPath))
pathExpand = os.path.join(pathExpand, '%s%ld' % (Constants.filenameTrial, self.strExpand))
return urlParent, pathExpand
#==============================================================================
# Global Functions
#==============================================================================
#------------------------------------------------------------------------------
# Function: cmpDataFilenames
#
#------------------------------------------------------------------------------
def cmp(x, y):
    # Python 3 removed the builtin cmp(); restore the classic three-way comparison.
    return (x > y) - (x < y)
def cmpDataFilenames(strPath1, strPath2):
ary1 = os.path.split(strPath1)
ary2 = os.path.split(strPath2)
if ary1[0] != ary2[0]:
return cmp(strPath1, strPath2)
elif ary1[1] == ary2[1]:
return 0
else:
if ary1[1].startswith(Constants.filenameFinal):
return 1
if ary2[1].startswith(Constants.filenameFinal):
return -1
isTrial1 = ary1[1].startswith(Constants.filenameTrial)
isTrial2 = ary2[1].startswith(Constants.filenameTrial)
if isTrial1 and not isTrial2:
return 1
if not isTrial1 and isTrial2:
return -1
if isTrial1 and isTrial2:
ary1 = os.path.splitext(ary1[1])
ary2 = os.path.splitext(ary2[1])
return cmp(toInteger(ary1[0][len(Constants.filenameTrial):]), toInteger(ary2[0][len(Constants.filenameTrial):]))
else:
return cmp(ary1[1], ary2[1])
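# Illustrative ordering (not from the original source): sorted with
# functools.cmp_to_key(cmpDataFilenames), filenames in one directory come out as
#   ['initial.xml', 'trial2.xml', 'trial10.xml', 'final.xml']
# i.e. the initial file first, trial files in numeric order, and the final file last.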
#------------------------------------------------------------------------------
# Function: copyURLToFile
#
#------------------------------------------------------------------------------
def copyURLToFile(strURL, strFile):
    # readFile() returns raw bytes, so write the destination file in binary mode
    data = readFile(strURL)
    with open(strFile, 'wb') as fileDest:
        fileDest.write(data)
    return
#------------------------------------------------------------------------------
# Function: ensureInteger
#
#------------------------------------------------------------------------------
def ensureInteger(strValue, nDefault, strError):
if strValue:
try: return int(strValue)
except ValueError as err: raise BiologicError(strError + ' - rather than "' + strValue + '"')
else:
return nDefault
#------------------------------------------------------------------------------
# Function: ensurePath
#
#------------------------------------------------------------------------------
def ensurePath(strPath):
if not os.path.exists(strPath):
try: os.makedirs(strPath)
except OSError: raise BiologicError('Unable to create ' + strPath)
#------------------------------------------------------------------------------
# Function: isDir
#
#------------------------------------------------------------------------------
def isDir(strPath):
return os.path.exists(strPath) and os.path.isdir(strPath)
#------------------------------------------------------------------------------
# Function: makeHanPath
#
#------------------------------------------------------------------------------
def makeHanPath(strPath):
if not os.path.isabs(strPath) and not urllib.parse.urlparse(strPath)[0]:
aryParts = os.path.split(strPath)
if aryParts[1]:
mo = Constants.reBASENAMEUNICODE.match(aryParts[1])
if mo and mo.groups():
strUnicode = mo.groups()[0]
strPath = os.path.join(aryParts[0], strUnicode[:len(strUnicode)-3] + '000', aryParts[1])
return strPath
#------------------------------------------------------------------------------
# Function: pathToURL
#
#------------------------------------------------------------------------------
def pathToURL(strPath, strURL):
strPath = resolvePath(strPath)
return os.path.isabs(strPath) and urllib.parse.urljoin(Constants.schemeFile, strPath) or urllib.parse.urljoin(strURL, strPath)
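# Illustrative examples (not from the original source), assuming a POSIX path layout:
#   pathToURL('/data/genes/200E.gene', 'http://example.org/genes/')
#       -> 'file:///data/genes/200E.gene'       (absolute paths become file: URLs)
#   pathToURL('200E.gene', 'http://example.org/genes/')
#       -> 'http://example.org/genes/200E.gene' (relative paths join the base URL)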
#------------------------------------------------------------------------------
# Function: readEnvironment
#
#------------------------------------------------------------------------------
def readEnvironment(strVariable):
strValue = os.path.expandvars(strVariable)
if strValue == strVariable:
strValue = ''
return strValue
#------------------------------------------------------------------------------
# Function: readFile
#
#------------------------------------------------------------------------------
def readFile(strURL):
try:
file = urllib.request.urlopen(strURL)
str = file.read()
file.close()
return str
except:
raise BiologicError('Unable to read ' + strURL)
#------------------------------------------------------------------------------
# Function: redirectStderr
#
#------------------------------------------------------------------------------
def redirectStderr(strFile):
sys.stderr = open(strFile, 'a+')
Globals.fEchoStderr = True
#------------------------------------------------------------------------------
# Function: replacePath
#
#------------------------------------------------------------------------------
def replacePath(strPath):
if os.path.exists(strPath):
if not os.path.isdir(strPath):
raise BiologicError(strPath + ' exists and is not a directory')
try: shutil.rmtree(strPath)
except OSError as err: raise BiologicError('Unable to replace ' + strPath)
ensurePath(strPath)
#------------------------------------------------------------------------------
# Function: resolvePath
#
#------------------------------------------------------------------------------
def resolvePath(strPath):
if strPath:
if len(strPath) >= 2 and strPath[0] == '.':
if strPath[1] == os.sep:
strPath = os.path.join(os.getcwd(), strPath[2:])
elif strPath[1] == '.':
strPath = os.path.join(os.path.dirname(os.getcwd()),strPath[3:])
strPath = os.path.expanduser(strPath)
if not urllib.parse.urlparse(strPath)[0]:
fPreserveSeparator = (strPath[-1] == os.sep)
strPath = os.path.normpath(os.path.normcase(strPath))
if fPreserveSeparator:
strPath += os.sep
return strPath
#------------------------------------------------------------------------------
# Function: roundTo
#
#------------------------------------------------------------------------------
def roundTo(f, n, fUp):
f = fUp and (f+n) or (f-n)
    return int(f / n) * n
#------------------------------------------------------------------------------
# Function: say, sayError
#
#------------------------------------------------------------------------------
def say(strMsg, fNewline=True, fIgnoreQuiet=False):
if not Globals.fQuiet or fIgnoreQuiet:
sys.stdout.write(strMsg)
if fNewline:
sys.stdout.write('\n')
else:
sys.stdout.flush()
return True
def sayError(strMsg):
if not Globals.fQuiet:
print(strMsg, file=sys.stderr)
if Globals.fEchoStderr:
print(strMsg)
return True
#------------------------------------------------------------------------------
# Function: toFloat
#
#------------------------------------------------------------------------------
def toFloat(strValue):
try: return float(strValue)
except ValueError as err: raise BiologicError('A floating-point number is required rather than "' + strValue + '"')
#------------------------------------------------------------------------------
# Function: toInteger
#
#------------------------------------------------------------------------------
def toInteger(strValue):
try: return int(strValue)
except ValueError as err: raise BiologicError('An integer is required rather than "' + strValue + '"')
#------------------------------------------------------------------------------
# Function: trimDirectory
#
#------------------------------------------------------------------------------
def trimDirectory(strPath):
iSep = strPath.index(os.sep)
return iSep >= 0 and strPath[iSep+1:] or strPath
if __name__ == '__main__':
pass
|
python
|
from datetime import datetime
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from freezegun import freeze_time
from core.models import CoffeType, Harvest
from core.serializers import CoffeTypeSerializer, HarvestSerializer
COFFE_TYPE_URL = reverse('core:coffe_types-list')
HARVEST_URL = reverse('core:harvests-list')
STORAGE_REPORT_URL = reverse('core:storage_report-list')
LOGIN_URL = '/api/v1/login/'
def perform_login(email, password, api_client):
credentials = {'email': email, 'password': password}
get_user_model().objects.create_user(**credentials)
response = api_client.post(LOGIN_URL, credentials)
return response.data.get('token')
class CoffeTypeTestCase(TestCase):
def setUp(self):
self.coffe_type_a = CoffeType.objects.create(
name='a', expiration_time=5
)
self.coffe_type_b = CoffeType.objects.create(
name='b', expiration_time=5
)
self.client = APIClient()
self.token = perform_login(
'[email protected]', 'password', self.client
)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
def test_endpoint_requires_authentication_token(self):
unauthenticated_client = APIClient()
response = unauthenticated_client.get(COFFE_TYPE_URL)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_listing_all_coffe_types(self):
response = self.client.get(COFFE_TYPE_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for coffe_type in (self.coffe_type_a, self.coffe_type_b):
self.assertIn(CoffeTypeSerializer(coffe_type).data, response.data)
def test_create_a_new_coffe_type(self):
payload = {
'name': 'coffe c',
'expiration_time': 50
}
response = self.client.post(COFFE_TYPE_URL, payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(CoffeType.objects.count(), 3)
def test_update_coffe_type(self):
response = self.client.patch(
reverse('core:coffe_types-detail', args=[self.coffe_type_a.id]),
{'expiration_time': 20}
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
coffe_type = CoffeType.objects.get(pk=self.coffe_type_a.id)
self.assertEqual(coffe_type.expiration_time, 20)
def test_delete_coffe(self):
response = self.client.delete(
reverse('core:coffe_types-detail', args=[self.coffe_type_a.id])
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
coffe_type = CoffeType.objects.filter(id=self.coffe_type_a.id).first()
self.assertIsNone(coffe_type)
@freeze_time('2019-12-01')
class HarvestTestCase(TestCase):
def setUp(self):
self.coffe_type = CoffeType.objects.create(name='x', expiration_time=5)
self.client = APIClient()
self.token = perform_login(
'[email protected]', 'password', self.client
)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token)
def test_listing_only_own_harvests(self):
user1 = get_user_model().objects.get(email='[email protected]')
user2 = get_user_model().objects.create_user(
email='[email protected]', password='secret_password'
)
date_ = datetime(2019, 11, 25).date()
h1 = Harvest.objects.create(
farm='Fazendinha', bags=500, date=date_,
coffe_type=self.coffe_type, owner=user1
)
h2 = Harvest.objects.create(
farm='Fazendona', bags=1000, date=date_,
coffe_type=self.coffe_type, owner=user1
)
h3 = Harvest.objects.create(
farm='Fazenda', bags=750, date=date_, coffe_type=self.coffe_type,
owner=user2
)
response = self.client.get(HARVEST_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn(HarvestSerializer(h1).data, response.data)
self.assertIn(HarvestSerializer(h2).data, response.data)
self.assertNotIn(HarvestSerializer(h3).data, response.data)
def test_get_storage_report(self):
user1 = get_user_model().objects.get(email='[email protected]')
coffe_type_2 = CoffeType.objects.create(name='t2', expiration_time=15)
date1 = datetime(2019, 11, 28).date() # Coffe 1 and 2 good
date2 = datetime(2019, 11, 20).date() # Coffe 1 bad and 2 good
date3 = datetime(2019, 11, 10).date() # Coffe 1 and 2 bad
Harvest.objects.create(farm='Fazenda Tamoatá', bags=500, date=date1,
coffe_type=self.coffe_type, owner=user1)
Harvest.objects.create(farm='Fazenda Nápoles', bags=780, date=date1,
coffe_type=coffe_type_2, owner=user1)
Harvest.objects.create(farm='Fazenda da alegria', bags=910, date=date2,
coffe_type=self.coffe_type, owner=user1)
Harvest.objects.create(farm='Fazenda do Python', bags=1500, date=date2,
coffe_type=coffe_type_2, owner=user1)
Harvest.objects.create(farm='Fazenda Pinguim', bags=235, date=date3,
coffe_type=self.coffe_type, owner=user1)
Harvest.objects.create(farm='Fazenda São João', bags=700, date=date3,
coffe_type=coffe_type_2, owner=user1)
response = self.client.get(STORAGE_REPORT_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected_fields = (
'total_bags', 'non_expired_bags', 'expired_bags',
'origin_farms', 'coffe_types'
)
for field in expected_fields:
self.assertIn(field, response.data)
self.assertEqual(response.data.get('total_bags'), 4625)
self.assertEqual(response.data.get('non_expired_bags'), 2780)
self.assertIn('Fazenda do Python', response.data.get('origin_farms'))
self.assertIn('Fazenda Tamoatá', response.data.get('origin_farms'))
self.assertIn(self.coffe_type.id, response.data.get('coffe_types'))
self.assertIn(coffe_type_2.id, response.data.get('coffe_types'))
|
python
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright 2015 Pascual Martinez-Gomez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from abduction_tools_test import GetPremisesThatMatchConclusionArgsTestCase
from abduction_tools_test import GetTreePredArgsTestCase
from category_test import CategoryTestCase
from ccg2lambda_tools_test import AssignSemanticsToCCGTestCase
from ccg2lambda_tools_test import AssignSemanticsToCCGWithFeatsTestCase
from ccg2lambda_tools_test import get_attributes_from_ccg_node_recursivelyTestCase
from ccg2lambda_tools_test import TypeRaiseTestCase
from knowledge_test import LexicalRelationsTestCase
from nltk2coq_test import Nltk2coqTestCase
from semantic_index_test import GetSemanticRepresentationTestCase
from semantic_tools_test import resolve_prefix_to_infix_operationsTestCase
from semantic_types_test import ArbiAutoTypesTestCase
from semantic_types_test import build_arbitrary_dynamic_libraryTestCase
from semantic_types_test import build_dynamic_libraryTestCase
from semantic_types_test import Coq2NLTKTypesTestCase
from semantic_types_test import Coq2NLTKSignaturesTestCase
from semantic_types_test import combine_signatures_or_rename_predsTestCase
if __name__ == '__main__':
suite1 = unittest.TestLoader().loadTestsFromTestCase(AssignSemanticsToCCGTestCase)
suite2 = unittest.TestLoader().loadTestsFromTestCase(AssignSemanticsToCCGWithFeatsTestCase)
suite3 = unittest.TestLoader().loadTestsFromTestCase(TypeRaiseTestCase)
suite4 = unittest.TestLoader().loadTestsFromTestCase(build_dynamic_libraryTestCase)
suite5 = unittest.TestLoader().loadTestsFromTestCase(resolve_prefix_to_infix_operationsTestCase)
suite6 = unittest.TestLoader().loadTestsFromTestCase(Nltk2coqTestCase)
suite7 = unittest.TestLoader().loadTestsFromTestCase(build_arbitrary_dynamic_libraryTestCase)
suite8 = unittest.TestLoader().loadTestsFromTestCase(LexicalRelationsTestCase)
suite9 = unittest.TestLoader().loadTestsFromTestCase(Coq2NLTKTypesTestCase)
suite10 = unittest.TestLoader().loadTestsFromTestCase(Coq2NLTKSignaturesTestCase)
suite11 = unittest.TestLoader().loadTestsFromTestCase(ArbiAutoTypesTestCase)
suite12 = unittest.TestLoader().loadTestsFromTestCase(get_attributes_from_ccg_node_recursivelyTestCase)
suite13 = unittest.TestLoader().loadTestsFromTestCase(GetSemanticRepresentationTestCase)
suite14 = unittest.TestLoader().loadTestsFromTestCase(GetTreePredArgsTestCase)
suite15 = unittest.TestLoader().loadTestsFromTestCase(GetPremisesThatMatchConclusionArgsTestCase)
suite16 = unittest.TestLoader().loadTestsFromTestCase(combine_signatures_or_rename_predsTestCase)
suite17 = unittest.TestLoader().loadTestsFromTestCase(CategoryTestCase)
suites = unittest.TestSuite([suite1, suite2, suite3, suite4, suite5, suite6,
suite7, suite8, suite9, suite10, suite11, suite12,
suite13, suite14, suite15, suite16, suite17])
unittest.TextTestRunner(verbosity=2).run(suites)
|
python
|
"""
Color definitions for various charts
https://htmlcolorcodes.com/color-chart/
"""
colors = {
'orange1': '#E74C3C',
'blue1': '#1B4F72',
'blue2': '#2874A6',
'blue3': '#3498DB',
'blue4': '#85C1E9'
}
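# Illustrative sketch (not part of the original module): the hex strings above can be
# passed to any plotting library that accepts hex colors, e.g. with matplotlib:
#   plt.plot(xs, ys, color=colors['blue3'])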
|
python
|
# coding=utf-8
import time
import asyncio
import logging
from asyncio import CancelledError
import aiohttp
import async_timeout
from freehp.extractor import extract_proxies
log = logging.getLogger(__name__)
class ProxySpider:
def __init__(self, config, loop=None):
self._proxy_pages = config.get('proxy_pages', {})
log.debug('Details of proxy pages: %s', [i for i in self._proxy_pages])
self._scrap_interval = config.getint("scrap_interval")
self._timeout = config.getint("spider_timeout")
self._sleep_time = config.getint("spider_sleep_time")
self._headers = config.get("spider_headers", {})
self._loop = loop or asyncio.get_event_loop()
self.futures = None
self._receivers = []
@classmethod
def from_manager(cls, manager):
return cls(manager.config, loop=manager.loop)
def subscribe(self, receiver):
self._receivers.append(receiver)
def open(self):
self.futures = []
for p in self._proxy_pages:
f = asyncio.ensure_future(self._update_proxy_task(self._proxy_pages[p]), loop=self._loop)
self.futures.append(f)
def close(self):
if self.futures:
for f in self.futures:
f.cancel()
self.futures = None
async def _update_proxy_task(self, urls):
if not isinstance(urls, list):
urls = [urls]
while True:
t = await self._update_proxy(urls)
t = self._scrap_interval - t
if t > self._sleep_time:
await asyncio.sleep(t, loop=self._loop)
async def _update_proxy(self, urls):
start_time = time.time()
for url in urls:
retry_cnt = 3
while retry_cnt > 0:
retry_cnt -= 1
try:
async with aiohttp.ClientSession(loop=self._loop) as session:
with async_timeout.timeout(self._timeout, loop=self._loop):
async with session.request("GET", url, headers=self._headers) as resp:
body = await resp.read()
body = body.decode('utf-8', errors='ignore')
except CancelledError:
raise
except Exception as e:
log.info("Failed to scrap proxy on '%s': %s", url, e)
else:
retry_cnt = 0
proxies = extract_proxies(body)
log.debug("Find %s proxies on the page '%s'", len(proxies), url)
if proxies:
for r in self._receivers:
await r(proxies)
await asyncio.sleep(self._sleep_time, loop=self._loop)
return time.time() - start_time
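# Illustrative usage sketch (not part of the original module); the config object and
# the receiver coroutine below are hypothetical.
#
#   async def on_proxies(proxies):
#       log.info("received %d proxies", len(proxies))
#
#   spider = ProxySpider(config)   # config supplies proxy_pages, scrap_interval, ...
#   spider.subscribe(on_proxies)
#   spider.open()                  # schedules one scraping task per proxy page
#   ...                            # run the event loop
#   spider.close()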
|
python
|
"""open_discussions constants"""
from rest_framework import status
PERMISSION_DENIED_ERROR_TYPE = "PermissionDenied"
NOT_AUTHENTICATED_ERROR_TYPE = "NotAuthenticated"
DJANGO_PERMISSION_ERROR_TYPES = (
status.HTTP_401_UNAUTHORIZED,
status.HTTP_403_FORBIDDEN,
)
ISOFORMAT = "%Y-%m-%dT%H:%M:%SZ"
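# Illustrative (not part of the original module): ISOFORMAT is a strftime/strptime
# pattern for UTC timestamps, e.g.
#   datetime.datetime(2020, 1, 2, 3, 4, 5).strftime(ISOFORMAT) -> "2020-01-02T03:04:05Z"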
|
python
|
#
from apps.ots.event.signal_event import SignalEvent
from apps.ots.strategy.risk_manager_base import RiskManagerBase
class NaiveRiskManager(RiskManagerBase):
def __init__(self):
self.refl = ''
def get_mkt_quantity(self, signalEvent):
return 100
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2018-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# All Rights Reserved.
#
""" System Inventory Kubernetes Application Operator."""
import copy
import docker
from eventlet.green import subprocess
import glob
import grp
import functools
import io
import os
import pkg_resources
import pwd
import re
import ruamel.yaml as yaml
import shutil
import site
import six
import sys
import threading
import time
import zipfile
from collections import namedtuple
from distutils.util import strtobool
from eventlet import greenpool
from eventlet import greenthread
from eventlet import queue
from eventlet import Timeout
from fm_api import constants as fm_constants
from fm_api import fm_api
from oslo_log import log as logging
from oslo_serialization import base64
from sysinv._i18n import _
from sysinv.api.controllers.v1 import kube_app
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import kubernetes
from sysinv.common.retrying import retry
from sysinv.common import utils as cutils
from sysinv.conductor import openstack
from sysinv.helm import base as helm_base
from sysinv.helm import common
from sysinv.helm import utils as helm_utils
from sysinv.helm.lifecycle_constants import LifecycleConstants
from sysinv.helm.lifecycle_hook import LifecycleHookInfo
# Log and config
LOG = logging.getLogger(__name__)
# Constants
APPLY_SEARCH_PATTERN = 'Processing Chart,'
ARMADA_NAMESPACE = 'armada'
ARMADA_APPLICATION = 'armada'
ARMADA_CONTAINER_NAME = 'armada-api'
ARMADA_MANIFEST_APPLY_SUCCESS_MSG = 'Done applying manifest'
ARMADA_RELEASE_ROLLBACK_FAILURE_MSG = 'Error while rolling back tiller release'
CONTAINER_ABNORMAL_EXIT_CODE = 137
DELETE_SEARCH_PATTERN = 'Deleting release|no release to delete'
ROLLBACK_SEARCH_PATTERN = 'Helm rollback of release'
INSTALLATION_TIMEOUT = 3600
MAX_DOWNLOAD_THREAD = 5
MAX_DOWNLOAD_ATTEMPTS = 3
DOWNLOAD_WAIT_BEFORE_RETRY = 30
TARFILE_DOWNLOAD_CONNECTION_TIMEOUT = 60
TARFILE_TRANSFER_CHUNK_SIZE = 1024 * 512
ARMADA_LOG_MAX = 10
ARMADA_HOST_LOG_LOCATION = '/var/log/armada'
ARMADA_CONTAINER_LOG_LOCATION = '/logs'
ARMADA_CONTAINER_TMP = '/tmp'
ARMADA_LOCK_GROUP = 'armada.process'
ARMADA_LOCK_VERSION = 'v1'
ARMADA_LOCK_NAMESPACE = 'kube-system'
ARMADA_LOCK_PLURAL = 'locks'
ARMADA_LOCK_NAME = 'lock'
LOCK_NAME_APP_REAPPLY = 'app_reapply'
LOCK_NAME_PROCESS_APP_METADATA = 'process_app_metadata'
# Helper functions
def generate_armada_service_manifest_fqpn(app_name, app_version, manifest_filename):
return os.path.join('/manifests', app_name, app_version,
app_name + '-' + manifest_filename)
def generate_install_manifest_fqpn(app_name, app_version, manifest_filename):
return os.path.join(constants.APP_INSTALL_PATH,
app_name, app_version, manifest_filename)
def generate_synced_images_fqpn(app_name, app_version):
return os.path.join(
constants.APP_SYNCED_ARMADA_DATA_PATH, app_name, app_version,
app_name + '-images.yaml')
def generate_synced_helm_overrides_dir(app_name, app_version):
return os.path.join(common.HELM_OVERRIDES_PATH, app_name, app_version)
def generate_synced_app_plugins_dir(app_name, app_version):
return os.path.join(
generate_synced_helm_overrides_dir(app_name, app_version),
'plugins')
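# Worked example (illustrative, derived from the helpers above) for an application
# named 'my-app' at version '1.0-0' with manifest file 'manifest.yaml':
#   generate_armada_service_manifest_fqpn('my-app', '1.0-0', 'manifest.yaml')
#       -> '/manifests/my-app/1.0-0/my-app-manifest.yaml'
#   generate_synced_images_fqpn('my-app', '1.0-0')
#       -> '<APP_SYNCED_ARMADA_DATA_PATH>/my-app/1.0-0/my-app-images.yaml'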
def create_app_path(path):
uid = pwd.getpwnam(constants.SYSINV_USERNAME).pw_uid
gid = os.getgid()
if not os.path.exists(constants.APP_INSTALL_PATH):
os.makedirs(constants.APP_INSTALL_PATH)
os.chown(constants.APP_INSTALL_PATH, uid, gid)
os.makedirs(path)
os.chown(path, uid, gid)
def get_app_install_root_path_ownership():
uid = os.stat(constants.APP_INSTALL_ROOT_PATH).st_uid
gid = os.stat(constants.APP_INSTALL_ROOT_PATH).st_gid
return (uid, gid)
Chart = namedtuple('Chart', 'metadata_name name namespace location release labels sequenced')
class AppOperator(object):
"""Class to encapsulate Kubernetes App operations for System Inventory"""
DOCKER_REGISTRY_SECRET = 'default-registry-key'
# List of in progress apps and their abort status
abort_requested = {}
def __init__(self, dbapi, helm_op, apps_metadata):
self._dbapi = dbapi
self._helm = helm_op
self._apps_metadata = apps_metadata
self._plugins = PluginHelper(self._dbapi, self._helm)
self._fm_api = fm_api.FaultAPIs()
self._docker = DockerHelper(self._dbapi)
self._kube = kubernetes.KubeOperator()
self._utils = kube_app.KubeAppHelper(self._dbapi)
self._image = AppImageParser()
self._lock = threading.Lock()
self._armada = ArmadaHelper(self._kube)
if not os.path.isfile(constants.ANSIBLE_BOOTSTRAP_FLAG):
self._clear_stuck_applications()
# Audit discoverable app plugins to remove any stale plugins that may
# have been removed since this host was last tasked to manage
# applications
self._plugins.audit_plugins()
def activate_app_plugins(self, rpc_app):
app = AppOperator.Application(rpc_app)
self._plugins.activate_plugins(app)
def deactivate_app_plugins(self, rpc_app):
app = AppOperator.Application(rpc_app)
self._plugins.deactivate_plugins(app)
def app_has_system_plugins(self, rpc_app):
app = AppOperator.Application(rpc_app)
return app.system_app
def _clear_stuck_applications(self):
apps = self._dbapi.kube_app_get_all()
for app in apps:
if app.status in [constants.APP_UPLOAD_IN_PROGRESS,
constants.APP_APPLY_IN_PROGRESS,
constants.APP_UPDATE_IN_PROGRESS,
constants.APP_RECOVER_IN_PROGRESS,
constants.APP_REMOVE_IN_PROGRESS]:
self._abort_operation(app, app.status, reset_status=True)
else:
continue
# Delete the Armada locks that might have been acquired previously
# for a fresh start. This guarantees that a re-apply, re-update or
# a re-remove attempt following a status reset will not fail due
# to a lock related issue.
self._armada.clear_armada_locks()
def _raise_app_alarm(self, app_name, app_action, alarm_id, severity,
reason_text, alarm_type, repair_action,
service_affecting):
entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_APPLICATION,
app_name)
app_alarms = self._fm_api.get_faults(entity_instance_id)
if app_alarms:
if ((app_action == constants.APP_APPLY_FAILURE and
app_alarms[0].alarm_id ==
fm_constants.FM_ALARM_ID_APPLICATION_APPLY_FAILED) or
(app_action == constants.APP_UPLOAD_FAILURE and
app_alarms[0].alarm_id ==
fm_constants.FM_ALARM_ID_APPLICATION_UPLOAD_FAILED) or
(app_action == constants.APP_REMOVE_FAILURE and
app_alarms[0].alarm_id ==
fm_constants.FM_ALARM_ID_APPLICATION_REMOVE_FAILED) or
(app_action == constants.APP_APPLY_IN_PROGRESS and
app_alarms[0].alarm_id ==
fm_constants.FM_ALARM_ID_APPLICATION_APPLYING) or
(app_action == constants.APP_UPDATE_IN_PROGRESS and
app_alarms[0].alarm_id ==
fm_constants.FM_ALARM_ID_APPLICATION_UPDATING)):
# The same alarm was raised before, will re-raise to set the
# latest timestamp.
pass
else:
# Clear existing alarm for this app if it differs than the one to
# be raised.
self._fm_api.clear_fault(app_alarms[0].alarm_id,
app_alarms[0].entity_instance_id)
fault = fm_api.Fault(
alarm_id=alarm_id,
alarm_state=fm_constants.FM_ALARM_STATE_SET,
entity_type_id=fm_constants.FM_ENTITY_TYPE_APPLICATION,
entity_instance_id=entity_instance_id,
severity=severity,
reason_text=reason_text,
alarm_type=alarm_type,
probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_UNKNOWN,
proposed_repair_action=repair_action,
service_affecting=service_affecting)
self._fm_api.set_fault(fault)
def _clear_app_alarm(self, app_name):
entity_instance_id = "%s=%s" % (fm_constants.FM_ENTITY_TYPE_APPLICATION,
app_name)
app_alarms = self._fm_api.get_faults(entity_instance_id)
if app_alarms:
# There can only exist one alarm per app
self._fm_api.clear_fault(app_alarms[0].alarm_id,
app_alarms[0].entity_instance_id)
def _register_app_abort(self, app_name):
with self._lock:
AppOperator.abort_requested[app_name] = False
LOG.info("Register the initial abort status of app %s" % app_name)
def _deregister_app_abort(self, app_name):
with self._lock:
try:
del AppOperator.abort_requested[app_name]
except KeyError:
pass
LOG.info("Deregister the abort status of app %s" % app_name)
@staticmethod
def is_app_aborted(app_name):
try:
return AppOperator.abort_requested[app_name]
except KeyError:
return False
def _set_abort_flag(self, app_name):
with self._lock:
AppOperator.abort_requested[app_name] = True
LOG.info("Abort set for app %s" % app_name)
def _cleanup(self, app, app_dir=True):
"""" Remove application directories and override files """
self._plugins.uninstall_plugins(app)
try:
if os.path.exists(app.sync_overrides_dir):
shutil.rmtree(app.sync_overrides_dir)
if app_dir:
shutil.rmtree(os.path.dirname(
app.sync_overrides_dir))
if os.path.exists(app.sync_armada_mfile_dir):
shutil.rmtree(app.sync_armada_mfile_dir)
if app_dir:
shutil.rmtree(os.path.dirname(
app.sync_armada_mfile_dir))
if os.path.exists(app.inst_path):
shutil.rmtree(app.inst_path)
if app_dir:
shutil.rmtree(os.path.dirname(
app.inst_path))
except OSError as e:
LOG.error(e)
raise
def _update_app_status(self, app, new_status=None, new_progress=None):
""" Persist new app status """
if new_status is None:
new_status = app.status
with self._lock:
app.update_status(new_status, new_progress)
def _abort_operation(self, app, operation,
progress=constants.APP_PROGRESS_ABORTED,
user_initiated=False, reset_status=False):
if user_initiated:
progress = constants.APP_PROGRESS_ABORTED_BY_USER
if app.status == constants.APP_UPLOAD_IN_PROGRESS:
new_status = constants.APP_UPLOAD_FAILURE
op = 'application-upload'
self._raise_app_alarm(
app.name, constants.APP_UPLOAD_FAILURE,
fm_constants.FM_ALARM_ID_APPLICATION_UPLOAD_FAILED,
fm_constants.FM_ALARM_SEVERITY_WARNING,
_("Application Upload Failure"),
fm_constants.FM_ALARM_TYPE_3,
_("Check system inventory log for cause."),
False)
elif (app.status == constants.APP_APPLY_IN_PROGRESS or
app.status == constants.APP_UPDATE_IN_PROGRESS or
app.status == constants.APP_RECOVER_IN_PROGRESS):
new_status = constants.APP_APPLY_FAILURE
if reset_status:
if app.status == constants.APP_APPLY_IN_PROGRESS:
op = 'application-apply'
else:
op = 'application-update'
if app.name in self._apps_metadata[
constants.APP_METADATA_PLATFORM_MANAGED_APPS].keys():
# For platform core apps, set the new status
# to 'uploaded'. The audit task will kick in with
# all its pre-requisite checks before reapplying.
new_status = constants.APP_UPLOAD_SUCCESS
self._clear_app_alarm(app.name)
if (not reset_status or
app.name not in self._apps_metadata[
constants.APP_METADATA_PLATFORM_MANAGED_APPS].keys()):
self._raise_app_alarm(
app.name, constants.APP_APPLY_FAILURE,
fm_constants.FM_ALARM_ID_APPLICATION_APPLY_FAILED,
fm_constants.FM_ALARM_SEVERITY_MAJOR,
_("Application Apply Failure"),
fm_constants.FM_ALARM_TYPE_3,
_("Retry applying the application. If the issue persists, "
"please check system inventory log for cause."),
True)
elif app.status == constants.APP_REMOVE_IN_PROGRESS:
new_status = constants.APP_REMOVE_FAILURE
op = 'application-remove'
self._raise_app_alarm(
app.name, constants.APP_REMOVE_FAILURE,
fm_constants.FM_ALARM_ID_APPLICATION_REMOVE_FAILED,
fm_constants.FM_ALARM_SEVERITY_MAJOR,
_("Application Remove Failure"),
fm_constants.FM_ALARM_TYPE_3,
_("Retry removing the application. If the issue persists, "
"please check system inventory log for cause."),
True)
else:
# Should not get here, perhaps a new status was introduced?
LOG.error("No abort handling code for app status = '%s'!" % app.status)
return
if not reset_status:
self._update_app_status(app, new_status, progress)
if not user_initiated:
LOG.error("Application %s aborted!." % operation)
else:
LOG.info("Application %s aborted by user!." % operation)
else:
LOG.info("Resetting status of app %s from '%s' to '%s' " %
(app.name, app.status, new_status))
error_msg = "Unexpected process termination while " + op +\
" was in progress. The application status " +\
"has changed from \'" + app.status +\
"\' to \'" + new_status + "\'."
values = {'progress': error_msg, 'status': new_status}
self._dbapi.kube_app_update(app.id, values)
def _download_tarfile(self, app):
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from socket import timeout as socket_timeout
from six.moves.urllib.parse import urlsplit
def _handle_download_failure(reason):
raise exception.KubeAppUploadFailure(
name=app.name,
version=app.version,
reason=reason)
try:
remote_file = urlopen(
app.tarfile, timeout=TARFILE_DOWNLOAD_CONNECTION_TIMEOUT)
try:
remote_filename = remote_file.info()['Content-Disposition']
except KeyError:
remote_filename = os.path.basename(
urlsplit(remote_file.url).path)
            # A usable filename is available only when Content-Disposition (or the
            # URL path) actually yielded one.
            filename_avail = False if (remote_filename is None or
                                       remote_filename == '') else True
if filename_avail:
if (not remote_filename.endswith('.tgz') and
not remote_filename.endswith('.tar.gz')):
reason = app.tarfile + ' has unrecognizable tar file ' + \
'extension. Supported extensions are: .tgz and .tar.gz.'
_handle_download_failure(reason)
return None
filename = '/tmp/' + remote_filename
else:
filename = '/tmp/' + app.name + '.tgz'
            with open(filename, 'wb') as dest:
shutil.copyfileobj(remote_file, dest, TARFILE_TRANSFER_CHUNK_SIZE)
return filename
except HTTPError as err:
LOG.error(err)
reason = 'failed to download tarfile ' + app.tarfile + \
', error code = ' + str(err.code)
_handle_download_failure(reason)
except URLError as err:
LOG.error(err)
reason = app.tarfile + ' is unreachable.'
_handle_download_failure(reason)
except shutil.Error as err:
LOG.error(err)
err_file = os.path.basename(filename) if filename_avail else app.tarfile
reason = 'failed to process tarfile ' + err_file
_handle_download_failure(reason)
except socket_timeout as e:
LOG.error(e)
reason = 'failed to download tarfile ' + app.tarfile + \
', connection timed out.'
_handle_download_failure(reason)
def _extract_tarfile(self, app):
def _handle_extract_failure(
reason='failed to extract tarfile content.'):
raise exception.KubeAppUploadFailure(
name=app.name,
version=app.version,
reason=reason)
orig_uid, orig_gid = get_app_install_root_path_ownership()
try:
# One time set up of base armada manifest path for the system
if not os.path.isdir(constants.APP_SYNCED_ARMADA_DATA_PATH):
os.makedirs(constants.APP_SYNCED_ARMADA_DATA_PATH)
if not os.path.isdir(app.sync_armada_mfile_dir):
os.makedirs(app.sync_armada_mfile_dir)
if not os.path.isdir(app.inst_path):
create_app_path(app.inst_path)
# Temporarily change /scratch group ownership to sys_protected
os.chown(constants.APP_INSTALL_ROOT_PATH, orig_uid,
grp.getgrnam(constants.SYSINV_SYSADMIN_GRPNAME).gr_gid)
# Extract the tarfile as sysinv user
if not cutils.extract_tarfile(app.inst_path, app.tarfile, demote_user=True):
_handle_extract_failure()
if app.downloaded_tarfile:
name, version, patches = self._utils._verify_metadata_file(
app.inst_path, app.name, app.version)
if (name != app.name or version != app.version):
# Save the official application info. They will be
# persisted in the next status update
app.regenerate_application_info(name, version, patches)
if not cutils.verify_checksum(app.inst_path):
_handle_extract_failure('checksum validation failed.')
mname, mfile = self._utils._find_manifest_file(app.inst_path)
# Save the official manifest file info. They will be persisted
# in the next status update
app.regenerate_manifest_filename(mname, os.path.basename(mfile))
else:
name, version, patches = cutils.find_metadata_file(
app.inst_path, constants.APP_METADATA_FILE)
app.patch_dependencies = patches
self._utils._extract_helm_charts(app.inst_path)
except exception.SysinvException as e:
_handle_extract_failure(str(e))
except OSError as e:
LOG.error(e)
_handle_extract_failure()
finally:
os.chown(constants.APP_INSTALL_ROOT_PATH, orig_uid, orig_gid)
def get_image_tags_by_charts(self, app_images_file, app_manifest_file, overrides_dir):
""" Mine the image tags for charts from the images file. Add the
image tags to the manifest file if the image tags from the
charts do not exist in the manifest file. Convert the image
tags in in both override files and manifest file. Intended
for both system and custom apps.
The image tagging conversion(local docker registry address prepended):
${LOCAL_REGISTRY_SERVER}:${REGISTRY_PORT}/<image-name>
(ie..registry.local:9001/docker.io/mariadb:10.2.13)
"""
app_imgs = []
manifest_update_required = False
if os.path.exists(app_images_file):
with io.open(app_images_file, 'r', encoding='utf-8') as f:
images_file = yaml.safe_load(f)
if os.path.exists(app_manifest_file):
with io.open(app_manifest_file, 'r', encoding='utf-8') as f:
# The RoundTripLoader removes the superfluous quotes by default,
# resulting in the dumped charts not being readable by Armada.
# Set preserve_quotes=True to preserve all the quotes.
charts = list(yaml.load_all(
f, Loader=yaml.RoundTripLoader, preserve_quotes=True))
for chart in charts:
if "armada/Chart/" in chart['schema']:
chart_data = chart['data']
chart_name = chart_data['chart_name']
chart_namespace = chart_data['namespace']
# Get the image tags by chart from the images file
helm_chart_imgs = {}
if chart_name in images_file:
helm_chart_imgs = images_file[chart_name]
# Get the image tags from the chart overrides file
overrides = chart_namespace + '-' + chart_name + '.yaml'
app_overrides_file = os.path.join(overrides_dir, overrides)
overrides_file = {}
if os.path.exists(app_overrides_file):
with io.open(app_overrides_file, 'r', encoding='utf-8') as f:
overrides_file = yaml.safe_load(f)
override_imgs = self._image.find_images_in_dict(
overrides_file.get('data', {}).get('values', {}))
override_imgs_copy = copy.deepcopy(override_imgs)
# Get the image tags from the armada manifest file
armada_chart_imgs = self._image.find_images_in_dict(
chart_data.get('values', {}))
armada_chart_imgs_copy = copy.deepcopy(armada_chart_imgs)
armada_chart_imgs = self._image.merge_dict(helm_chart_imgs, armada_chart_imgs)
# Update image tags with local registry prefix
override_imgs = self._image.update_images_with_local_registry(override_imgs)
armada_chart_imgs = self._image.update_images_with_local_registry(armada_chart_imgs)
# Generate a list of required images by chart
download_imgs = copy.deepcopy(armada_chart_imgs)
download_imgs = self._image.merge_dict(download_imgs, override_imgs)
download_imgs_list = self._image.generate_download_images_list(download_imgs, [])
app_imgs.extend(download_imgs_list)
# Update chart override file if needed
if override_imgs != override_imgs_copy:
with open(app_overrides_file, 'w') as f:
try:
overrides_file['data']['values'] = self._image.merge_dict(
overrides_file['data']['values'], override_imgs)
yaml.safe_dump(overrides_file, f, default_flow_style=False)
LOG.info("Overrides file %s updated with new image tags" %
app_overrides_file)
except (TypeError, KeyError):
LOG.error("Overrides file %s fails to update" %
app_overrides_file)
# Update armada chart if needed
if armada_chart_imgs != armada_chart_imgs_copy:
# This is to convert an empty OrderedDict to a dict
if 'values' in chart_data:
if not chart_data['values']:
chart_data['values'] = {}
chart_data['values'] = self._image.merge_dict(
chart_data.get('values', {}), armada_chart_imgs)
manifest_update_required = True
# Update manifest file if needed
if manifest_update_required:
with open(app_manifest_file, 'w') as f:
try:
yaml.dump_all(charts, f, Dumper=yaml.RoundTripDumper,
explicit_start=True, default_flow_style=False)
LOG.info("Manifest file %s updated with new image tags" %
app_manifest_file)
except Exception as e:
LOG.error("Manifest file %s fails to update with "
"new image tags: %s" % (app_manifest_file, e))
return list(set(app_imgs))
def _register_embedded_images(self, app):
"""
TODO(tngo): When we're ready to support air-gap scenario and private
images, the following need to be done:
a. load the embedded images
b. tag and push them to the docker registry on the controller
c. find image tag IDs in each chart and replace their values with
new tags. Alternatively, document the image tagging convention
${LOCAL_REGISTRY_SERVER}:${REGISTRY_PORT}/<image-name>
(e.g. registry.local:9001/prom/mysqld-exporter)
to be referenced in the application Helm charts.
"""
raise exception.KubeAppApplyFailure(
name=app.name,
version=app.version,
reason="embedded images are not yet supported.")
def _save_images_list(self, app):
# Extract the list of images from the charts and overrides where
# applicable. Save the list to the same location as the armada manifest
# so it can be sync'ed.
app.charts = self._get_list_of_charts(app.sync_armada_mfile)
self._plugins.activate_plugins(app)
LOG.info("Generating application overrides to discover required images.")
self._helm.generate_helm_application_overrides(
app.sync_overrides_dir, app.name, mode=None, cnamespace=None,
armada_format=True, armada_chart_info=app.charts, combined=True)
self._plugins.deactivate_plugins(app)
self._save_images_list_by_charts(app)
# Get the list of images from the updated images overrides
images_to_download = self.get_image_tags_by_charts(
app.sync_imgfile, app.sync_armada_mfile, app.sync_overrides_dir)
if not images_to_download:
# TODO(tngo): We may want to support the deployment of apps that
# set up resources only in the future. In which case, generate
# an info log and let it advance to the next step.
raise exception.KubeAppUploadFailure(
name=app.name,
version=app.version,
reason="charts specify no docker images.")
with open(app.sync_imgfile, 'a') as f:
yaml.safe_dump({"download_images": images_to_download}, f,
default_flow_style=False)
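# Note: after this step the synced images file contains the per-chart
# sections written by _save_images_list_by_charts() plus the appended
# "download_images" list, which download_images() later reads back via
# _retrieve_images_list().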
def _save_images_list_by_charts(self, app):
from six.moves.urllib.parse import urlparse
# Mine the images from the values.yaml files in the charts directory.
# The list of images for each chart is saved to the images file.
images_by_charts = {}
for chart in app.charts:
chart_name = os.path.join(app.inst_charts_dir, chart.name)
if not os.path.exists(chart_name):
# If the helm chart name is not the same as the armada
# chart name in the manifest, try using the source
# to find the chart directory.
try:
# helm charts should be of the standard format:
# <chartname>-X.X.X.tgz
url_path = os.path.basename(urlparse(chart.location).path)
# strip the .tgz
chart_and_version = re.sub(r'\.tgz$', '', url_path)
# strip the version
chart_name_no_version = re.sub(r'-(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)',
'', chart_and_version)
chart_name = os.path.join(app.inst_charts_dir, chart_name_no_version)
except Exception as e:
LOG.info("Cannot parse chart path: %s" % e)
chart_path = os.path.join(chart_name, 'values.yaml')
if os.path.exists(chart_path):
with io.open(chart_path, 'r', encoding='utf-8') as f:
y = yaml.safe_load(f)
chart_images = self._image.find_images_in_dict(y)
if chart_images:
images_by_charts.update({chart.name: chart_images})
with open(app.sync_imgfile, 'w') as f:
yaml.safe_dump(images_by_charts, f, explicit_start=True,
default_flow_style=False)
def _retrieve_images_list(self, app_images_file):
with io.open(app_images_file, 'r', encoding='utf-8') as f:
images_list = yaml.safe_load(f)
return images_list
def download_images(self, app):
if os.path.isdir(app.inst_images_dir):
return self._register_embedded_images(app)
if app.system_app:
# Some images could have been overwritten via user overrides
# between upload and apply, or between applies. Refresh the
# saved images list.
saved_images_list = self._retrieve_images_list(app.sync_imgfile)
saved_download_images_list = list(saved_images_list.get("download_images"))
images_to_download = self.get_image_tags_by_charts(
app.sync_imgfile, app.sync_armada_mfile, app.sync_overrides_dir)
if set(saved_download_images_list) != set(images_to_download):
saved_images_list.update({"download_images": images_to_download})
with open(app.sync_imgfile, 'w') as f:
yaml.safe_dump(saved_images_list, f, explicit_start=True,
default_flow_style=False)
else:
images_to_download = self._retrieve_images_list(
app.sync_imgfile).get("download_images")
total_count = len(images_to_download)
threads = min(MAX_DOWNLOAD_THREAD, total_count)
start = time.time()
try:
registries_info = self._docker.retrieve_specified_registries()
except Exception as e:
raise exception.KubeAppApplyFailure(
name=app.name,
version=app.version,
reason=str(e))
for idx in reversed(range(MAX_DOWNLOAD_ATTEMPTS)):
pool = greenpool.GreenPool(size=threads)
for tag, success in pool.imap(
functools.partial(self._docker.download_an_image,
app.name,
registries_info),
images_to_download):
if success:
continue
if AppOperator.is_app_aborted(app.name):
raise exception.KubeAppApplyFailure(
name=app.name,
version=app.version,
reason="operation aborted by user.")
else:
LOG.info("Failed to download image: %s", tag)
break
else:
elapsed = time.time() - start
LOG.info("All docker images for application %s were successfully "
"downloaded in %d seconds", app.name, elapsed)
break
# don't sleep after last download attempt
if idx:
LOG.info("Retry docker images download for application %s "
"after %d seconds", app.name, DOWNLOAD_WAIT_BEFORE_RETRY)
time.sleep(DOWNLOAD_WAIT_BEFORE_RETRY)
else:
raise exception.KubeAppApplyFailure(
name=app.name,
version=app.version,
reason=constants.APP_PROGRESS_IMAGES_DOWNLOAD_FAILED)
def _validate_helm_charts(self, app):
failed_charts = []
for r, f in cutils.get_files_matching(app.inst_charts_dir, 'Chart.yaml'):
# Eliminate redundant validation for system app
if app.system_app and '/charts/helm-toolkit' in r:
continue
try:
output = subprocess.check_output( # pylint: disable=not-callable
['helm', 'lint', r], universal_newlines=True)
if "linted, 0 chart(s) failed" in output:
LOG.info("Helm chart %s validated" % os.path.basename(r))
else:
LOG.error("Validation failed for helm chart %s" %
os.path.basename(r))
failed_charts.append(r)
except Exception as e:
raise exception.KubeAppUploadFailure(
name=app.name, version=app.version, reason=str(e))
if len(failed_charts) > 0:
raise exception.KubeAppUploadFailure(
name=app.name, version=app.version, reason="one or more charts failed validation.")
def _get_chart_data_from_metadata(self, app):
"""Get chart related data from application metadata
This extracts from the application metadata the helm repo to which the
charts should be uploaded.
This also returns the list of charts that are disabled by default.
:param app: application
"""
repo = common.HELM_REPO_FOR_APPS
disabled_charts = []
lfile = os.path.join(app.inst_path, constants.APP_METADATA_FILE)
if os.path.exists(lfile) and os.path.getsize(lfile) > 0:
with io.open(lfile, 'r', encoding='utf-8') as f:
try:
y = yaml.safe_load(f)
repo = y.get('helm_repo', common.HELM_REPO_FOR_APPS)
disabled_charts = y.get('disabled_charts', [])
except KeyError:
pass
LOG.info("Application %s (%s) will load charts to chart repo %s" % (
app.name, app.version, repo))
LOG.info("Application %s (%s) will disable charts %s by default" % (
app.name, app.version, disabled_charts))
return (repo, disabled_charts)
def _upload_helm_charts(self, app):
# Set env path for helm-upload execution
env = os.environ.copy()
env['PATH'] = '/usr/local/sbin:' + env['PATH']
charts = [os.path.join(r, f)
for r, f in cutils.get_files_matching(app.inst_charts_dir, '.tgz')]
orig_uid, orig_gid = get_app_install_root_path_ownership()
(helm_repo, disabled_charts) = self._get_chart_data_from_metadata(app)
try:
# Temporarily change /scratch group ownership to sys_protected
os.chown(constants.APP_INSTALL_ROOT_PATH, orig_uid,
grp.getgrnam(constants.SYSINV_SYSADMIN_GRPNAME).gr_gid)
with open(os.devnull, "w") as fnull:
for chart in charts:
subprocess.check_call(['helm-upload', helm_repo, chart], # pylint: disable=not-callable
env=env, stdout=fnull, stderr=fnull)
LOG.info("Helm chart %s uploaded" % os.path.basename(chart))
# Make sure any helm repo changes are reflected for the users
helm_utils.refresh_helm_repo_information()
except Exception as e:
raise exception.KubeAppUploadFailure(
name=app.name, version=app.version, reason=str(e))
finally:
os.chown(constants.APP_INSTALL_ROOT_PATH, orig_uid, orig_gid)
# For system applications with plugin support, establish user override
# entries and disable charts based on application metadata.
self._plugins.activate_plugins(app)
db_app = self._dbapi.kube_app_get(app.name)
app_ns = self._helm.get_helm_application_namespaces(db_app.name)
for chart, namespaces in six.iteritems(app_ns):
for namespace in namespaces:
try:
db_chart = self._dbapi.helm_override_get(
db_app.id, chart, namespace)
except exception.HelmOverrideNotFound:
# Create it
try:
db_chart = self._dbapi.helm_override_create(
{'app_id': db_app.id, 'name': chart,
'namespace': namespace})
except Exception as e:
LOG.exception(e)
# Since we are uploading a fresh application, ensure that
# charts are disabled based on metadata
system_overrides = db_chart.system_overrides
system_overrides.update({common.HELM_CHART_ATTR_ENABLED:
chart not in disabled_charts})
try:
self._dbapi.helm_override_update(
db_app.id, chart, namespace, {'system_overrides':
system_overrides})
except exception.HelmOverrideNotFound:
LOG.exception("Helm Override Not Found")
self._plugins.deactivate_plugins(app)
def _validate_labels(self, labels):
expr = re.compile(r'[a-z0-9]([-a-z0-9]*[a-z0-9])')
for label in labels:
if not expr.match(label):
return False
return True
def _update_kubernetes_labels(self, hostname, label_dict):
body = {
'metadata': {
'labels': {}
}
}
body['metadata']['labels'].update(label_dict)
if (common.LABEL_COMPUTE_LABEL in label_dict and
label_dict[common.LABEL_COMPUTE_LABEL] is None):
host = self.dbapi.ihost_get_by_hostname(hostname)
app_isolated_cpus = helm_base._get_host_cpu_list(host,
function=constants.ISOLATED_FUNCTION,
threads=True)
vswitch_cpus = helm_base._get_host_cpu_list(host,
function=constants.VSWITCH_FUNCTION,
threads=True)
if len(app_isolated_cpus) > 0 and len(vswitch_cpus) > 0:
raise exception.SysinvException(_(
"Failed to update kubernetes labels:"
" Only compute nodes may have application-isolated cores"
" and vswitch cores at the same time."))
try:
self._kube.kube_patch_node(hostname, body)
except exception.KubeNodeNotFound:
pass
def _assign_host_labels(self, hosts, labels):
for host in hosts:
if host.administrative != constants.ADMIN_LOCKED:
continue
for label_str in labels:
k, v = label_str.split('=')
try:
self._dbapi.label_create(
host.id, {'host_id': host.id,
'label_key': k,
'label_value': v})
except exception.HostLabelAlreadyExists:
pass
label_dict = {k: v for k, v in (i.split('=') for i in labels)}
try:
self._update_kubernetes_labels(host.hostname, label_dict)
except Exception as e:
LOG.exception(e)
def _find_label(self, host_uuid, label_str):
host_labels = self._dbapi.label_get_by_host(host_uuid)
for label_obj in host_labels:
if label_str == label_obj.label_key + '=' + label_obj.label_value:
return label_obj
return None
def _remove_host_labels(self, hosts, labels):
for host in hosts:
if host.administrative != constants.ADMIN_LOCKED:
continue
null_labels = {}
for label_str in labels:
lbl_obj = self._find_label(host.uuid, label_str)
if lbl_obj:
self._dbapi.label_destroy(lbl_obj.uuid)
key = lbl_obj.label_key
null_labels[key] = None
if null_labels:
try:
self._update_kubernetes_labels(host.hostname, null_labels)
except Exception as e:
LOG.exception(e)
def audit_local_registry_secrets(self, context):
"""
The local registry uses the admin's username and password for
authentication. K8s stores the authentication info in secrets in order
to access the local registry, while the admin's password is saved in
keyring.
The admin's password could be changed by an openstack client command
outside of sysinv and K8s. That causes a mismatch between keyring and
the k8s secrets, which leads to authentication failures.
There are two ways to keep the k8s secrets updated with the data in
keyring:
1. Polling. Use a periodic task to sync info from keyring to secrets.
2. Notification. Keystone sends out a notification when the password is
updated, and the notification receiver performs the data sync.
To ensure the k8s secrets are always synced with keyring in a timely
manner, both methods are used here. This function is called in both
cases to audit the password info between keyring and
registry-local-secret, and to propagate keyring's password to all local
registry secrets if needed.
"""
# Use lock to synchronize call from timer and notification
lock_name = "AUDIT_LOCAL_REGISTRY_SECRETS"
@cutils.synchronized(lock_name, external=False)
def _sync_audit_local_registry_secrets(self):
try:
secret = self._kube.kube_get_secret("registry-local-secret", kubernetes.NAMESPACE_KUBE_SYSTEM)
if secret is None:
return
secret_auth_body = base64.decode_as_text(secret.data['.dockerconfigjson'])
secret_auth_info = (secret_auth_body.split('auth":')[1]).split('"')[1]
registry_auth = cutils.get_local_docker_registry_auth()
registry_auth_info = '{0}:{1}'.format(registry_auth['username'],
registry_auth['password'])
if secret_auth_info == base64.encode_as_text(registry_auth_info):
LOG.debug("Auth info is the same, no update is needed for k8s secret.")
return
except Exception as e:
LOG.error(e)
return
try:
# update secret with new auth info
token = '{{\"auths\": {{\"{0}\": {{\"auth\": \"{1}\"}}}}}}'.format(
constants.DOCKER_REGISTRY_SERVER, base64.encode_as_text(registry_auth_info))
secret.data['.dockerconfigjson'] = base64.encode_as_text(token)
self._kube.kube_patch_secret("registry-local-secret", kubernetes.NAMESPACE_KUBE_SYSTEM, secret)
LOG.info("Secret registry-local-secret under Namespace kube-system is updated")
except Exception as e:
LOG.error("Failed to update Secret %s under Namespace kube-system: %s"
% ("registry-local-secret", e))
return
# update "default-registry-key" secret info under all namespaces
try:
ns_list = self._kube.kube_get_namespace_name_list()
for ns in ns_list:
secret = self._kube.kube_get_secret(AppOperator.DOCKER_REGISTRY_SECRET, ns)
if secret is None:
continue
try:
secret_auth_body = base64.decode_as_text(secret.data['.dockerconfigjson'])
if constants.DOCKER_REGISTRY_SERVER in secret_auth_body:
secret.data['.dockerconfigjson'] = base64.encode_as_text(token)
self._kube.kube_patch_secret(AppOperator.DOCKER_REGISTRY_SECRET, ns, secret)
LOG.info("Secret %s under Namespace %s is updated"
% (AppOperator.DOCKER_REGISTRY_SECRET, ns))
except Exception as e:
LOG.error("Failed to update Secret %s under Namespace %s: %s"
% (AppOperator.DOCKER_REGISTRY_SECRET, ns, e))
continue
except Exception as e:
LOG.error(e)
return
_sync_audit_local_registry_secrets(self)
def _wait_for_pod_termination(self, namespace):
loop_timeout = 0
loop_check_interval = 10
timeout = 300
try:
LOG.info("Waiting for pod termination in namespace %s ..." % namespace)
# Pod termination timeout 5mins
while loop_timeout <= timeout:
if not self._kube.kube_namespaced_pods_exist(namespace):
# Pods have terminated
break
loop_timeout += loop_check_interval
time.sleep(loop_check_interval)
if loop_timeout > timeout:
raise exception.KubePodTerminateTimeout(name=namespace)
LOG.info("Pod termination in Namespace %s completed." % namespace)
except Exception as e:
LOG.error(e)
raise
def _get_list_of_charts(self, manifest_file):
"""Get the charts information from the manifest file
The following chart data for each chart in the manifest file
is extracted and stored into a Chart namedtuple:
- metadata_name
- chart_name
- namespace
- location
- release
- pre-delete job labels
The method returns a list of Chart namedtuples following
the install order in the manifest chart_groups.
:param manifest_file: the manifest file of the application
:return: a list of namedtuple charts
"""
charts = []
release_prefix = ""
chart_group = {}
chart_groups = []
armada_charts = {}
with io.open(manifest_file, 'r', encoding='utf-8') as f:
docs = yaml.safe_load_all(f)
for doc in docs:
# iterate over the docs in the manifest file to get the
# required chart information
try:
if "armada/Manifest/" in doc['schema']:
release_prefix = doc['data']['release_prefix']
chart_groups = doc['data']['chart_groups']
elif "armada/ChartGroup/" in doc['schema']:
chart_group.update(
{doc['metadata']['name']: {
'chart_group': doc['data']['chart_group'],
'sequenced': doc.get('data').get('sequenced', False)}})
elif "armada/Chart/" in doc['schema']:
labels = []
delete_resource = \
doc['data'].get('upgrade', {}).get('pre', {}).get('delete', [])
for resource in delete_resource:
if resource.get('type') == 'job':
label = ''
for k, v in resource['labels'].items():
label = k + '=' + v + ',' + label
labels.append(label[:-1])
armada_charts.update(
{doc['metadata']['name']: {
'chart_name': doc['data']['chart_name'],
'namespace': doc['data']['namespace'],
'location': doc['data']['source']['location'],
'release': doc['data']['release'],
'labels': labels}})
LOG.debug("Manifest: Chart: {} Namespace: {} "
"Location: {} Release: {}".format(
doc['data']['chart_name'],
doc['data']['namespace'],
doc['data']['source']['location'],
doc['data']['release']))
except KeyError:
pass
# Push charts to the list following the order
# in the chart_groups (install list)
for c_group in chart_groups:
for chart in chart_group[c_group]['chart_group']:
charts.append(Chart(
metadata_name=chart,
name=armada_charts[chart]['chart_name'],
namespace=armada_charts[chart]['namespace'],
location=armada_charts[chart]['location'],
release=armada_charts[chart]['release'],
labels=armada_charts[chart]['labels'],
sequenced=chart_group[c_group]['sequenced']))
del armada_charts[chart]
del chart_group[c_group]
# Push charts that are not referenced
# in the chart_groups (install list)
if chart_group:
for c_group in chart_group:
for chart in chart_group[c_group]['chart_group']:
charts.append(Chart(
metadata_name=chart,
name=armada_charts[chart]['chart_name'],
namespace=armada_charts[chart]['namespace'],
location=armada_charts[chart]['location'],
release=armada_charts[chart]['release'],
labels=armada_charts[chart]['labels'],
sequenced=chart_group[c_group]['sequenced']))
del armada_charts[chart]
if armada_charts:
for chart in armada_charts:
charts.append(Chart(
metadata_name=chart,
name=armada_charts[chart]['chart_name'],
namespace=armada_charts[chart]['namespace'],
location=armada_charts[chart]['location'],
release=armada_charts[chart]['release'],
labels=armada_charts[chart]['labels'],
sequenced=False))
# Prepend the release prefix to each chart's release
# if one is defined
if release_prefix:
for i, chart in enumerate(charts):
charts[i] = chart._replace(
release=release_prefix + "-" + chart.release)
return charts
def _get_overrides_files(self, overrides_dir, charts, app_name, mode):
"""Returns list of override files or None, used in
application-install and application-delete."""
missing_helm_overrides = []
available_helm_overrides = []
for chart in charts:
overrides = chart.namespace + '-' + chart.name + '.yaml'
overrides_file = os.path.join(overrides_dir, overrides)
if not os.path.exists(overrides_file):
missing_helm_overrides.append(overrides_file)
else:
available_helm_overrides.append(overrides_file)
if missing_helm_overrides:
LOG.error("Missing the following overrides: %s" % missing_helm_overrides)
return None
# Get the armada manifest overrides files
manifest_op = self._helm.get_armada_manifest_operator(app_name)
armada_overrides = manifest_op.load_summary(overrides_dir)
return (available_helm_overrides, armada_overrides)
def _generate_armada_overrides_str(self, app_name, app_version,
helm_files, armada_files):
overrides_str = ""
if helm_files:
overrides_str += " ".join([
' --values {0}/overrides/{1}/{2}/{3}'.format(
ARMADA_CONTAINER_TMP,
app_name, app_version, os.path.basename(i))
for i in helm_files
])
if armada_files:
overrides_str += " ".join([
' --values {0}/manifests/{1}/{2}/{3}'.format(
ARMADA_CONTAINER_TMP,
app_name, app_version, os.path.basename(i))
for i in armada_files
])
return overrides_str
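# Illustrative output, based on the format strings above (paths shown
# symbolically): one ' --values' token per file, e.g.
#   ' --values {ARMADA_CONTAINER_TMP}/overrides/<app>/<version>/<ns>-<chart>.yaml'
# followed by the armada manifest overrides under .../manifests/<app>/<version>/.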
def _remove_chart_overrides(self, overrides_dir, manifest_file):
charts = self._get_list_of_charts(manifest_file)
for chart in charts:
if chart.name in self._helm.chart_operators:
self._helm.remove_helm_chart_overrides(overrides_dir,
chart.name,
chart.namespace)
def _update_app_releases_version(self, app_name):
"""Update application helm releases records
This method retrieves the deployed helm releases and updates the
releases records in sysinv db if needed
:param app_name: the name of the application
"""
try:
deployed_releases = helm_utils.retrieve_helm_releases()
LOG.debug('deployed_releases = %s', deployed_releases)
app = self._dbapi.kube_app_get(app_name)
app_releases = self._dbapi.kube_app_chart_release_get_all(app.id)
for r in app_releases:
LOG.debug('app.id=%r, release=%r, version=%r, namespace=%r',
app.id, r.release, r.version, r.namespace)
if (r.release in deployed_releases and
r.namespace in deployed_releases[r.release] and
r.version != deployed_releases[r.release][r.namespace]):
self._dbapi.kube_app_chart_release_update(
app.id, r.release, r.namespace,
{'version': deployed_releases[r.release][r.namespace]})
except Exception as e:
LOG.exception(e)
raise exception.SysinvException(_(
"Failed to update/record releases' versions for application %s: %s" %
(app_name, str(e))))
def _create_app_releases_version(self, app_name, app_charts):
"""Create application helm releases records
This method creates/initializes the helm releases objects for the application.
:param app_name: the name of the application
:param app_charts: the charts of the application
"""
kube_app = self._dbapi.kube_app_get(app_name)
app_releases = self._dbapi.kube_app_chart_release_get_all(kube_app.id)
if app_releases:
return
for chart in app_charts:
values = {
'release': chart.release,
'version': 0,
'namespace': chart.namespace,
'app_id': kube_app.id
}
try:
self._dbapi.kube_app_chart_release_create(values)
except Exception as e:
LOG.exception(e)
def _get_metadata_value(self, app, key_or_keys, default=None,
enforce_type=False):
"""
Get application metadata value from nested dictionary.
If a default value is specified, this will enforce that
the value returned is of the same type.
:param app: application object
:param key_or_keys: single key string, or list of keys
:param default: default value (and type)
:param enforce_type: enforce type check between return value and default
:return: The value from nested dictionary D[key1][key2][...] = value
assuming all keys are present, otherwise default.
"""
value = default
if isinstance(key_or_keys, list):
keys = key_or_keys
else:
keys = [key_or_keys]
metadata_file = os.path.join(app.inst_path,
constants.APP_METADATA_FILE)
if os.path.exists(metadata_file) and os.path.getsize(metadata_file) > 0:
with io.open(metadata_file, 'r', encoding='utf-8') as f:
try:
metadata = yaml.safe_load(f) or {}
value = cutils.deep_get(metadata, keys, default=default)
# TODO(jgauld): There is inconsistent treatment of YAML
# boolean between the module ruamel.yaml and module yaml
# in utils.py, health.py, and kube_app.py. Until these
# usage variants are unified, leave the following check
# as optional.
if enforce_type and default is not None and value is not None:
default_type = type(default)
if type(value) != default_type:
raise exception.SysinvException(_(
"Invalid {}: {} {!r} expected value is {}."
"".format(metadata_file, '.'.join(keys),
value, default_type)))
except KeyError:
# metadata file does not have the key
pass
LOG.debug('_get_metadata_value: metadata_file=%s, keys=%s, default=%r, value=%r',
metadata_file, keys, default, value)
return value
def _preserve_user_overrides(self, from_app, to_app):
"""Dump user overrides
In the scenario of updating application to a new version, this
method is used to copy the user overrides from the old version
to the new version.
:param from_app: application object that application updating from
:param to_app: application object that application updating to
"""
to_db_app = self._dbapi.kube_app_get(to_app.name)
from_db_app = self._dbapi.kube_app_get_inactive_by_name_version(
from_app.name, version=from_app.version)
from_app_db_charts = self._dbapi.helm_override_get_all(from_db_app.id)
from_app_charts = {}
for chart in from_app_db_charts:
from_app_charts.setdefault(chart.name, {}).update(
{chart.namespace: chart.user_overrides})
for chart in to_app.charts:
if (chart.name in from_app_charts and
chart.namespace in from_app_charts[chart.name] and
from_app_charts[chart.name][chart.namespace]):
user_overrides = {'user_overrides': from_app_charts[chart.name][chart.namespace]}
try:
self._dbapi.helm_override_update(
app_id=to_db_app.id, name=chart.name,
namespace=chart.namespace, values=user_overrides)
except exception.HelmOverrideNotFound:
# Unexpected
values = {
'name': chart.name,
'namespace': chart.namespace,
'app_id': to_db_app.id
}
values.update(user_overrides)
self._dbapi.helm_override_create(values=values)
LOG.info("Application %s (%s) will apply the user overrides for"
"Chart %s from version %s" % (to_app.name, to_app.version,
chart.name, from_app.version))
@retry(retry_on_exception=lambda x: isinstance(x, exception.ApplicationApplyFailure),
stop_max_attempt_number=5, wait_fixed=30 * 1000)
def _make_armada_request_with_monitor(self, app, request, overrides_str=None):
"""Initiate armada request with monitoring
This method delegates the armada request to docker helper and starts
a monitoring thread to persist status and progress along the way.
:param app: application data object
:param request: type of request (apply or delete)
:param overrides_str: list of overrides in string format to be applied
"""
def _get_armada_log_stats(pattern, logfile):
"""
TODO(tngo): In the absence of an Armada API that provides the current
status of an apply/delete manifest operation, the progress is derived
from specific log entries extracted from the execution logs. This
inner method is to be replaced with an official API call when
it becomes available.
"""
if pattern == ROLLBACK_SEARCH_PATTERN:
print_chart = '{print $10}'
else:
print_chart = '{print $NF}'
p1 = subprocess.Popen(['grep', pattern, logfile],
stdout=subprocess.PIPE)
p2 = subprocess.Popen(['awk', print_chart], stdin=p1.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
p1.stdout.close()
result, err = p2.communicate()
if result:
# Scrape information from command output, example 'validate' log:
# 2020-03-26 09:47:58.594 1105 INFO armada.cli [-] Successfully validated:\
# ('/tmp/manifests/oidc-auth-apps/1.0-0/oidc-auth-apps-manifest.yaml',)
# Strip out ANSI color code that might be in the text stream
r = re.compile(r'\x1b\[[0-9;]*m')
result = r.sub('', result).replace(',', '')
matches = result.split()
num_chart_processed = len(matches)
last_chart_processed = matches[num_chart_processed - 1]
if '=' in last_chart_processed:
last_chart_processed = last_chart_processed.split('=')[1]
return last_chart_processed, num_chart_processed
return None, None
def _check_progress(monitor_flag, app, pattern, logfile):
""" Progress monitoring task, to be run in a separate thread """
LOG.info("Starting progress monitoring thread for app %s" % app.name)
try:
adjust = self._get_metadata_value(app,
constants.APP_METADATA_APPLY_PROGRESS_ADJUST,
constants.APP_METADATA_APPLY_PROGRESS_ADJUST_DEFAULT_VALUE)
with Timeout(INSTALLATION_TIMEOUT,
exception.KubeAppProgressMonitorTimeout()):
charts_count = len(app.charts)
while True:
try:
monitor_flag.get_nowait()
LOG.debug("Received monitor stop signal for %s" % app.name)
monitor_flag.task_done()
break
except queue.Empty:
last, num = _get_armada_log_stats(pattern, logfile)
if last:
if charts_count == 0:
percent = 100
else:
tadjust = 0
if app.system_app:
tadjust = adjust
if tadjust >= charts_count:
LOG.error("Application metadata key '{}'"
"has an invalid value {} (too few charts)".
format(constants.APP_METADATA_APPLY_PROGRESS_ADJUST,
adjust))
tadjust = 0
percent = round((float(num) / # pylint: disable=W1619
(charts_count - tadjust)) * 100)
progress_str = "processing chart: {}, overall completion: {}%".\
format(last, percent)
if app.progress != progress_str:
LOG.info("%s" % progress_str)
self._update_app_status(app, new_progress=progress_str)
greenthread.sleep(1)
except Exception as e:
# timeout or subprocess error
LOG.exception(e)
finally:
LOG.info("Exiting progress monitoring thread for app %s" % app.name)
def _cleanup_armada_log(location, app_name, request):
"""Cleanup the oldest armada log if reach the maximum"""
list_of_logs = [os.path.join(location, f) for f in os.listdir(location)
if re.match(r'{}-{}.*.log'.format(app_name, request), f)]
try:
if len(list_of_logs) > ARMADA_LOG_MAX:
oldest_logfile = min(list_of_logs, key=os.path.getctime)
os.remove(oldest_logfile)
except OSError:
pass
# Body of the outer method
# This check is for cases where an abort is issued while
# this function waits between retries. In such cases, it
# should just return False
if AppOperator.is_app_aborted(app.name):
return False
# TODO(dvoicule): Maybe pass a hook from outside to this function
# need to change perform_app_recover/rollback/update to support this.
# All the other hooks store the operation of the app itself (apply,
# remove, delete, upload, update) yet this hook stores the armada
# operation in the operation field. This is inconsistent behavior and
# should be changed the moment a hook from outside is passed here.
lifecycle_hook_info = LifecycleHookInfo()
lifecycle_hook_info.operation = request
lifecycle_hook_info.relative_timing = constants.APP_LIFECYCLE_TIMING_PRE
lifecycle_hook_info.lifecycle_type = constants.APP_LIFECYCLE_TYPE_ARMADA_REQUEST
self.app_lifecycle_actions(None, None, app._kube_app, lifecycle_hook_info)
mqueue = queue.Queue()
rc = True
logname = time.strftime(app.name + '-' + request + '_%Y-%m-%d-%H-%M-%S.log')
logfile = ARMADA_HOST_LOG_LOCATION + '/' + logname
if request == constants.APP_APPLY_OP:
pattern = APPLY_SEARCH_PATTERN
elif request == constants.APP_DELETE_OP:
pattern = DELETE_SEARCH_PATTERN
else:
pattern = ROLLBACK_SEARCH_PATTERN
monitor = greenthread.spawn_after(1, _check_progress, mqueue, app,
pattern, logfile)
rc = self._armada.make_armada_request(request, app.armada_service_mfile,
overrides_str, app.releases, logfile)
_cleanup_armada_log(ARMADA_HOST_LOG_LOCATION, app.name, request)
mqueue.put('done')
monitor.kill()
# Here a manifest retry can be performed by throwing ApplicationApplyFailure
lifecycle_hook_info.relative_timing = constants.APP_LIFECYCLE_TIMING_POST
lifecycle_hook_info.lifecycle_type = constants.APP_LIFECYCLE_TYPE_ARMADA_REQUEST
lifecycle_hook_info[LifecycleConstants.EXTRA][LifecycleConstants.RETURN_CODE] = rc
self.app_lifecycle_actions(None, None, app._kube_app, lifecycle_hook_info)
return rc
def _record_auto_update_failed_versions(self, from_app, to_app):
"""Record the new application version in the old application
metadata when the new application fails to be updated"""
new_metadata = copy.deepcopy(from_app.app_metadata)
try:
failed_versions = new_metadata[constants.APP_METADATA_UPGRADES][
constants.APP_METADATA_FAILED_VERSIONS]
if to_app.version not in failed_versions:
failed_versions.append(to_app.version)
except KeyError:
new_metadata.setdefault(constants.APP_METADATA_UPGRADES, {}).update(
{constants.APP_METADATA_FAILED_VERSIONS: [to_app.version]})
with self._lock:
from_app.update_app_metadata(new_metadata)
def _perform_app_recover(self, old_app, new_app, armada_process_required=True):
"""Perform application recover
This recover method is triggered when an application update fails. It
cleans up the files/data for the new application and recovers the helm
charts for the old application. If the armada process is required,
armada apply is invoked to recover the application releases for the old
version.
The app status is set to "apply-failed" if the recovery fails so that
the user can re-apply the app.
:param old_app: the application object that application recovering to
:param new_app: the application object that application recovering from
:param armada_process_required: boolean, whether armada operation is needed
"""
def _activate_old_app_plugins(old_app):
# Enable the old app plugins.
self._plugins.activate_plugins(old_app)
LOG.info("Starting recover Application %s from version: %s to version: %s" %
(old_app.name, new_app.version, old_app.version))
# Ensure that the the failed app plugins are disabled prior to cleanup
self._plugins.deactivate_plugins(new_app)
self._update_app_status(
old_app, constants.APP_RECOVER_IN_PROGRESS,
constants.APP_PROGRESS_UPDATE_ABORTED.format(old_app.version, new_app.version) +
constants.APP_PROGRESS_RECOVER_IN_PROGRESS.format(old_app.version))
# Set the status for the new app to inactive
self._update_app_status(new_app, constants.APP_INACTIVE_STATE)
try:
self._cleanup(new_app, app_dir=False)
self._utils._patch_report_app_dependencies(
new_app.name + '-' + new_app.version)
self._dbapi.kube_app_destroy(new_app.name,
version=new_app.version,
inactive=True)
LOG.info("Recovering helm charts for Application %s (%s)..."
% (old_app.name, old_app.version))
self._update_app_status(old_app,
new_progress=constants.APP_PROGRESS_RECOVER_CHARTS)
with self._lock:
self._upload_helm_charts(old_app)
rc = True
if armada_process_required:
overrides_str = ''
old_app.charts = self._get_list_of_charts(old_app.sync_armada_mfile)
if old_app.system_app:
(helm_files, armada_files) = self._get_overrides_files(
old_app.sync_overrides_dir, old_app.charts, old_app.name, mode=None)
overrides_str = self._generate_armada_overrides_str(
old_app.name, old_app.version, helm_files, armada_files)
# Ensure that the old app plugins are enabled prior to armada process.
_activate_old_app_plugins(old_app)
if self._make_armada_request_with_monitor(old_app,
constants.APP_APPLY_OP,
overrides_str):
old_app_charts = [c.release for c in old_app.charts]
deployed_releases = helm_utils.retrieve_helm_releases()
for new_chart in new_app.charts:
if (new_chart.release not in old_app_charts and
new_chart.release in deployed_releases):
# Cleanup the releases in the new application version
# but are not in the old application version
helm_utils.delete_helm_release(new_chart.release)
else:
rc = False
except exception.ApplicationApplyFailure:
rc = False
except Exception as e:
# e.g. patch report error, application files cleanup error,
# helm release delete failure
self._update_app_status(
old_app, constants.APP_APPLY_SUCCESS,
constants.APP_PROGRESS_UPDATE_ABORTED.format(old_app.version, new_app.version) +
constants.APP_PROGRESS_RECOVER_COMPLETED.format(old_app.version) +
constants.APP_PROGRESS_CLEANUP_FAILED.format(new_app.version) +
'Please check logs for details.')
LOG.error(e)
return
finally:
# Ensure that the old app plugins are enabled after recovery
_activate_old_app_plugins(old_app)
self._record_auto_update_failed_versions(old_app, new_app)
if rc:
self._update_app_status(
old_app, constants.APP_APPLY_SUCCESS,
constants.APP_PROGRESS_UPDATE_ABORTED.format(old_app.version, new_app.version) +
constants.APP_PROGRESS_RECOVER_COMPLETED.format(old_app.version) +
'Please check logs for details.')
# Recovery from an app update failure succeeded, clear app alarm
self._clear_app_alarm(old_app.name)
LOG.info("Application %s recover to version %s completed."
% (old_app.name, old_app.version))
else:
self._update_app_status(
old_app, constants.APP_APPLY_FAILURE,
constants.APP_PROGRESS_UPDATE_ABORTED.format(old_app.version, new_app.version) +
constants.APP_PROGRESS_RECOVER_ABORTED.format(old_app.version) +
'Please check logs for details.')
LOG.error("Application %s recover to version %s aborted!"
% (old_app.name, old_app.version))
def _perform_app_rollback(self, from_app, to_app):
"""Perform application rollback request
This method invokes Armada to roll back the application releases to
the previously installed versions. The jobs for the currently installed
releases need to be cleaned up before starting the armada rollback.
:param from_app: application object that application updating from
:param to_app: application object that application updating to
:return boolean: whether application rollback was successful
"""
LOG.info("Application %s (%s) rollback started." % (to_app.name, to_app.version))
try:
if AppOperator.is_app_aborted(to_app.name):
raise exception.KubeAppAbort()
to_db_app = self._dbapi.kube_app_get(to_app.name)
to_app_releases = \
self._dbapi.kube_app_chart_release_get_all(to_db_app.id)
from_db_app = self._dbapi.kube_app_get_inactive_by_name_version(
from_app.name, version=from_app.version)
from_app_releases = \
self._dbapi.kube_app_chart_release_get_all(from_db_app.id)
from_app_r_dict = {r.release: r.version for r in from_app_releases}
self._update_app_status(
to_app, new_progress=constants.APP_PROGRESS_ROLLBACK_RELEASES)
if AppOperator.is_app_aborted(to_app.name):
raise exception.KubeAppAbort()
charts_sequence = {c.release: c.sequenced for c in to_app.charts}
charts_labels = {c.release: c.labels for c in to_app.charts}
for to_app_r in to_app_releases:
if to_app_r.version != 0:
if (to_app_r.release not in from_app_r_dict or
(to_app_r.release in from_app_r_dict and
to_app_r.version != from_app_r_dict[to_app_r.release])):
# Append the release which needs to be rolled back
to_app.releases.append(
{'release': to_app_r.release,
'version': to_app_r.version,
'sequenced': charts_sequence[to_app_r.release]})
# Clean up the jobs for the currently installed release
if to_app_r.release in charts_labels:
for label in charts_labels[to_app_r.release]:
self._kube.kube_delete_collection_namespaced_job(
to_app_r.namespace, label)
LOG.info("Jobs deleted for release %s" % to_app_r.release)
if AppOperator.is_app_aborted(to_app.name):
raise exception.KubeAppAbort()
if self._make_armada_request_with_monitor(to_app,
constants.APP_ROLLBACK_OP):
self._update_app_status(to_app, constants.APP_APPLY_SUCCESS,
constants.APP_PROGRESS_COMPLETED)
LOG.info("Application %s (%s) rollback completed."
% (to_app.name, to_app.version))
return True
except exception.KubeAppAbort:
# If the update operation is aborted before Armada request is made,
# we don't want to return False which would trigger the recovery
# routine with an Armada request.
raise
except Exception as e:
# unexpected KubeAppNotFound, KubeAppInactiveNotFound, KeyError
# k8s exception:fail to cleanup release jobs
LOG.exception(e)
LOG.error("Application rollback aborted!")
return False
def perform_app_upload(self, rpc_app, tarfile, lifecycle_hook_info_app_upload, images=False):
"""Process application upload request
This method validates the application manifest. If Helm charts are
included, they are validated and uploaded to the local Helm repo. It also
downloads the required docker images for custom apps during the upload
stage.
:param rpc_app: application object in the RPC request
:param tarfile: location of application tarfile
:param lifecycle_hook_info_app_upload: LifecycleHookInfo object
:param images: save application images in the registry as part of app upload
"""
app = AppOperator.Application(rpc_app)
LOG.info("Application %s (%s) upload started." % (app.name, app.version))
try:
# TODO (rchurch): Remove this version check once all applications
# have been decoupled. Since compatible plugins will be delivered
# with the versioned application tarball, no version check will be
# required. For decoupled apps, plugins are loaded later in this
# method and this base class version check is called.
if not self._helm.version_check(app.name, app.version):
LOG.info("Application %s (%s) upload rejected. Unsupported version."
% (app.name, app.version))
raise exception.KubeAppUploadFailure(
name=app.name,
version=app.version,
reason="Unsupported application version.")
app.tarfile = tarfile
if cutils.is_url(app.tarfile):
self._update_app_status(
app, new_progress=constants.APP_PROGRESS_TARFILE_DOWNLOAD)
downloaded_tarfile = self._download_tarfile(app)
if downloaded_tarfile is None:
raise exception.KubeAppUploadFailure(
name=app.name,
version=app.version,
reason="Failed to find the downloaded tarball.")
else:
app.tarfile = downloaded_tarfile
app.downloaded_tarfile = True
# Full extraction of application tarball at /scratch/apps.
# Manifest file is placed under /opt/platform/armada
# which is managed by drbd-sync and visible to Armada.
self._update_app_status(
app, new_progress=constants.APP_PROGRESS_EXTRACT_TARFILE)
with self._lock:
self._extract_tarfile(app)
self._plugins.install_plugins(app)
# Copy the armada manifest and metadata file to the drbd-synced location
shutil.copy(app.inst_armada_mfile, app.sync_armada_mfile)
inst_metadata_file = os.path.join(
app.inst_path, constants.APP_METADATA_FILE)
if os.path.exists(inst_metadata_file):
sync_metadata_file = os.path.join(
app.sync_armada_mfile_dir, constants.APP_METADATA_FILE)
shutil.copy(inst_metadata_file, sync_metadata_file)
if not self._armada.make_armada_request(
'validate', manifest_file=app.armada_service_mfile):
raise exception.KubeAppUploadFailure(
name=app.name,
version=app.version,
reason="Failed to validate application manifest.")
self._update_app_status(
app, new_progress=constants.APP_PROGRESS_VALIDATE_UPLOAD_CHARTS)
if os.path.isdir(app.inst_charts_dir):
self._validate_helm_charts(app)
with self._lock:
self._upload_helm_charts(app)
# System overrides will be generated here. Plugins must be activated
# prior to scraping chart/system/armada overrides for images
self._save_images_list(app)
if images:
# We need to download the images at upload_app so that subclouds
# may use the distributed cloud registry
self._update_app_status(
app, new_progress=constants.APP_PROGRESS_DOWNLOAD_IMAGES)
if AppOperator.is_app_aborted(app.name):
raise exception.KubeAppAbort()
self.download_images(app)
if app.patch_dependencies:
self._utils._patch_report_app_dependencies(
app.name + '-' + app.version, app.patch_dependencies)
self._create_app_releases_version(app.name, app.charts)
self._update_app_status(app, constants.APP_UPLOAD_SUCCESS,
constants.APP_PROGRESS_COMPLETED)
LOG.info("Application %s (%s) upload completed." % (app.name, app.version))
return app
except exception.KubeAppUploadFailure as e:
LOG.exception(e)
self._abort_operation(app, constants.APP_UPLOAD_OP, str(e))
raise
except Exception as e:
LOG.exception(e)
self._abort_operation(app, constants.APP_UPLOAD_OP)
raise exception.KubeAppUploadFailure(
name=app.name, version=app.version, reason=e)
def set_reapply(self, app_name):
lock_name = "%s_%s" % (LOCK_NAME_APP_REAPPLY, app_name)
@cutils.synchronized(lock_name, external=False)
def _sync_set_reapply(app_name):
return self._unsafe_set_reapply(app_name)
return _sync_set_reapply(app_name)
def _unsafe_set_reapply(self, app_name):
# Create app reapply flag
reapply_flag = cutils.app_reapply_flag_file(app_name)
open(reapply_flag, "w").close()
# Raise the pending automatic reapply alarm
entity = cutils.app_reapply_pending_fault_entity(app_name)
fault = fm_api.Fault(
alarm_id=fm_constants.FM_ALARM_ID_APPLICATION_REAPPLY_PENDING,
alarm_state=fm_constants.FM_ALARM_STATE_SET,
entity_type_id=fm_constants.FM_ENTITY_TYPE_APPLICATION,
entity_instance_id=entity,
severity=fm_constants.FM_ALARM_SEVERITY_WARNING,
reason_text=_(
"A configuration change requires a reapply of "
"the %s application.") % app_name,
alarm_type=fm_constants.FM_ALARM_TYPE_0,
probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_UNKNOWN,
proposed_repair_action=_(
"The application will be automatically reapplied."),
service_affecting=False)
self._fm_api.set_fault(fault)
def clear_reapply(self, app_name):
lock_name = "%s_%s" % (LOCK_NAME_APP_REAPPLY, app_name)
@cutils.synchronized(lock_name, external=False)
def _sync_clear_reapply(app_name):
return self._unsafe_clear_reapply(app_name)
return _sync_clear_reapply(app_name)
def _unsafe_clear_reapply(self, app_name):
# Remove app reapply flag
try:
reapply_flag = cutils.app_reapply_flag_file(app_name)
os.remove(reapply_flag)
except OSError:
pass
# Clear the pending automatic reapply alarm
target_entity = cutils.app_reapply_pending_fault_entity(app_name)
for alarm in self._fm_api.get_faults_by_id(
fm_constants.FM_ALARM_ID_APPLICATION_REAPPLY_PENDING) or []:
if alarm.entity_instance_id == target_entity:
self._fm_api.clear_fault(alarm.alarm_id,
alarm.entity_instance_id)
def needs_reapply(self, app_name):
lock_name = "%s_%s" % (LOCK_NAME_APP_REAPPLY, app_name)
@cutils.synchronized(lock_name, external=False)
def _sync_needs_reapply(app_name):
return self._unsafe_needs_reapply(app_name)
return _sync_needs_reapply(app_name)
def _unsafe_needs_reapply(self, app_name):
reapply_flag = cutils.app_reapply_flag_file(app_name)
flag_exists = os.path.isfile(reapply_flag)
if not flag_exists:
# Clear any stuck reapply alarm
target_entity = cutils.app_reapply_pending_fault_entity(app_name)
for alarm in self._fm_api.get_faults_by_id(
fm_constants.FM_ALARM_ID_APPLICATION_REAPPLY_PENDING) or []:
if alarm.entity_instance_id == target_entity:
self._fm_api.clear_fault(alarm.alarm_id,
alarm.entity_instance_id)
return flag_exists
def app_lifecycle_actions(self, context, conductor_obj, rpc_app, hook_info):
"""Perform application specific lifecycle actions
This method will perform any lifecycle actions necessary for the
application based on the operation and relative_timing of the operation.
:param context: request context
:param conductor_obj: conductor object
:param rpc_app: application object in the RPC request
:param hook_info: LifecycleHookInfo object
"""
app = AppOperator.Application(rpc_app)
# TODO(dvoicule): activate plugins once on upload, deactivate once during delete
# create another commit for this
self.activate_app_plugins(rpc_app)
LOG.info("lifecycle hook for application {} ({}) started {}."
.format(app.name, app.version, hook_info))
lifecycle_op = self._helm.get_app_lifecycle_operator(app.name)
lifecycle_op.app_lifecycle_actions(context, conductor_obj, self, app, hook_info)
@staticmethod
def recompute_app_evaluation_order(apps_metadata_dict):
""" Get the order of app reapplies based on dependencies
The following algorithm uses these concepts:
Root apps are apps that have no dependency.
Chain depth for an app is the number of apps that form the longest
chain ending in the current app.
Main logic:
Compute reverse graph (after_apps).
Determine root apps.
Detect cycles and abort.
Compute the longest dependency chain.
Traverse again to populate ordered list.
Assumptions:
In theory there are one or a few root apps that are the dominant vertices.
Other than the dominant vertices, there are very few vertices with
a degree greater than one; most of the vertices are either leaves or
isolated.
Chain depth is usually 0 or 1; a few apps have a chain depth of 2, 3 or 4.
The structure is a sparse digraph, or multiple separate sparse digraphs
with a total number of vertices equal to the number of apps.
Complexity analysis:
Space complexity O(V+E)
Cycle detection: O(V+E)
After cycle detection the graph is a DAG.
For computing the chain depth and final traversal a subgraph may be
revisited. Complexity would be O(V*E).
Let k = the number of apps whose vertex has in-degree > 1 and
that are not leaf apps. We can bound k by 0<=k<=10000, should we ever
reach that many apps.
Each node and each edge will be visited once, O(V+E) (root apps
+ edges down to the leaves).
Only k nodes will trigger a revisit of a subset of edges (k * O(E)).
Complexity now becomes O(V+(k+1)*E) = O(V+E)
Limitations:
If an app (current) depends only on non-existing apps, then the
current app will not be properly ordered. It will not be present in
the ordered list ahead of the other apps that are based on it.
If an app (current) depends only on non platform managed apps, then the
current app will not be properly ordered. It will not be present in
the ordered list ahead of the other apps that are based on it.
:param: apps_metadata_dict dictionary containing parsed and processed
metadata collection
:return: Sorted list containing the app reapply order.
"""
# Apps directly after current
after_apps = {}
# Remember the maximum depth
chain_depth = {}
# Used to detect cycles
cycle_depth = {}
# Used for second traversal when populating ordered list
traverse_depth = {}
# Final result
ordered_apps = []
apps_metadata_dict[constants.APP_METADATA_ORDERED_APPS] = ordered_apps
# Initialize structures
for app_name in apps_metadata_dict[constants.APP_METADATA_PLATFORM_MANAGED_APPS]:
after_apps[app_name] = []
chain_depth[app_name] = 0
cycle_depth[app_name] = 0
traverse_depth[app_name] = 0
# For each app remember which apps are directly after
for app_name in apps_metadata_dict[constants.APP_METADATA_PLATFORM_MANAGED_APPS]:
app_metadata = apps_metadata_dict[constants.APP_METADATA_APPS][app_name]
metadata_after = app_metadata.get(constants.APP_METADATA_BEHAVIOR, None)
if metadata_after is not None:
metadata_after = metadata_after.get(constants.APP_METADATA_EVALUATE_REAPPLY, None)
if metadata_after is not None:
metadata_after = metadata_after.get(constants.APP_METADATA_AFTER, None)
if metadata_after is not None:
for before_app in metadata_after:
# This one may be a non-existing app, need to initialize
if after_apps.get(before_app, None) is None:
after_apps[before_app] = []
# Store information
after_apps[before_app].append(app_name)
# Remember that current app is before at least one
chain_depth[app_name] = 1
traverse_depth[app_name] = 1
# Identify root apps
root_apps = []
for app_name in apps_metadata_dict[constants.APP_METADATA_PLATFORM_MANAGED_APPS]:
if chain_depth.get(app_name, None) == 0:
root_apps.append(app_name)
# Used for cycle detection
stack_ = queue.LifoQueue()
cycle_checked = {}
max_depth = len(apps_metadata_dict[constants.APP_METADATA_PLATFORM_MANAGED_APPS])
# Detect cycles and abort
for app_name in apps_metadata_dict[constants.APP_METADATA_PLATFORM_MANAGED_APPS]:
# Skip already checked app
if cycle_checked.get(app_name, False) is True:
continue
# Start from this
stack_.put(app_name)
# Reinitialize temporary visited
visited = {}
# Traverse DFS to detect cycles
while not stack_.empty():
app_name = stack_.get_nowait()
visited[app_name] = True
# Skip already checked app
if cycle_checked.get(app_name, False) is True:
continue
for after in after_apps[app_name]:
cycle_depth[after] = max(cycle_depth[app_name] + 1, cycle_depth[after])
# Detected cycle
if cycle_depth[after] > max_depth:
return ordered_apps
stack_.put(after)
# Remember the temporary visited apps to skip them in the future
for r in visited.keys():
cycle_checked[r] = True
# Used for traversal
queue_ = queue.Queue()
# Compute the longest dependency chain starting from root apps
for app_name in root_apps:
queue_.put(app_name)
# Traverse similar to BFS to compute the longest dependency chain
while not queue_.empty():
app_name = queue_.get_nowait()
for after in after_apps[app_name]:
chain_depth[after] = max(chain_depth[app_name] + 1, chain_depth[after])
queue_.put(after)
# Traverse graph again similar to BFS
# Add to ordered list when the correct chain depth is reached
found = {}
for app_name in root_apps:
queue_.put(app_name)
found[app_name] = True
ordered_apps.append(app_name)
while not queue_.empty():
app_name = queue_.get_nowait()
for after in after_apps[app_name]:
traverse_depth[after] = max(traverse_depth[app_name] + 1, traverse_depth[after])
# This is the correct depth, add to ordered list
if traverse_depth[after] == chain_depth[after]:
# Skip if already added
if found.get(after, False) is True:
continue
found[after] = True
ordered_apps.append(after)
queue_.put(after)
# Add apps that have dependencies on non-existing apps
for app_name in apps_metadata_dict[constants.APP_METADATA_PLATFORM_MANAGED_APPS]:
if found.get(app_name, False) is True:
continue
ordered_apps.append(app_name)
LOG.info("Applications reapply order: {}".format(ordered_apps))
apps_metadata_dict[constants.APP_METADATA_ORDERED_APPS] = ordered_apps
@staticmethod
@cutils.synchronized(LOCK_NAME_PROCESS_APP_METADATA, external=False)
def update_and_process_app_metadata(apps_metadata_dict, app_name, metadata, overwrite=True):
""" Update the cached metadata for an app
:param apps_metadata_dict: The dictionary being the cache
:param app_name: Name of the app
:param metadata: Metadata that will replace the old one
:param overwrite: If metadata is already present in the cache for this app,
then overwrite needs to be enabled to do the replacement
"""
if not overwrite and \
app_name in apps_metadata_dict[constants.APP_METADATA_APPS]:
LOG.info("Updating metadata for app {} skipped because metadata "
"is present and overwrite is not enabled"
"".format(app_name))
return
apps_metadata_dict[constants.APP_METADATA_APPS][app_name] = metadata
LOG.info("Loaded metadata for app {}: {}".format(app_name, metadata))
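# For reference, the keys consumed below and by the reapply-order computation
# (shown by their constant names, since the literal YAML strings live in
# constants) roughly form this shape:
#   APP_METADATA_BEHAVIOR:
#     APP_METADATA_PLATFORM_MANAGED_APP: <boolean-like string>
#     APP_METADATA_DESIRED_STATE: <state>
#     APP_METADATA_EVALUATE_REAPPLY:
#       APP_METADATA_AFTER: [<other app names>]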
behavior = metadata.get(constants.APP_METADATA_BEHAVIOR, None)
if behavior is not None:
is_managed = behavior.get(constants.APP_METADATA_PLATFORM_MANAGED_APP, None)
desired_state = behavior.get(constants.APP_METADATA_DESIRED_STATE, None)
# Remember if the app wants to be managed by the platform
if cutils.is_valid_boolstr(is_managed):
apps_metadata_dict[
constants.APP_METADATA_PLATFORM_MANAGED_APPS][app_name] = None
LOG.info("App {} requested to be platform managed"
"".format(app_name))
# Recompute app reapply order
AppOperator.recompute_app_evaluation_order(apps_metadata_dict)
# Remember the desired state the app should achieve
if desired_state is not None:
apps_metadata_dict[
constants.APP_METADATA_DESIRED_STATES][app_name] = desired_state
LOG.info("App {} requested to achieve {} state"
"".format(app_name, desired_state))
def load_application_metadata_from_database(self, rpc_app):
""" Load the application metadata from the database
:param rpc_app: KubeApp model object
"""
LOG.info("Loading application metadata for {} from database"
"".format(rpc_app.name))
app = AppOperator.Application(rpc_app)
metadata = {}
# Load metadata as a dictionary from a column in the database
db_app = self._dbapi.kube_app_get(app.name)
if db_app.app_metadata:
metadata = db_app.app_metadata or {}
AppOperator.update_and_process_app_metadata(self._apps_metadata,
app.name,
metadata)
def load_application_metadata_from_file(self, rpc_app):
""" Load the application metadata from the metadata file of the app
:param rpc_app: data object provided in the rpc request
"""
LOG.info("Loading application metadata for {} from file"
"".format(rpc_app.name))
app = AppOperator.Application(rpc_app)
metadata = {}
if os.path.exists(app.sync_metadata_file):
with io.open(app.sync_metadata_file, 'r', encoding='utf-8') as f:
# The RoundTripLoader removes the superfluous quotes by default.
# Set preserve_quotes=True to preserve all the quotes.
# The assumption here: there is just one yaml section
metadata = yaml.load(
f, Loader=yaml.RoundTripLoader, preserve_quotes=True) or {}
AppOperator.update_and_process_app_metadata(self._apps_metadata,
app.name,
metadata)
# Save metadata as a dictionary in a column in the database
rpc_app.app_metadata = metadata
rpc_app.save()
def perform_app_apply(self, rpc_app, mode, lifecycle_hook_info_app_apply, caller=None):
"""Process application install request
This method processes node labels per configuration and invokes
Armada to apply the application manifest.
For OpenStack app (system app), the method generates combined
overrides (a merge between system and user overrides if available)
for the charts that comprise the app before downloading docker images
and applying the manifest.
Usage: the method can be invoked at initial install or after the
user has either made some manual configuration changes or
or applied (new) user overrides to some Helm chart(s) to
correct/update a previous manifest apply.
:param rpc_app: application object in the RPC request
:param mode: mode to control how to apply application manifest
:param lifecycle_hook_info_app_apply: LifecycleHookInfo object
:param caller: internal caller, None if it is an RPC call,
otherwise apply is invoked from update method
:return boolean: whether application apply was successful
"""
app = AppOperator.Application(rpc_app)
# If apply is called from update method, the app's abort status has
# already been registered.
if not caller:
self._register_app_abort(app.name)
self._raise_app_alarm(app.name, constants.APP_APPLY_IN_PROGRESS,
fm_constants.FM_ALARM_ID_APPLICATION_APPLYING,
fm_constants.FM_ALARM_SEVERITY_WARNING,
_("Application Apply In Progress"),
fm_constants.FM_ALARM_TYPE_0,
_("No action required."),
True)
self.clear_reapply(app.name)
LOG.info("Application %s (%s) apply started." % (app.name, app.version))
overrides_str = ''
ready = True
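# The apply is split into two try blocks: the first prepares the app
# (pre lifecycle hooks, override generation, image download) and converts any
# failure into KubeAppApplyFailure; the second performs the actual Armada
# apply and, on failure, runs the post hooks and aborts the operation.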
try:
app.charts = self._get_list_of_charts(app.sync_armada_mfile)
if AppOperator.is_app_aborted(app.name):
raise exception.KubeAppAbort()
# Perform app resources actions
lifecycle_hook_info_app_apply.relative_timing = constants.APP_LIFECYCLE_TIMING_PRE
lifecycle_hook_info_app_apply.lifecycle_type = constants.APP_LIFECYCLE_TYPE_RESOURCE
self.app_lifecycle_actions(None, None, rpc_app, lifecycle_hook_info_app_apply)
# Perform rbd actions
lifecycle_hook_info_app_apply.relative_timing = constants.APP_LIFECYCLE_TIMING_PRE
lifecycle_hook_info_app_apply.lifecycle_type = constants.APP_LIFECYCLE_TYPE_RBD
self.app_lifecycle_actions(None, None, rpc_app, lifecycle_hook_info_app_apply)
self._update_app_status(
app, new_progress=constants.APP_PROGRESS_GENERATE_OVERRIDES)
if AppOperator.is_app_aborted(app.name):
raise exception.KubeAppAbort()
LOG.info("Generating application overrides...")
self._helm.generate_helm_application_overrides(
app.sync_overrides_dir, app.name, mode, cnamespace=None,
armada_format=True, armada_chart_info=app.charts, combined=True)
(helm_files, armada_files) = self._get_overrides_files(
app.sync_overrides_dir, app.charts, app.name, mode)
if helm_files or armada_files:
LOG.info("Application overrides generated.")
overrides_str = self._generate_armada_overrides_str(
app.name, app.version, helm_files, armada_files)
self._update_app_status(
app, new_progress=constants.APP_PROGRESS_DOWNLOAD_IMAGES)
if AppOperator.is_app_aborted(app.name):
raise exception.KubeAppAbort()
self.download_images(app)
else:
ready = False
except Exception as e:
LOG.exception(e)
if AppOperator.is_app_aborted(app.name):
self._abort_operation(app, constants.APP_APPLY_OP,
user_initiated=True)
else:
self._abort_operation(app, constants.APP_APPLY_OP,
constants.APP_PROGRESS_ABORTED)
if not caller:
# If apply is not called from update method, deregister the app's
# abort status. Otherwise, it will be done in the update method.
self._deregister_app_abort(app.name)
if isinstance(e, exception.KubeAppApplyFailure):
# ex:Image download failure
raise
else:
# ex:K8s resource creation failure, user abort
raise exception.KubeAppApplyFailure(
name=app.name, version=app.version, reason=e)
try:
if ready:
# Perform pre apply manifest actions
lifecycle_hook_info_app_apply.relative_timing = constants.APP_LIFECYCLE_TIMING_PRE
lifecycle_hook_info_app_apply.lifecycle_type = constants.APP_LIFECYCLE_TYPE_MANIFEST
self.app_lifecycle_actions(None, None, rpc_app, lifecycle_hook_info_app_apply)
self._update_app_status(
app, new_progress=constants.APP_PROGRESS_APPLY_MANIFEST)
if AppOperator.is_app_aborted(app.name):
raise exception.KubeAppAbort()
if self._make_armada_request_with_monitor(app,
constants.APP_APPLY_OP,
overrides_str):
self._update_app_releases_version(app.name)
self._update_app_status(app,
constants.APP_APPLY_SUCCESS,
constants.APP_PROGRESS_COMPLETED)
app.update_active(True)
if not caller:
self._clear_app_alarm(app.name)
LOG.info("Application %s (%s) apply completed." % (app.name, app.version))
# Perform post apply manifest actions
lifecycle_hook_info_app_apply.relative_timing = constants.APP_LIFECYCLE_TIMING_POST
lifecycle_hook_info_app_apply.lifecycle_type = constants.APP_LIFECYCLE_TYPE_MANIFEST
lifecycle_hook_info_app_apply[LifecycleConstants.EXTRA][LifecycleConstants.MANIFEST_APPLIED] = True
self.app_lifecycle_actions(None, None, rpc_app, lifecycle_hook_info_app_apply)
return True
except Exception as e:
# ex: update release version failure, user abort
LOG.exception(e)
# Perform post apply manifest actions
lifecycle_hook_info_app_apply.relative_timing = constants.APP_LIFECYCLE_TIMING_POST
lifecycle_hook_info_app_apply.lifecycle_type = constants.APP_LIFECYCLE_TYPE_MANIFEST
lifecycle_hook_info_app_apply[LifecycleConstants.EXTRA][LifecycleConstants.MANIFEST_APPLIED] = False
self.app_lifecycle_actions(None, None, rpc_app, lifecycle_hook_info_app_apply)
# Perform rbd actions
lifecycle_hook_info_app_apply.relative_timing = constants.APP_LIFECYCLE_TIMING_POST
lifecycle_hook_info_app_apply.lifecycle_type = constants.APP_LIFECYCLE_TYPE_RBD
self.app_lifecycle_actions(None, None, rpc_app, lifecycle_hook_info_app_apply)
# Perform app resources actions
lifecycle_hook_info_app_apply.relative_timing = constants.APP_LIFECYCLE_TIMING_POST
lifecycle_hook_info_app_apply.lifecycle_type = constants.APP_LIFECYCLE_TYPE_RESOURCE
self.app_lifecycle_actions(None, None, rpc_app, lifecycle_hook_info_app_apply)
# If it gets here, something went wrong
if AppOperator.is_app_aborted(app.name):
self._abort_operation(app, constants.APP_APPLY_OP, user_initiated=True)
else:
self._abort_operation(app, constants.APP_APPLY_OP)
if not caller:
# If apply is not called from update method, deregister the app's abort status.
# Otherwise, it will be done in the update method.
self._deregister_app_abort(app.name)
return False
def perform_app_update(self, from_rpc_app, to_rpc_app, tarfile,
operation, lifecycle_hook_info_app_update, reuse_user_overrides=None):
"""Process application update request
This method leverages the existing application upload workflow to
validate/upload the new application tarfile, then invokes Armada
apply or rollback to update the application from an applied version
to the new version. If any failure happens during updating, the
recover action will be triggered to recover the application to
the old version.
After apply/rollback to the new version is done, the files for the
old application version will be cleaned up as well as the releases
which are not in the new application version.
The app status will be set to "applied" once the update is completed
so that the user can continue applying the app with user overrides.
Usage ex: the method can be used to update from v1 to v2 and also
update back from v2 to v1
:param from_rpc_app: application object in the RPC request that
application updating from
:param to_rpc_app: application object in the RPC request that
application updating to
:param tarfile: location of application tarfile
:param operation: apply or rollback
:param lifecycle_hook_info_app_update: LifecycleHookInfo object
:param reuse_user_overrides: (optional) True or False
"""
from_app = AppOperator.Application(from_rpc_app)
to_app = AppOperator.Application(to_rpc_app)
self._register_app_abort(to_app.name)
self._raise_app_alarm(to_app.name, constants.APP_UPDATE_IN_PROGRESS,
fm_constants.FM_ALARM_ID_APPLICATION_UPDATING,
fm_constants.FM_ALARM_SEVERITY_WARNING,
_("Application Update In Progress"),
fm_constants.FM_ALARM_TYPE_0,
_("No action required."),
True)
LOG.info("Start updating Application %s from version %s to version %s ..."
% (to_app.name, from_app.version, to_app.version))
try:
# Upload new app tarball. The upload will enable the new plugins to
# generate overrides for images. Disable the plugins for the current
# application as the new plugin module will have the same name. Only
# one version of the module can be enabled at any given moment
self._plugins.deactivate_plugins(from_app)
# Note: this will not trigger the upload hooks present in conductor/manager:perform_app_upload
# Note: here we lose the information that this is an upload triggered by an update
# TODO(dvoicule): we may want to also trigger the upload hooks
# TODO(dvoicule): we may want to track the fact that this is called during an update
lifecycle_hook_info_app_update.operation = constants.APP_UPLOAD_OP
to_app = self.perform_app_upload(to_rpc_app, tarfile,
lifecycle_hook_info_app_upload=lifecycle_hook_info_app_update)
lifecycle_hook_info_app_update.operation = constants.APP_UPDATE_OP
# Semantic checking for N+1 app
try:
lifecycle_hook_info = copy.deepcopy(lifecycle_hook_info_app_update)
lifecycle_hook_info.relative_timing = constants.APP_LIFECYCLE_TIMING_PRE
lifecycle_hook_info.lifecycle_type = constants.APP_LIFECYCLE_TYPE_SEMANTIC_CHECK
lifecycle_hook_info[LifecycleConstants.EXTRA][LifecycleConstants.TO_APP] = True
self.app_lifecycle_actions(None, None, to_rpc_app, lifecycle_hook_info)
except exception.LifecycleSemanticCheckException as e:
LOG.info("App {} rejected operation {} for reason: {}"
"".format(to_app.name, constants.APP_UPDATE_OP, str(e)))
# lifecycle hooks not used in perform_app_recover
return self._perform_app_recover(from_app, to_app,
armada_process_required=False)
except Exception as e:
LOG.error("App {} operation {} semantic check error: {}"
"".format(to_app.name, constants.APP_UPDATE_OP, str(e)))
# lifecycle hooks not used in perform_app_recover
return self._perform_app_recover(from_app, to_app,
armada_process_required=False)
self.load_application_metadata_from_file(to_rpc_app)
# Check whether the new application is compatible with the current k8s version
self._utils._check_app_compatibility(to_app.name, to_app.version)
self._update_app_status(to_app, constants.APP_UPDATE_IN_PROGRESS)
# Get the skip_recovery flag from app metadata
keys = [constants.APP_METADATA_UPGRADES,
constants.APP_METADATA_UPDATE_FAILURE_SKIP_RECOVERY]
skip_recovery = bool(strtobool(str(self._get_metadata_value(to_app, keys, False))))
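# Assuming strtobool here is distutils.util.strtobool (the usual import), it
# maps "y"/"yes"/"t"/"true"/"on"/"1" to 1 and their negative counterparts to 0,
# so the str()/bool() wrappers normalize the metadata flag, whether it was
# stored as a boolean or as a boolean-like string.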
result = False
if operation == constants.APP_APPLY_OP:
reuse_overrides = \
self._get_metadata_value(to_app,
constants.APP_METADATA_MAINTAIN_USER_OVERRIDES,
False)
if reuse_user_overrides is not None:
reuse_overrides = reuse_user_overrides
# Preserve user overrides for the new app
if reuse_overrides:
self._preserve_user_overrides(from_app, to_app)
# The app_apply will generate new versioned overrides for the
# app upgrade and will enable the new plugins for that version.
# Note: this will not trigger the apply hooks present in conductor/manager:perform_app_apply
# Note: here we lose the information that this is an apply triggered by an update
# TODO(dvoicule): we may want to also trigger the apply hooks
# TODO(dvoicule): we may want to track the fact that this is called during an update
lifecycle_hook_info_app_update.operation = constants.APP_APPLY_OP
result = self.perform_app_apply(to_rpc_app, mode=None,
lifecycle_hook_info_app_apply=lifecycle_hook_info_app_update,
caller='update')
lifecycle_hook_info_app_update.operation = constants.APP_UPDATE_OP
elif operation == constants.APP_ROLLBACK_OP:
# The app_rollback will use the previous helm releases known to
# the k8s cluster. Overrides are not generated from any plugins
# in this case. Make sure that the enabled plugins correspond to
# the version expected to be activated
self._plugins.activate_plugins(to_app)
# lifecycle hooks not used in perform_app_rollback
result = self._perform_app_rollback(from_app, to_app)
operation_successful = result
# If operation failed consider doing the app recovery
do_recovery = not operation_successful
# Here the app operation failed (do_recovery is True)
# but skip_recovery requested.
if skip_recovery and do_recovery:
LOG.info("Application %s (%s) has configured skip_recovery %s"
", recovery skipped.",
to_app.name, to_app.version, skip_recovery)
do_recovery = False
# If recovery is requested stop the flow of execution here
if do_recovery:
LOG.error("Application %s update from version %s to version "
"%s aborted." % (to_app.name, from_app.version, to_app.version))
# lifecycle hooks not used in perform_app_recover
return self._perform_app_recover(from_app, to_app)
self._update_app_status(to_app, constants.APP_UPDATE_IN_PROGRESS,
"cleanup application version {}".format(from_app.version))
# App apply/rollback succeeded or it failed but skip_recovery was set
# Starting cleanup old application
from_app.charts = self._get_list_of_charts(from_app.sync_armada_mfile)
to_app_charts = [c.release for c in to_app.charts]
deployed_releases = helm_utils.retrieve_helm_releases()
for from_chart in from_app.charts:
if (from_chart.release not in to_app_charts and
from_chart.release in deployed_releases):
# Cleanup the releases in the old application version
# but are not in the new application version
helm_utils.delete_helm_release(from_chart.release)
LOG.info("Helm release %s for Application %s (%s) deleted"
% (from_chart.release, from_app.name, from_app.version))
self._cleanup(from_app, app_dir=False)
self._utils._patch_report_app_dependencies(
from_app.name + '-' + from_app.version)
# The initial operation for to_app is successful
if operation_successful:
self._update_app_status(
to_app, constants.APP_APPLY_SUCCESS,
constants.APP_PROGRESS_UPDATE_COMPLETED.format(
from_app.version, to_app.version))
LOG.info("Application %s update from version %s to version "
"%s completed." % (to_app.name, from_app.version, to_app.version))
# The initial operation for to_app failed
# This is reached here only when skip_recovery is requested
# Need to inform the user
else:
message = \
constants.APP_PROGRESS_UPDATE_FAILED_SKIP_RECOVERY.format(
to_app.name, from_app.version, to_app.version)
self._update_app_status(
to_app, constants.APP_APPLY_FAILURE, message)
LOG.info(message)
except (exception.IncompatibleKubeVersion,
exception.KubeAppUploadFailure,
exception.KubeAppApplyFailure,
exception.KubeAppAbort) as e:
# Error occurs during app uploading or applying but before
# armada apply process...
# i.e. image download / k8s resource creation failure
# Start recovering without triggering the armada process
LOG.exception(e)
# lifecycle hooks not used in perform_app_recover
return self._perform_app_recover(from_app, to_app,
armada_process_required=False)
except Exception as e:
# Application updated successfully (armada apply/rollback)
# Error occurred during cleanup of the old app,
# i.e. delete app files failure, patch controller failure,
# helm release delete failure
self._update_app_status(
to_app, constants.APP_APPLY_SUCCESS,
constants.APP_PROGRESS_UPDATE_COMPLETED.format(from_app.version, to_app.version) +
constants.APP_PROGRESS_CLEANUP_FAILED.format(from_app.version) +
'please check logs for detail.')
LOG.exception(e)
finally:
self._deregister_app_abort(to_app.name)
self._clear_app_alarm(to_app.name)
return True
def perform_app_remove(self, rpc_app, lifecycle_hook_info_app_remove):
"""Process application remove request
This method invokes Armada to delete the application manifest.
For system app, it also cleans up old test pods.
:param rpc_app: application object in the RPC request
:param lifecycle_hook_info_app_remove: LifecycleHookInfo object
:return boolean: whether application remove was successful
"""
app = AppOperator.Application(rpc_app)
self._register_app_abort(app.name)
self.clear_reapply(app.name)
LOG.info("Application (%s) remove started." % app.name)
rc = True
app.charts = self._get_list_of_charts(app.sync_armada_mfile)
app.update_active(False)
self._update_app_status(
app, new_progress=constants.APP_PROGRESS_DELETE_MANIFEST)
if self._make_armada_request_with_monitor(app, constants.APP_DELETE_OP):
# After armada delete, the data for the releases are purged from
# tiller/etcd; the release info for the active app stored in the sysinv
# db should be set back to 0 and the inactive apps need to be
# destroyed too.
db_app = self._dbapi.kube_app_get(app.name)
app_releases = self._dbapi.kube_app_chart_release_get_all(db_app.id)
for r in app_releases:
if r.version != 0:
self._dbapi.kube_app_chart_release_update(
db_app.id, r.release, r.namespace, {'version': 0})
if self._dbapi.kube_app_get_inactive(app.name):
self._dbapi.kube_app_destroy(app.name, inactive=True)
try:
# Perform rbd actions
lifecycle_hook_info_app_remove.relative_timing = constants.APP_LIFECYCLE_TIMING_POST
lifecycle_hook_info_app_remove.lifecycle_type = constants.APP_LIFECYCLE_TYPE_RBD
self.app_lifecycle_actions(None, None, rpc_app, lifecycle_hook_info_app_remove)
# Perform app resources actions
lifecycle_hook_info_app_remove.relative_timing = constants.APP_LIFECYCLE_TIMING_POST
lifecycle_hook_info_app_remove.lifecycle_type = constants.APP_LIFECYCLE_TYPE_RESOURCE
self.app_lifecycle_actions(None, None, rpc_app, lifecycle_hook_info_app_remove)
except Exception as e:
self._abort_operation(app, constants.APP_REMOVE_OP)
LOG.exception(e)
self._deregister_app_abort(app.name)
return False
self._update_app_status(app, constants.APP_UPLOAD_SUCCESS,
constants.APP_PROGRESS_COMPLETED)
# In case there is an existing alarm for previous remove failure
self._clear_app_alarm(app.name)
LOG.info("Application (%s) remove completed." % app.name)
else:
if AppOperator.is_app_aborted(app.name):
self._abort_operation(app, constants.APP_REMOVE_OP,
user_initiated=True)
else:
self._abort_operation(app, constants.APP_REMOVE_OP)
rc = False
self._deregister_app_abort(app.name)
return rc
def activate(self, rpc_app):
app = AppOperator.Application(rpc_app)
with self._lock:
return app.update_active(True)
def deactivate(self, rpc_app):
app = AppOperator.Application(rpc_app)
with self._lock:
return app.update_active(False)
def get_appname(self, rpc_app):
app = AppOperator.Application(rpc_app)
return app.name
def is_app_active(self, rpc_app):
app = AppOperator.Application(rpc_app)
return app.active
def perform_app_abort(self, rpc_app, lifecycle_hook_info_app_abort):
"""Process application abort request
This method retrieves the latest application status from the
database and sets the abort flag if the apply/update/remove
operation is still in progress. The corresponding app processing
thread will check the flag and abort the operation in the very
next opportunity. The method also stops the Armada service and
clears locks in case the app processing thread has made a
request to Armada.
:param rpc_app: application object in the RPC request
:param lifecycle_hook_info_app_abort: LifecycleHookInfo object
"""
app = AppOperator.Application(rpc_app)
# Retrieve the latest app status from the database
db_app = self._dbapi.kube_app_get(app.name)
if db_app.status in [constants.APP_APPLY_IN_PROGRESS,
constants.APP_UPDATE_IN_PROGRESS,
constants.APP_REMOVE_IN_PROGRESS]:
# Turn on the abort flag so the processing thread that is
# in progress can bail out in the next opportunity.
self._set_abort_flag(app.name)
# Stop the Armada request in case it has reached this far and
# remove locks.
# TODO(jgauld): Need to correct lock mechanism, something is no
# longer working for application aborts. The lock lingers around,
# and only gets cleaned up automatically after a long period.
# Subsequent reapply fails since we cannot get the lock.
with self._lock:
self._armada.stop_armada_request()
self._armada.clear_armada_locks()
else:
# Either the previous operation has completed or already failed
LOG.info("Abort request ignored. The previous operation for app %s "
"has either completed or failed." % app.name)
def perform_app_delete(self, rpc_app, lifecycle_hook_info_app_delete):
"""Process application remove request
This method removes the application entry from the database and
performs cleanup which entails removing node labels where applicable
and purging all application files from the system.
:param rpc_app: application object in the RPC request
:param lifecycle_hook_info_app_delete: LifecycleHookInfo object
"""
app = AppOperator.Application(rpc_app)
try:
# Perform rbd actions
lifecycle_hook_info_app_delete.relative_timing = constants.APP_LIFECYCLE_TIMING_PRE
lifecycle_hook_info_app_delete.lifecycle_type = constants.APP_LIFECYCLE_TYPE_RBD
self.app_lifecycle_actions(None, None, rpc_app, lifecycle_hook_info_app_delete)
# Perform app resources actions
lifecycle_hook_info_app_delete.relative_timing = constants.APP_LIFECYCLE_TIMING_PRE
lifecycle_hook_info_app_delete.lifecycle_type = constants.APP_LIFECYCLE_TYPE_RESOURCE
self.app_lifecycle_actions(None, None, rpc_app, lifecycle_hook_info_app_delete)
self._plugins.deactivate_plugins(app)
self._dbapi.kube_app_destroy(app.name)
self._cleanup(app)
self._utils._patch_report_app_dependencies(app.name + '-' + app.version)
# One last check of app alarm, should be no-op unless the
# user deletes the application following an upload failure.
self._clear_app_alarm(app.name)
LOG.info("Application (%s) has been purged from the system." %
app.name)
msg = None
except Exception as e:
# Possible exceptions are KubeAppDeleteFailure,
# OSError and unexpectedly KubeAppNotFound
LOG.exception(e)
msg = str(e)
return msg
class Application(object):
""" Data object to encapsulate all data required to
support application related operations.
"""
def __init__(self, rpc_app):
self._kube_app = rpc_app
self.tarfile = None
self.downloaded_tarfile = False
# Directories: Installation specific, local to a controller. Not
# synced
self.inst_path = os.path.join(constants.APP_INSTALL_PATH,
self._kube_app.get('name'),
self._kube_app.get('app_version'))
self.inst_charts_dir = os.path.join(self.inst_path, 'charts')
self.inst_images_dir = os.path.join(self.inst_path, 'images')
self.inst_plugins_dir = os.path.join(self.inst_path, 'plugins')
# Files: Installation specific, local to a controller. Not synced
self.inst_armada_mfile = generate_install_manifest_fqpn(
self._kube_app.get('name'),
self._kube_app.get('app_version'),
self._kube_app.get('manifest_file'))
# Directories: DRBD Synced between controllers
self.sync_overrides_dir = generate_synced_helm_overrides_dir(
self._kube_app.get('name'),
self._kube_app.get('app_version'))
self.sync_plugins_dir = generate_synced_app_plugins_dir(
self._kube_app.get('name'),
self._kube_app.get('app_version'))
self.sync_armada_mfile_dir = cutils.generate_synced_armada_dir(
self._kube_app.get('name'),
self._kube_app.get('app_version'))
# Files: DRBD synced between controllers
self.sync_armada_mfile = cutils.generate_synced_armada_manifest_fqpn(
self._kube_app.get('name'),
self._kube_app.get('app_version'),
self._kube_app.get('manifest_file'))
self.sync_imgfile = generate_synced_images_fqpn(
self._kube_app.get('name'),
self._kube_app.get('app_version'))
self.sync_metadata_file = cutils.generate_synced_metadata_fqpn(
self._kube_app.get('name'),
self._kube_app.get('app_version'))
# Files: FQPN formatted for the docker armada_service
self.armada_service_mfile = generate_armada_service_manifest_fqpn(
self._kube_app.get('name'),
self._kube_app.get('app_version'),
self._kube_app.get('manifest_file'))
self.patch_dependencies = []
self.charts = []
self.releases = []
@property
def system_app(self):
if (os.path.exists(self.sync_plugins_dir) and
os.listdir(self.sync_plugins_dir)):
return True
return False
@property
def name(self):
return self._kube_app.get('name')
@property
def version(self):
return self._kube_app.get('app_version')
@property
def status(self):
return self._kube_app.get('status')
@property
def progress(self):
return self._kube_app.get('progress')
@property
def active(self):
return self._kube_app.get('active')
@property
def recovery_attempts(self):
return self._kube_app.get('recovery_attempts')
@property
def mode(self):
return self._kube_app.get('mode')
@property
def app_metadata(self):
return self._kube_app.get('app_metadata')
def update_app_metadata(self, new_metadata):
if self.app_metadata != new_metadata:
self._kube_app.app_metadata = new_metadata
self._kube_app.save()
def update_status(self, new_status, new_progress):
self._kube_app.status = new_status
if new_progress:
self._kube_app.progress = new_progress
self._kube_app.save()
def update_active(self, active):
was_active = self.active
if active != self.active:
self._kube_app.active = active
self._kube_app.save()
return was_active
def regenerate_manifest_filename(self, new_mname, new_mfile):
self._kube_app.manifest_name = new_mname
self._kube_app.manifest_file = new_mfile
self.armada_service_mfile = generate_armada_service_manifest_fqpn(
self.name, self.version, new_mfile)
self.sync_armada_mfile = cutils.generate_synced_armada_manifest_fqpn(
self.name, self.version, new_mfile)
self.inst_armada_mfile = generate_install_manifest_fqpn(
self.name, self.version, new_mfile)
def regenerate_application_info(self, new_name, new_version, new_patch_dependencies):
self._kube_app.name = new_name
self._kube_app.app_version = new_version
new_armada_dir = cutils.generate_synced_armada_dir(
self.name, self.version)
shutil.move(self.sync_armada_mfile_dir, new_armada_dir)
shutil.rmtree(os.path.dirname(self.sync_armada_mfile_dir))
self.sync_armada_mfile_dir = new_armada_dir
new_path = os.path.join(
constants.APP_INSTALL_PATH, self.name, self.version)
shutil.move(self.inst_path, new_path)
shutil.rmtree(os.path.dirname(self.inst_path))
self.inst_path = new_path
self.inst_charts_dir = os.path.join(self.inst_path, 'charts')
self.inst_images_dir = os.path.join(self.inst_path, 'images')
self.sync_imgfile = generate_synced_images_fqpn(self.name, self.version)
self.sync_overrides_dir = generate_synced_helm_overrides_dir(self.name, self.version)
self.patch_dependencies = new_patch_dependencies
self.inst_plugins_dir = os.path.join(self.inst_path, 'plugins')
self.sync_plugins_dir = generate_synced_app_plugins_dir(new_name, new_version)
class DockerHelper(object):
""" Utility class to encapsulate Docker related operations """
def __init__(self, dbapi):
self._dbapi = dbapi
def _parse_barbican_secret(self, secret_ref):
"""Get the registry credentials from the
barbican secret payload
The format of the credentials stored in
barbican secret:
username:xxx password:xxx
:param secret_ref: barbican secret ref/uuid
:return: dict of registry credentials
"""
operator = openstack.OpenStackOperator(self._dbapi)
payload = operator.get_barbican_secret_payload(secret_ref)
if not payload:
raise exception.SysinvException(_(
"Unable to get the payload of Barbican secret "
"%s" % secret_ref))
try:
username, password = payload.split()
username = username.split('username:')[1]
password = password.split('password:')[1]
return dict(username=username, password=password)
except Exception as e:
LOG.error("Unable to parse the secret payload, "
"unknown format of the registry secret: %s" % e)
raise exception.SysinvException(_(
"Unable to parse the secret payload"))
def retrieve_specified_registries(self):
registries_info = \
copy.deepcopy(constants.DEFAULT_REGISTRIES_INFO)
registries_url = {}
registries_type = {}
registries_auth = {}
registries_overrides = {}
registries = self._dbapi.service_parameter_get_all(
service=constants.SERVICE_TYPE_DOCKER)
for r in registries:
if r.name == constants.SERVICE_PARAM_NAME_DOCKER_URL:
registries_url.update({r.section: str(r.value)})
elif r.name == constants.SERVICE_PARAM_NAME_DOCKER_TYPE:
registries_type.update({r.section: str(r.value)})
elif r.name == constants.SERVICE_PARAM_NAME_DOCKER_AUTH_SECRET:
registries_auth.update({r.section: str(r.value)})
elif r.name == constants.SERVICE_PARAM_NAME_DOCKER_ADDITIONAL_OVERRIDES:
registries_overrides.update({r.section: str(r.value)})
if not registries_url:
# return directly if no user specified registries
return registries_info
for section, url in registries_url.items():
try:
registries_info[section]['registry_replaced'] = str(url)
if section in registries_overrides:
registries_info[section]['registry_default'] = \
registries_overrides[section]
if section in registries_auth:
secret_ref = registries_auth[section]
if secret_ref != 'None':
# If the user specified registry requires
# authentication, get the registry auth
# from barbican secret
auth = self._parse_barbican_secret(secret_ref)
if (section in registries_type and
registries_type[section] == constants.DOCKER_REGISTRY_TYPE_AWS_ECR):
auth = cutils.get_aws_ecr_registry_credentials(
self._dbapi, url, auth['username'], auth['password'])
registries_info[section]['registry_auth'] = auth
except exception.SysinvException:
raise exception.SysinvException(_(
"Unable to get the credentials to access "
"registry %s" % url))
except KeyError:
# Unexpected
pass
return registries_info
def _get_img_tag_with_registry(self, pub_img_tag, registries_info):
"""Regenerate public image tag with user specified registries
An example of passed public image reference:
docker.io/starlingx/stx-keystone:latest
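Illustrative rewrite (hypothetical registry values): if the matching
registry has registry_default 'docker.io' and registry_replaced
'my.registry:5000', the reference above is returned as
'my.registry:5000/starlingx/stx-keystone:latest' together with that
registry's auth, per the split/concatenation below.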
"""
if registries_info == constants.DEFAULT_REGISTRIES_INFO:
# return if no user specified registries
return pub_img_tag, None
for registry_info in registries_info.values():
registry_auth = registry_info['registry_auth']
if pub_img_tag.startswith(registry_info['registry_default']):
registry = registry_info['registry_replaced']
if registry:
img_name = pub_img_tag.split(
registry_info['registry_default'])[1]
return registry + img_name, registry_auth
return pub_img_tag, registry_auth
elif pub_img_tag.startswith(registry_info['registry_replaced']):
return pub_img_tag, registry_auth
# In case the image is overridden via "system helm-override-update"
# with a custom registry that is not from any of the known registries
# (i.e. k8s.gcr.io, gcr.io, quay.io, docker.io, docker.elastic.co),
# pull directly from the custom registry (Note: The custom registry
# must be unauthenticated in this case.)
return pub_img_tag, None
def download_an_image(self, app_name, registries_info, img_tag):
rc = True
start = time.time()
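# Flow summary (as implemented below): images already referencing the local
# registry are pulled via crictl with local registry credentials; on a miss
# they are pulled from the public/private registry, re-tagged, pushed to the
# local registry and then removed from the local docker cache. All other
# images are pulled directly from the public/private registry and re-tagged.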
if img_tag.startswith(constants.DOCKER_REGISTRY_HOST):
try:
if AppOperator.is_app_aborted(app_name):
LOG.info("User aborted. Skipping download of image %s " % img_tag)
return img_tag, False
LOG.info("Image %s download started from local registry" % img_tag)
client = docker.APIClient(timeout=INSTALLATION_TIMEOUT)
local_registry_auth = cutils.get_local_docker_registry_auth()
auth = '{0}:{1}'.format(local_registry_auth['username'],
local_registry_auth['password'])
subprocess.check_call(["crictl", "pull", "--creds", auth, img_tag]) # pylint: disable=not-callable
except subprocess.CalledProcessError:
try:
# Pull the image from the public/private registry
LOG.info("Image %s is not available in local registry, "
"download started from public/private registry"
% img_tag)
pub_img_tag = img_tag.replace(
constants.DOCKER_REGISTRY_SERVER + "/", "")
target_img_tag, registry_auth = \
self._get_img_tag_with_registry(pub_img_tag, registries_info)
client.pull(target_img_tag, auth_config=registry_auth)
except Exception as e:
rc = False
LOG.error("Image %s download failed from public/private"
"registry: %s" % (img_tag, e))
return img_tag, rc
try:
# Tag and push the image to the local registry
client.tag(target_img_tag, img_tag)
# admin password may be changed by openstack client cmd in parallel.
# So we cannot cache auth info, we need to refresh it each time.
local_registry_auth = cutils.get_local_docker_registry_auth()
client.push(img_tag, auth_config=local_registry_auth)
except Exception as e:
rc = False
LOG.error("Image %s push failed to local registry: %s" % (img_tag, e))
return img_tag, rc
try:
# remove docker container image after it is pushed to local registry.
LOG.info("Remove image %s after push to local registry." % (target_img_tag))
client.remove_image(target_img_tag)
client.remove_image(img_tag)
except Exception as e:
LOG.warning("Image %s remove failed: %s" % (target_img_tag, e))
except Exception as e:
rc = False
LOG.error("Image %s download failed from local registry: %s" % (img_tag, e))
else:
try:
LOG.info("Image %s download started from public/private registry" % img_tag)
client = docker.APIClient(timeout=INSTALLATION_TIMEOUT)
target_img_tag, registry_auth = \
self._get_img_tag_with_registry(img_tag, registries_info)
client.pull(target_img_tag, auth_config=registry_auth)
client.tag(target_img_tag, img_tag)
except Exception as e:
rc = False
LOG.error("Image %s download failed from public/private registry: %s" % (img_tag, e))
elapsed_time = time.time() - start
if rc:
LOG.info("Image %s download succeeded in %d seconds" %
(img_tag, elapsed_time))
return img_tag, rc
class ArmadaHelper(object):
""" Armada class to encapsulate Armada related operations """
def __init__(self, kube):
self._kube = kube
self._lock = threading.Lock()
self.overrides_dir = common.HELM_OVERRIDES_PATH
self.manifests_dir = constants.APP_SYNCED_ARMADA_DATA_PATH
self.logs_dir = ARMADA_HOST_LOG_LOCATION
# Generate kubectl wrapped bash command that can run in
# a specific container of a namespaced pod.
def wrap_kubectl_bash(self, name, namespace, exec_command,
container=None):
kcmd = ['kubectl', '--kubeconfig', kubernetes.KUBERNETES_ADMIN_CONF,
'exec', '-n', namespace, name]
if container is not None:
kcmd.extend(['--container', container])
kcmd.extend(['--', '/bin/bash', '-c', exec_command])
return kcmd
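# Illustrative result (placeholder names): the list built above corresponds to
#   kubectl --kubeconfig <admin.conf> exec -n <namespace> <pod> \
#       --container <container> -- /bin/bash -c '<exec_command>'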
# Wrapper for kubectl exec to run bash commands in a specific container
# of a namespaced pod.
# Returns the command stdout and stderr; stderr is non-empty if the kubectl command fails.
# This should be replaced with the core kubernetes client API
# connect_get_namespaced_pod_exec when that can be made to work properly
# with error handling, separate stdout, stderr, timeout, poll and flush
# of output streams, and wait for command completion.
def kube_exec_container_bash(self, name, namespace, exec_command,
container=None):
kcmd = self.wrap_kubectl_bash(name, namespace, exec_command,
container=container)
stdout, stderr = cutils.trycmd(*kcmd, discard_warnings=True,
run_as_root=False)
return stdout, stderr
# Wrapper for kubectl cp to a container. One of 'src' and 'dest' must
# be a remote file specification.
# Returns the command stdout and stderr; stderr is non-empty if the kubectl command fails.
# Limitation: kubectl cp command does not return an error when
# the source file does not exist.
# https://github.com/kubernetes/kubernetes/issues/78879
def kube_cp_container(self, namespace, src, dest, container=None):
kcmd = ['kubectl', '--kubeconfig', kubernetes.KUBERNETES_ADMIN_CONF,
'cp', '-n', namespace, src, dest]
if container is not None:
kcmd.extend(['--container', container])
stdout, stderr = cutils.trycmd(*kcmd, discard_warnings=True,
run_as_root=False)
return stdout, stderr
def copy_manifests_and_overrides_to_armada(self, armada_pod, mfile):
# NOTE: The armada pod may run on either controller.
# We do not want to mount host directories since DRBD
# /opt/platform is only visible on active controller.
# As a workaround, we can copy the required files into
# the armada container.
# Derive manifests and overrides directories for both
# source and destination paths. We use well-known
# directories and a filename given the following format.
# /manifests/oidc-auth-apps/1.0-0/oidc-auth-apps-manifest-del.yaml
manifests_dest = '{}/{}'.format(ARMADA_CONTAINER_TMP, 'manifests')
overrides_dest = '{}/{}'.format(ARMADA_CONTAINER_TMP, 'overrides')
app_name = mfile.split('/', 3)[2]
# Create manifests and overrides directories in container
cmd = 'mkdir -v -p {}; mkdir -v -p {}'.\
format(manifests_dest, overrides_dest)
stdout, stderr = self.kube_exec_container_bash(
armada_pod, ARMADA_NAMESPACE, cmd, container=ARMADA_CONTAINER_NAME)
if stderr:
LOG.error("Failed to create manifests and overrides, error: %s",
stderr)
return False
# Copy manifests and overrides directories to container
# NOTE: kubectl cp command does not return an error when
# the source file does not exist.
# https://github.com/kubernetes/kubernetes/issues/78879
src_dest_dirs = \
[('{}/{}'.format(self.manifests_dir, app_name),
'{}:{}'.format(armada_pod, manifests_dest)),
('{}/{}'.format(self.overrides_dir, app_name),
'{}:{}'.format(armada_pod, overrides_dest))]
for src_dir, dest_dir in src_dest_dirs:
# If there are no overrides it's not a fatal error.
if (src_dir.startswith(self.overrides_dir) and
not os.path.exists(src_dir)):
LOG.info("%s doesn't exist, skipping it." % src_dir)
continue
LOG.info("Copy %s to %s ." % (src_dir, dest_dir))
stdout, stderr = self.kube_cp_container(
ARMADA_NAMESPACE, src_dir, dest_dir,
container=ARMADA_CONTAINER_NAME)
if stderr:
LOG.error("Failed to copy %s to %s, error: %s",
src_dir, dest_dir, stderr)
return False
return True
def check_pod_ready_probe(self, pod):
"""Pod is of the form returned by self._kube.kube_get_pods_by_selector.
Returns true if last probe shows the container is in 'Ready' state.
"""
conditions = list([x for x in pod.status.conditions if x.type == 'Ready'])
if not conditions:
return False
return conditions[0].status == 'True'
def _prefer_select_one_running_ready_pod(self, pods):
"""Find one running and ready pod.
Return it if one is found, otherwise return the first pod.
"""
for pod in pods:
if pod.status.phase == 'Running' and \
pod.metadata.deletion_timestamp is None and \
self.check_pod_ready_probe(pod):
return pod
return pods[0]
def clear_armada_locks(self):
lock_name = "{}.{}.{}".format(ARMADA_LOCK_PLURAL,
ARMADA_LOCK_GROUP,
ARMADA_LOCK_NAME)
try:
self._kube.delete_custom_resource(ARMADA_LOCK_GROUP,
ARMADA_LOCK_VERSION,
ARMADA_LOCK_NAMESPACE,
ARMADA_LOCK_PLURAL,
lock_name)
except Exception:
# Best effort delete
LOG.warning("Failed to clear Armada locks.")
pass
def _start_armada_service(self):
"""Armada pod is managed by Kubernetes / Helm.
This routine checks for the armada pod and waits until it is ready to provide service.
"""
self.overrides_dir = common.HELM_OVERRIDES_PATH
self.manifests_dir = constants.APP_SYNCED_ARMADA_DATA_PATH
try:
# Create the armada log folder if it does not exist
if not os.path.exists(ARMADA_HOST_LOG_LOCATION):
os.mkdir(ARMADA_HOST_LOG_LOCATION)
os.chmod(ARMADA_HOST_LOG_LOCATION, 0o755)
os.chown(ARMADA_HOST_LOG_LOCATION, 1000,
grp.getgrnam("sys_protected").gr_gid)
if not os.path.exists(common.HELM_OVERRIDES_PATH):
os.makedirs(common.HELM_OVERRIDES_PATH, 0o755)
except OSError as oe:
LOG.error("Unable to create armada log folder : %s" % oe)
return False
# Wait for armada to be ready for cmd execution.
# NOTE: make_armada_requests() also has retry mechanism
TIMEOUT_DELTA = 5
TIMEOUT_SLEEP = 5
TIMEOUT_START_VALUE = 30
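# When the pod is not yet usable, the except branch below sleeps TIMEOUT_SLEEP
# seconds and subtracts TIMEOUT_DELTA, so the loop retries roughly
# TIMEOUT_START_VALUE / TIMEOUT_DELTA (about 6) times, i.e. on the order of
# 30 seconds overall, before giving up.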
timeout = TIMEOUT_START_VALUE
while timeout > 0:
try:
pods = self._kube.kube_get_pods_by_selector(
ARMADA_NAMESPACE,
"application=%s" % ARMADA_APPLICATION, "")
if not pods:
raise RuntimeError('armada pod not found')
pod = self._prefer_select_one_running_ready_pod(pods)
if pod and pod.status.phase != 'Running':
# Delete the pod, it should restart if it can
if not self._kube.kube_delete_pod(pod.metadata.name,
ARMADA_NAMESPACE, grace_periods_seconds=0):
LOG.warning("Pod %s/%s deletion unsuccessful...",
ARMADA_NAMESPACE, pod.metadata.name)
if pod and pod.status.phase == 'Running' and \
self.check_pod_ready_probe(pod):
# Test that we can copy files into armada-api container
src = '/etc/build.info'
dest_dir = '{}:{}'.format(pod.metadata.name, '/tmp')
stdout, stderr = self.kube_cp_container(
ARMADA_NAMESPACE, src, dest_dir,
container=ARMADA_CONTAINER_NAME)
if stderr:
LOG.error("Failed to copy %s to %s, error: %s",
src, dest_dir, stderr)
raise RuntimeError('armada pod not ready')
break
except Exception as e:
LOG.info("Could not get Armada service : %s " % e)
time.sleep(TIMEOUT_SLEEP)
timeout -= TIMEOUT_DELTA
if timeout <= 0:
LOG.error("Failed to get Armada service after {seconds} seconds.".
format(seconds=TIMEOUT_START_VALUE))
return False
# We don't need to loop through the code that checks the pod's status
# again. Once the previous loop exits with pod 'Running' we can test
# the connectivity to the tiller postgres backend:
timeout = TIMEOUT_START_VALUE
while timeout > 0:
try:
_ = helm_utils.retrieve_helm_v2_releases()
break
except exception.HelmTillerFailure:
LOG.warn("Could not query Helm/Tiller releases")
time.sleep(TIMEOUT_SLEEP)
timeout -= TIMEOUT_DELTA
continue
except Exception as ex:
LOG.error("Unhandled exception : {error}".format(error=str(ex)))
return False
if timeout <= 0:
LOG.error("Failed to query Helm/Tiller for {seconds} seconds.".
format(seconds=TIMEOUT_START_VALUE))
return False
return True
def stop_armada_request(self):
"""A simple way to cancel an on-going manifest apply/rollback/delete
request. This logic will be revisited in the future.
"""
try:
pods = self._kube.kube_get_pods_by_selector(
ARMADA_NAMESPACE, "application=%s" % ARMADA_APPLICATION, "")
if not pods:
raise RuntimeError('armada pod not found')
for pod in pods:
if pod.status.phase == 'Running':
# Delete the pod, it should restart if it can
LOG.info("Stopping Armada service %s.", pod.metadata.name)
if not self._kube.kube_delete_pod(pod.metadata.name,
ARMADA_NAMESPACE,
grace_periods_seconds=0):
LOG.warning("Pod %s/%s deletion unsuccessful.",
ARMADA_NAMESPACE, pod.metadata.name)
except Exception as e:
LOG.error("Failed to stop Armada service : %s " % e)
def make_armada_request(self, request, manifest_file='', overrides_str='',
app_releases=None, logfile=None):
if logfile is None:
# Infer app name from the manifest file
# e.g., /tmp/manifests/oidc-auth-apps/1.0-0/oidc-auth-apps-manifest.yaml
app_name = manifest_file.split('/', 3)[2]
logname = time.strftime(app_name + '-' + request + '_%Y-%m-%d-%H-%M-%S.log')
logfile = ARMADA_HOST_LOG_LOCATION + '/' + logname
if app_releases is None:
app_releases = []
rc = True
# Configure additional armada options (e.g. --tiller-host);
# currently none are required.
tiller_host = " "
LOG.debug('make_armada_request: request=%s, '
'manifest_file=%s, overrides_str=%s, '
'app_releases=%r, logfile=%r',
request, manifest_file, overrides_str,
app_releases, logfile)
try:
# Ensure armada service is ready.
with self._lock:
ret = self._start_armada_service()
if ret:
# The armada pod name may change, get it each time
pods = self._kube.kube_get_pods_by_selector(
ARMADA_NAMESPACE, "application=%s" % ARMADA_APPLICATION,
"status.phase=Running")
if not pods:
raise RuntimeError('armada pod not found')
armada_pod = self._prefer_select_one_running_ready_pod(pods).metadata.name
if not self.copy_manifests_and_overrides_to_armada(armada_pod, manifest_file):
raise RuntimeError('could not access armada pod')
if request == 'validate':
cmd = ''.join(['armada validate ',
ARMADA_CONTAINER_TMP,
manifest_file])
LOG.info("Armada %s command: '%s'", request, cmd)
kcmd = self.wrap_kubectl_bash(
armada_pod, ARMADA_NAMESPACE, cmd,
container=ARMADA_CONTAINER_NAME)
p = subprocess.Popen(kcmd,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p.stdout, open(logfile, 'w') as log:
while p.poll() is None:
line = p.stdout.readline()
if line != "":
log.write(line)
log.flush()
if p.returncode != 0:
rc = False
LOG.error("Failed to validate application manifest %s "
"with exit code %s. See %s for details." %
(manifest_file, p.returncode, logfile))
else:
LOG.info("Manifest file %s was successfully validated." %
manifest_file)
elif request == constants.APP_APPLY_OP:
cmd = ''.join(['armada apply --debug ',
'--enable-chart-cleanup ',
ARMADA_CONTAINER_TMP,
manifest_file,
overrides_str,
tiller_host])
LOG.info("Armada %s command: '%s'", request, cmd)
kcmd = self.wrap_kubectl_bash(
armada_pod, ARMADA_NAMESPACE, cmd,
container=ARMADA_CONTAINER_NAME)
p = subprocess.Popen(kcmd,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p.stdout, open(logfile, 'w') as log:
while p.poll() is None:
line = p.stdout.readline()
if line != "":
LOG.debug('%s: %s', request, line)
log.write(line)
log.flush()
if p.returncode != 0:
rc = False
LOG.error("Failed to apply application manifest %s "
"with exit code %s. See %s for details." %
(manifest_file, p.returncode, logfile))
if p.returncode == CONTAINER_ABNORMAL_EXIT_CODE:
self.clear_armada_locks()
else:
LOG.info("Application manifest %s was successfully "
"applied/re-applied." % manifest_file)
elif request == constants.APP_ROLLBACK_OP:
for app_release in app_releases:
release = app_release.get('release')
version = app_release.get('version')
sequenced = app_release.get('sequenced')
if sequenced:
cmd = ''.join(['armada rollback --debug ',
'--wait --timeout 1800 ',
'--release ' + release + ' ',
'--version ' + str(version),
tiller_host])
else:
cmd = ''.join(['armada rollback --debug ',
'--release ' + release + ' ',
'--version ' + str(version),
tiller_host])
LOG.info("Armada %s command: '%s'", request, cmd)
kcmd = self.wrap_kubectl_bash(
armada_pod, ARMADA_NAMESPACE, cmd,
container=ARMADA_CONTAINER_NAME)
p = subprocess.Popen(kcmd,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p.stdout, open(logfile, 'w') as log:
while p.poll() is None:
line = p.stdout.readline()
if line != "":
log.write(line)
log.flush()
if p.returncode != 0:
rc = False
LOG.error("Failed to rollback release %s "
"with exit code %s. See %s for details." %
(release, p.returncode, logfile))
if p.returncode == CONTAINER_ABNORMAL_EXIT_CODE:
self.clear_armada_locks()
break
if rc:
LOG.info("Application releases %s were successfully "
"rolled back." % app_releases)
elif request == constants.APP_DELETE_OP:
# Since armada delete doesn't support --values overrides
# files, use the delete manifest generated from the
# ArmadaManifestOperator during overrides generation. It
# will contain an accurate view of what was applied
manifest_delete_file = "%s-del%s" % os.path.splitext(manifest_file)
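# e.g. (following the manifest path format shown earlier)
# '/tmp/manifests/oidc-auth-apps/1.0-0/oidc-auth-apps-manifest.yaml'
# becomes '/tmp/manifests/oidc-auth-apps/1.0-0/oidc-auth-apps-manifest-del.yaml'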
cmd = ''.join(['armada delete --debug ',
'--manifest ',
ARMADA_CONTAINER_TMP,
manifest_delete_file,
tiller_host])
LOG.info("Armada %s command: '%s'", request, cmd)
kcmd = self.wrap_kubectl_bash(
armada_pod, ARMADA_NAMESPACE, cmd,
container=ARMADA_CONTAINER_NAME)
p = subprocess.Popen(kcmd,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p.stdout, open(logfile, 'w') as log:
while p.poll() is None:
line = p.stdout.readline()
if line != "":
log.write(line)
log.flush()
if p.returncode != 0:
rc = False
LOG.error("Failed to delete application manifest %s "
"with exit code %s. See %s for details." %
(manifest_file, p.returncode, logfile))
if p.returncode == CONTAINER_ABNORMAL_EXIT_CODE:
self.clear_armada_locks()
else:
LOG.info("Application charts were successfully "
"deleted with manifest %s." % manifest_delete_file)
else:
rc = False
LOG.error("Unsupported armada request: %s." % request)
else:
# Armada service failed to start/restart
rc = False
LOG.error("Armada service failed to start/restart")
except Exception as e:
rc = False
self.clear_armada_locks()
LOG.error("Armada request %s for manifest %s failed: %s " %
(request, manifest_file, e))
return rc
class AppImageParser(object):
"""Utility class to help find images for an application"""
TAG_LIST = ['tag', 'imageTag', 'imagetag']
def _find_images_in_dict(self, var_dict):
"""A generator to find image references in a nested dictionary.
Supported image formats in app:
1. images:
tags: <dict>
2. image: <str>
3. image:
repository: <str>
tag: <str>
4. image: <str>
imageTag(tag/imagetag): <str>
:param var_dict: dict
:return: a list of image references
"""
if isinstance(var_dict, dict):
for k, v in six.iteritems(var_dict):
if k == 'images':
try:
yield {k: {'tags': v['tags']}}
except (KeyError, TypeError):
pass
elif k == 'image':
try:
image = {}
keys = v.keys()
if 'repository' in keys:
image.update({'repository': v['repository']})
if 'tag' in keys:
image.update({'tag': v['tag']})
if image:
yield {k: image}
except (KeyError, TypeError, AttributeError):
if isinstance(v, str) or v is None:
yield {k: v}
elif k in self.TAG_LIST:
if isinstance(v, str) or v is None:
yield {k: v}
elif isinstance(v, dict):
for result in self._find_images_in_dict(v):
yield {k: result}
def find_images_in_dict(self, var_dict):
"""Find image references in a nested dictionary.
This function is used to find images from helm chart,
chart overrides file and armada manifest file.
:param var_dict: dict
:return: a dict of image references
"""
images_dict = {}
images = list(self._find_images_in_dict(var_dict))
for img in images:
images_dict = self.merge_dict(images_dict, img)
return images_dict
def merge_dict(self, source_dict, overrides_dict):
"""Recursively merge two nested dictionaries. The
'overrides_dict' is merged into 'source_dict'.
"""
for k, v in six.iteritems(overrides_dict):
if isinstance(v, dict):
source_dict[k] = self.merge_dict(
source_dict.get(k, {}), v)
else:
source_dict[k] = v
return source_dict
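# Illustrative merge: merge_dict({'image': {'repository': 'r1'}},
#                                {'image': {'tag': 't1'}})
# returns {'image': {'repository': 'r1', 'tag': 't1'}}; scalar values from
# overrides_dict simply replace those in source_dict.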
def update_images_with_local_registry(self, imgs_dict):
"""Update image references with local registry prefix.
:param imgs_dict: a dict of images
:return: a dict of images with local registry prefix
"""
if not isinstance(imgs_dict, dict):
raise exception.SysinvException(_(
"Unable to update images with local registry "
"prefix: %s is not a dict." % imgs_dict))
for k, v in six.iteritems(imgs_dict):
if v and isinstance(v, str):
if (not re.search(r'^.+:.+/', v) and
k not in self.TAG_LIST):
if not cutils.is_valid_domain_name(v[:v.find('/')]):
# Explicitly specify 'docker.io' in the image
v = '{}/{}'.format(
constants.DEFAULT_DOCKER_DOCKER_REGISTRY, v)
v = '{}/{}'.format(constants.DOCKER_REGISTRY_SERVER, v)
imgs_dict[k] = v
elif isinstance(v, dict):
self.update_images_with_local_registry(v)
return imgs_dict
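# Illustrative result (registry host shown by its constant name): an image
# reference such as 'quay.io/airshipit/armada:latest' carries no
# '<host>:<port>/' prefix, so it is rewritten to
# '<DOCKER_REGISTRY_SERVER>/quay.io/airshipit/armada:latest', while values
# stored under keys listed in TAG_LIST are left untouched.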
def generate_download_images_list(self, download_imgs_dict, download_imgs_list):
"""Generate a list of images that is required to be downloaded.
"""
if not isinstance(download_imgs_dict, dict):
raise exception.SysinvException(_(
"Unable to generate download images list: %s "
"is not a dict." % download_imgs_dict))
for k, v in six.iteritems(download_imgs_dict):
if k == 'images':
try:
imgs = [_f for _f in v['tags'].values() if _f]
download_imgs_list.extend(imgs)
except (KeyError, TypeError):
pass
elif k == 'image':
try:
img = v['repository'] + ':' + v['tag']
except (KeyError, TypeError):
img = ''
if v and isinstance(v, str):
img = v
for t in self.TAG_LIST:
if t in download_imgs_dict and download_imgs_dict[t]:
img = img + ':' + download_imgs_dict[t]
break
if re.search(r'/.+:.+$', img):
download_imgs_list.append(img)
elif isinstance(v, dict):
self.generate_download_images_list(v, download_imgs_list)
return list(set(download_imgs_list))
class PluginHelper(object):
""" Utility class to help manage application plugin lifecycle """
# An enabled plugin will have a python path configuration file name with the
# following format: stx_app-platform-integ-apps-1.0-8.pth
PTH_PREFIX = 'stx_app-'
PTH_PATTERN = re.compile(r"{}/([\w-]+)/(\d+\.\d+-\d+.*)/plugins".format(
common.HELM_OVERRIDES_PATH))
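# The pattern is matched against the single path stored in an enabled .pth
# file (rooted at common.HELM_OVERRIDES_PATH); group(1) is the app name and
# group(2) the app version, as consumed by audit_plugins() below.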
def __init__(self, dbapi, helm_op):
self._dbapi = dbapi
self._helm_op = helm_op
self._system_path = self._get_python_system_path()
def _get_python_system_path(self):
path = None
try:
if six.PY2:
path = site.getsitepackages()[0]
else:
for p in site.getsitepackages():
if os.path.exists(p):
path = p
break
except AttributeError:
# Based on https://github.com/pypa/virtualenv/issues/737.
# site.getsitepackages() function is not available in a virtualenv.
# So use a tox friendly method when in a virtualenv
try:
from distutils.sysconfig import get_python_lib
path = get_python_lib()
except Exception as e:
raise exception.SysinvException(_(
"Failed to determine the python site packages path: %s" % str(e)))
if not path:
raise exception.SysinvException(_(
"Failed to determine the python site packages path."))
return path
def _get_pth_fqpn(self, app):
return "{}/{}{}-{}.pth".format(
self._system_path, self.PTH_PREFIX, app.name, app.version)
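# e.g. (using the example file name from the class comment above)
# <site-packages>/stx_app-platform-integ-apps-1.0-8.pth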
def audit_plugins(self):
""" Verify that only enabled application plugins are discoverable """
pattern = '{}/{}*.pth'.format(self._system_path, self.PTH_PREFIX)
discoverable_pths = glob.glob(pattern)
LOG.debug("PluginHelper: Discoverable app plugins: %s" % discoverable_pths)
# Examine existing pth files to make sure they are still valid
for pth in discoverable_pths:
with open(pth, 'r') as f:
contents = f.readlines()
if len(contents) == 1:
LOG.debug("PluginHelper: Plugin Path: %s" % contents[0])
match = self.PTH_PATTERN.match(contents[0])
if match:
app = match.group(1)
ver = match.group(2)
try:
app_obj = self._dbapi.kube_app_get(app)
if app_obj.app_version == ver:
LOG.info("PluginHelper: App %s, version %s: Found "
"valid plugin" % (app, ver))
continue
else:
LOG.warning("PluginHelper: Stale plugin pth file "
"found %s: Wrong plugin version "
"enabled %s != %s." % (
pth, ver, app_obj.app_version))
except exception.KubeAppNotFound:
LOG.warning("PluginHelper: Stale plugin pth file found"
" %s: App is not active." % pth)
else:
LOG.warning("PluginHelper: Invalid pth file %s: Invalid "
"name or version." % pth)
else:
LOG.warning("PluginHelper: Invalid pth file %s: Only one path"
" is expected." % pth)
LOG.info("PluginHelper: Removing invalid plugin pth: %s" % pth)
os.remove(pth)
self.activate_apps_plugins()
def activate_apps_plugins(self):
# Examine existing applications in an applying/restoring state and make
# sure they are activated
apps = self._dbapi.kube_app_get_all()
for app in apps:
# If the app is in some form of apply/restore, the plugins
# should be enabled
if app.status in [constants.APP_APPLY_IN_PROGRESS,
constants.APP_APPLY_SUCCESS,
constants.APP_APPLY_FAILURE,
constants.APP_RESTORE_REQUESTED]:
self.activate_plugins(AppOperator.Application(app))
def install_plugins(self, app):
""" Install application plugins. """
# An app may be packaged with multiple wheels, discover and install them
# in the synced app plugin directory
pattern = '{}/*.whl'.format(app.inst_plugins_dir)
discovered_whls = glob.glob(pattern)
if not discovered_whls:
LOG.info("PluginHelper: %s does not contain any platform plugins." %
app.name)
return
if not os.path.isdir(app.sync_plugins_dir):
LOG.info("PluginHelper: Creating %s plugin directory %s." % (
app.name, app.sync_plugins_dir))
os.makedirs(app.sync_plugins_dir)
for whl in discovered_whls:
LOG.info("PluginHelper: Installing %s plugin %s to %s." % (
app.name, whl, app.sync_plugins_dir))
with zipfile.ZipFile(whl) as zf:
zf.extractall(app.sync_plugins_dir)
def uninstall_plugins(self, app):
""" Uninstall application plugins."""
if os.path.isdir(app.sync_plugins_dir):
try:
LOG.info("PluginHelper: Removing plugin directory %s" %
app.sync_plugins_dir)
shutil.rmtree(app.sync_plugins_dir)
except OSError:
LOG.exception("PluginHelper: Failed to remove plugin directory:"
" %s" % app.sync_plugins_dir)
else:
LOG.info("PluginHelper: Plugin directory %s does not exist. No "
"need to remove." % app.sync_plugins_dir)
def activate_plugins(self, app):
pth_fqpn = self._get_pth_fqpn(app)
# If this isn't an app with plugins or the plugin path is already
# active, skip activation
if not app.system_app or os.path.isfile(pth_fqpn):
return
# Add a .pth file to a site-packages directory so the plugin is picked
# up automatically on a conductor restart
with open(pth_fqpn, 'w') as f:
f.write(app.sync_plugins_dir + '\n')
LOG.info("PluginHelper: Enabled plugin directory %s: created %s" % (
app.sync_plugins_dir, pth_fqpn))
# Make sure sys.path reflects the enabled plugins: add the plugin
# directory to sys.path
site.addsitedir(app.sync_plugins_dir)
# Find the distribution and add it to the resources working set
for d in pkg_resources.find_distributions(app.sync_plugins_dir,
only=True):
pkg_resources.working_set.add(d, entry=None, insert=True,
replace=True)
if self._helm_op:
self._helm_op.discover_plugins()
def deactivate_plugins(self, app):
# If the application doesn't have any plugins, skip deactivation
if not app.system_app:
return
pth_fqpn = self._get_pth_fqpn(app)
if os.path.exists(pth_fqpn):
# Remove the pth file, so on a conductor restart this installed
# plugin is not discoverable
try:
os.remove(pth_fqpn)
LOG.info("PluginHelper: Disabled plugin directory %s: removed "
"%s" % (app.sync_plugins_dir, pth_fqpn))
except OSError:
# The pth file should have been present but is not; continue on...
pass
# Make sure the sys.path reflects only enabled plugins
try:
sys.path.remove(app.sync_plugins_dir)
except ValueError:
# The plugin path should have been in sys.path but is not; continue on...
LOG.warning("sys.path (%s) is missing plugin (%s)" % (
sys.path, app.sync_plugins_dir))
# Determine distributions installed by this plugin
if app.sync_plugins_dir in pkg_resources.working_set.entry_keys:
plugin_distributions = pkg_resources.working_set.entry_keys[app.sync_plugins_dir]
LOG.info("PluginHelper: Disabling distributions: %s" % plugin_distributions)
# Clean up the distribution(s) module names
module_name_cleanup = []
for module_name, value in six.iteritems(sys.modules):
for distribution in plugin_distributions:
distribution_module_name = distribution.replace('-', '_')
if ((module_name == distribution_module_name) or
(module_name.startswith(distribution_module_name + '.'))):
LOG.debug("PluginHelper: Removing module name: %s: %s" % (module_name, value))
module_name_cleanup.append(module_name)
for module_name in module_name_cleanup:
del sys.modules[module_name]
# Clean up the working set
for distribution in plugin_distributions:
try:
del pkg_resources.working_set.by_key[distribution]
except KeyError:
LOG.warn("Plugin distribution %s not enabled for version %s"
", but expected to be. Continuing with plugin "
"deactivation." % (distribution, app.version))
del pkg_resources.working_set.entry_keys[app.sync_plugins_dir]
pkg_resources.working_set.entries.remove(app.sync_plugins_dir)
if self._helm_op:
# purge this plugin from the stevedore plugin cache so this version
# of the plugin endpoints is not discoverable
self._helm_op.purge_cache_by_location(app.sync_plugins_dir)
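# Illustrative sketch (not part of the original module): activate_plugins() above
# relies on the standard .pth mechanism plus pkg_resources. A minimal standalone
# equivalent, with a hypothetical site-packages path and plugin directory, would be:
#
#   import site
#   import pkg_resources
#
#   pth_file = '/usr/lib/python3/site-packages/stx_app-example-app-1.0-1.pth'  # hypothetical
#   plugin_dir = '/opt/platform/helm/1.0/example-app/1.0-1/plugins'            # hypothetical
#   with open(pth_file, 'w') as f:
#       f.write(plugin_dir + '\n')        # picked up on the next interpreter start
#   site.addsitedir(plugin_dir)           # make the plugins importable immediately
#   for dist in pkg_resources.find_distributions(plugin_dir, only=True):
#       pkg_resources.working_set.add(dist, entry=None, insert=True, replace=True)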
|
python
|
"""Functions for parsing the measurement data files"""
import yaml
import pkgutil
from flavio.classes import Measurement, Observable
from flavio._parse_errors import constraints_from_string, errors_from_string
from flavio.statistics import probability
import numpy as np
from math import sqrt
import warnings
def _load(obj):
"""Read measurements from a YAML stream or file."""
measurements = yaml.load(obj)
for m_name, m_data in measurements.items():
m = Measurement(m_name)
for arg in ['inspire', 'hepdata', 'experiment', 'url', 'description']:
if arg in m_data:
setattr(m, arg, m_data[arg])
if 'observables' in m_data:
# for multivariate constraints
pd = probability.dict2dist(m_data['values'])
pd = probability.convolve_distributions(pd)
# for observables without arguments (i.e. strings), this is trivial;
obs_list = [obs if isinstance(obs, str)
# for obs. with arguments, need to convert dict of the form
# {'name': myname, 'arg1': v1, ...} to a tuple of the form
# (myname, v1, ...)
else tuple(
[obs['name']]
+ [obs[arg] for arg in Observable[obs['name']].arguments])
for obs in m_data['observables']]
m.add_constraint(obs_list, pd)
elif 'correlation' not in m_data:
# for univariate constraints
if isinstance(m_data['values'], list):
for value_dict in m_data['values']:
args = Observable[value_dict['name']].arguments
# numerical values of arguments, e.g. [1, 6]
args_num = [value_dict[a] for a in args]
# insert string name in front of argument values and turn it
# into a tuple, e.g. ('FL(B0->K*mumu)', 1, 6)
args_num.insert(0, value_dict['name'])
obs_tuple = tuple(args_num)
if isinstance(value_dict['value'], dict):
m.set_constraint(obs_tuple, constraint_dict=value_dict['value'])
else:
m.set_constraint(obs_tuple, value_dict['value'])
else: # otherwise, 'values' is a dict just containing name: constraint_string
for obs, value in m_data['values'].items():
if isinstance(value, dict) or isinstance(value, list):
m.set_constraint(obs, constraint_dict=value)
else:
m.set_constraint(obs, value)
else:
# for multivariate normal constraints
observables = []
central_values = []
errors = []
if isinstance(m_data['values'], list):
for value_dict in m_data['values']:
# if "value" is a list, it contains the values of observable
# arguments (like q^2)
args = Observable[value_dict['name']].arguments
args_num = [value_dict[a] for a in args]
error_dict = errors_from_string(value_dict['value'])
args_num.insert(0, value_dict['name'])
obs_tuple = tuple(args_num)
observables.append(obs_tuple)
central_values.append(error_dict['central_value'])
squared_error = 0.
for sym_err in error_dict['symmetric_errors']:
squared_error += sym_err**2
for asym_err in error_dict['asymmetric_errors']:
squared_error += asym_err[0]*asym_err[1]
errors.append(sqrt(squared_error))
else: # otherwise, 'values' is a dict just containing name: constraint_string
for obs, value in m_data['values'].items():
observables.append(obs)
error_dict = errors_from_string(value)
central_values.append(error_dict['central_value'])
squared_error = 0.
for sym_err in error_dict['symmetric_errors']:
squared_error += sym_err**2
for asym_err in error_dict['asymmetric_errors']:
squared_error += asym_err[0]*asym_err[1]
errors.append(sqrt(squared_error))
correlation = _fix_correlation_matrix(m_data['correlation'], len(observables))
covariance = np.outer(np.asarray(errors), np.asarray(errors))*correlation
m.add_constraint(observables, probability.MultivariateNormalDistribution(central_values, covariance))
return list(measurements.keys())
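# Illustrative sketch (not part of the original file): a minimal YAML fragment of
# the shape handled by the univariate branch of _load() above. The measurement
# name, observable name, argument names and constraint string are placeholders:
#
#   My hypothetical measurement:
#     experiment: SomeExperiment
#     values:
#       - name: MyObservable
#         q2min: 1
#         q2max: 6
#         value: 0.25 +- 0.05
#
# Each entry of 'values' is turned into a tuple ('MyObservable', 1, 6) and passed
# to Measurement.set_constraint together with the constraint string; adding a
# 'correlation' key instead triggers the multivariate-normal branch.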
def _load_new(obj):
"""Read measurements from a YAML stream or file that are compatible
with the format generated by the `get_yaml` method of
`flavio.classes.Measurement`."""
measurements = yaml.load(obj)
if isinstance(measurements, dict):
measurements = [Measurement.from_yaml_dict(measurements, pname='observables')]
else:
measurements = [Measurement.from_yaml_dict(m, pname='observables') for m in measurements]
return [m.name for m in measurements]
def _fix_correlation_matrix(corr, n_dim):
"""In the input file, the correlation matrix can be specified as a list
of lists containing only the upper right of the symmetric correlation
matrix, e.g. [[1, 0.1, 0.2], [1, 0.3], [1]]. This function builds the
full matrix.
Alternatively, if only a number x is given, the correlation matrix is
reconstructed as [[1, x, x, ...], ..., [..., x, x, 1]]"""
try:
float(corr)
except TypeError:
# if it's not a number, go on below
pass
else:
# if it's a number, return delta_ij + (1-delta_ij)*x
return np.eye(n_dim) + (np.ones((n_dim, n_dim))-np.eye(n_dim))*float(corr)
if not isinstance(corr, list):
raise TypeError("Correlation matrix must be of type list")
if len(corr) != n_dim:
raise ValueError("The correlation matrix has inappropriate number of dimensions")
corr_out = np.zeros((n_dim, n_dim))
for i, line in enumerate(corr):
if len(line) == n_dim:
if line[i] != 1:
raise ValueError("The correlation matrix must have 1.0 on the diagonal")
corr_out[i] = line
elif len(line) == n_dim - i:
if line[0] != 1:
raise ValueError("The correlation matrix must have 1.0 on the diagonal")
corr_out[i,i:] = line
else:
raise ValueError("Correlation matrix not understood")
if not np.allclose(corr_out, corr_out.T):
# if the correlation matrix is not symmetric, it is assumed that only the values above the diagonal are present.
# then: M -> M + M^T - diag(M)
corr_out = corr_out + corr_out.T - np.diag(np.diag(corr_out))
return corr_out
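# Illustrative worked example (not part of the original file) for
# _fix_correlation_matrix with n_dim=3:
#
#   - the input [[1, 0.1, 0.2], [1, 0.3], [1]] is first filled row by row into
#     [[1, 0.1, 0.2], [0, 1, 0.3], [0, 0, 1]]; since that array is not symmetric,
#     the final step M + M^T - diag(M) gives
#     [[1.0, 0.1, 0.2], [0.1, 1.0, 0.3], [0.2, 0.3, 1.0]]
#   - passing the single number 0.5 instead returns a matrix with 1.0 on the
#     diagonal and 0.5 everywhere else.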
def write_file(filename, measurements):
"""Write measurements to a YAML file.
measurements can be a list of string names or a list of measurement
instances."""
measurement_instances = [m if isinstance(m, Measurement)
else Measurement[m] for m in measurements]
with open(filename, 'w') as f:
yaml.dump([m.get_yaml_dict(pname='observables')
for m in measurement_instances], f)
def read_file(filename):
"""Read measurements from a YAML file."""
with open(filename, 'r') as f:
try:
return _load_new(f)
except:
f.seek(0) # rewind
return _load(f)
def read_url(url):
"""Read measurements from a URL."""
try:
import requests
except ImportError:
raise ImportError("You need to install the python requests module to load measurements from a URL.")
res = requests.get(url)
return _load(res.text)
def load(obj):
"""Alias for `read_file` for backwards compatibility. Don't use."""
warnings.warn("The function `flavio.measurements.load` was replaced "
"by `flavio.measurements.read_file` in v0.13 "
"and might be removed in the future. "
"Please update your code.", FutureWarning)
return read_file(obj)
def read_default():
"""Read all measurements from `data/measurements.yml`.
This function is invoked once when the package is loaded."""
return _load(pkgutil.get_data('flavio', 'data/measurements.yml'))
read_default()
|
python
|
# Generated by Django 2.2.13 on 2020-07-15 13:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("licenses", "0002_import_license_data"),
]
operations = [
migrations.AlterModelOptions(
name="legalcode",
options={"ordering": ["url"]},
),
migrations.AlterModelOptions(
name="license",
options={"ordering": ["-version", "license_code", "jurisdiction_code"]},
),
migrations.AddField(
model_name="legalcode",
name="raw_html",
field=models.TextField(blank=True, default=""),
),
migrations.AlterField(
model_name="legalcode",
name="language_code",
field=models.CharField(
help_text="E.g. 'en', 'en-ca', 'sr-Latn', or 'x-i18n'. Case-sensitive?",
max_length=8,
),
),
migrations.AlterField(
model_name="translatedlicensename",
name="language_code",
field=models.CharField(
help_text="E.g. 'en', 'en-ca', 'sr-Latn', or 'x-i18n'. Case-sensitive?",
max_length=8,
),
),
]
|
python
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from .forms import CreateNewEvent, CreateNewParticipant
from .models import Event, Attendee
from .utils import check_event, generate_event_url, get_event_by_url
from creator.tasks import generate_certificates
def is_logged_in(request):
return request.user.is_authenticated
def create_event(request):
if not is_logged_in(request):
return HttpResponseRedirect('/login/')
if request.method == 'POST':
form = CreateNewEvent(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect('/home/')
return HttpResponse('It Failed')
else:
form = CreateNewEvent()
return render(request, 'records/create_event.html',{'title': 'testing', 'form': form})
def view_events(request):
if not is_logged_in(request):
return HttpResponseRedirect('/login/')
if request.method == 'GET':
data = Event.objects.all()
return render(request, 'records/view.html', {'title': 'view', 'data': data})
elif request.method == 'POST':
data = request.POST.get('event_url')
event = get_event_by_url(data)
attendees = event.attendees.all()
event_id = request.POST.get('event_id')
generate_certificates(event_id, request)
# request certificate generation here(async)
# try to use celery here
return HttpResponse("Done")
def view_participants(request):
if not is_logged_in(request):
return HttpResponseRedirect('/login/')
data = Attendee.objects.all()
return render(request, 'records/view_participants.html', {'title': 'Attendees', 'data': data})
def view_attendees(request, event, hash):
if not is_logged_in(request):
return HttpResponseRedirect('/login/')
url = generate_event_url(event)
event = get_event_by_url(url)
data = event.attendees.all()
return render(request, 'records/view_participants.html', {'title': 'Attendees', 'data': data})
def add_participant(request, event, hash):
if check_event(event, hash):
if request.method == 'POST':
form = CreateNewParticipant(request.POST)
if(form.is_valid()):
# print(form.fields)
p = Attendee(**form.cleaned_data)
p.save()
url = generate_event_url(event)
event = get_event_by_url(url)
event.attendees.add(p)
#for field in data.keys():
#p.save()
return HttpResponse('done!!!')
return HttpResponse("Failed ....")
elif request.method == 'GET':
form = CreateNewParticipant()
return render(request, 'records/add_participant.html', {'title': 'Redeem your certificate', 'form': form})
else:
return HttpResponse('404 not found!')
def test(request, te):
return HttpResponse('sup ? '+te)
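# Hypothetical URLconf sketch (not part of the original file) showing one way these
# views could be wired up; the route patterns below are placeholders:
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('create/', views.create_event),
#       path('events/', views.view_events),
#       path('participants/', views.view_participants),
#       path('<str:event>/<str:hash>/attendees/', views.view_attendees),
#       path('<str:event>/<str:hash>/register/', views.add_participant),
#       path('test/<str:te>/', views.test),
#   ]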
|
python
|
import os
import numpy as np
from typing import Any, Dict
import torch
from torch.utils.data import Dataset
from core.utils.others.image_helper import read_image
class CILRSDataset(Dataset):
def __init__(self, root_dir: str, transform: bool = False, preloads: str = None) -> None:
self._root_dir = root_dir
self._transform = transform
preload_file = preloads
if preload_file is not None:
print('[DATASET] Loading from NPY')
self._sensor_data_names, self._measurements = np.load(preload_file, allow_pickle=True)
def __len__(self) -> int:
return len(self._sensor_data_names)
def __getitem__(self, index: int) -> Any:
img_path = os.path.join(self._root_dir, self._sensor_data_names[index])
img = read_image(img_path)
if self._transform:
img = img.transpose(2, 0, 1)
img = img / 255.
img = img.astype(np.float32)
img = torch.from_numpy(img).type(torch.FloatTensor)
measurements = self._measurements[index].copy()
data = dict()
data['rgb'] = img
for k, v in measurements.items():
v = torch.from_numpy(np.asanyarray([v])).type(torch.FloatTensor)
data[k] = v
return data
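# Minimal usage sketch (not part of the original module); the dataset root and
# preload file paths are hypothetical placeholders:
#
#   from torch.utils.data import DataLoader
#
#   dataset = CILRSDataset(root_dir='/data/cilrs', transform=True,
#                          preloads='/data/cilrs/preloads.npy')
#   loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)
#   batch = next(iter(loader))  # dict with an 'rgb' tensor plus one key per measurement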
|
python
|
# -*- coding: utf-8 -*-
"""
Contains RabbitMQ Consumer class
Use under MIT License
"""
__author__ = 'G_T_Y'
__license__ = 'MIT'
__version__ = '1.0.0'
import pika
from . import settings
from .exceptions import RabbitmqConnectionError
class Publisher:
"""Used to publish messages on response queue"""
def __init__(self):
self._host = settings.RABBITMQ_HOST
self._vhost = settings.RABBITMQ_VHOST
self._port = settings.RABBITMQ_PORT
self._username = settings.RABBITMQ_USERNAME
self._password = settings.RABBITMQ_PASSWORD
self._response_queue = settings.RESPONSE_QUEUE
self.connection = None
self.channel = None
def connect(self):
"""Establish connection to rabbitmq server using parameters set by init function
It update values of connection and channel parameters
"""
if settings.DEBUG:
parameters = pika.ConnectionParameters(self._host)
else:
credentials = pika.PlainCredentials(
username=self._username,
password=self._password
)
parameters = pika.ConnectionParameters(
host=self._host,
port=self._port,
virtual_host=self._vhost,
credentials=credentials
)
try:
self.connection = pika.BlockingConnection(parameters)
self.channel = self.connection.channel()
except Exception as e:
raise RabbitmqConnectionError(str(e))
def send_message(self, message):
"""Send message to response queue
:param message: message to send
:type message: `str`
:return:
"""
self.connect()
self.channel.queue_declare(self._response_queue)
self.channel.basic_publish(
'',
self._response_queue,
message,
pika.BasicProperties(
content_type='application/json',
delivery_mode=1
)
)
self.connection.close()
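# Minimal usage sketch (not part of the original module). It assumes the settings
# module already provides the RabbitMQ connection values; the payload is a placeholder:
#
#   import json
#
#   publisher = Publisher()
#   publisher.send_message(json.dumps({"status": "ok"}))
#
# send_message() opens a fresh connection, declares the response queue, publishes the
# message with content type application/json and delivery mode 1, then closes the
# connection.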
|
python
|
from .jira.client import JiraClient
from .gitlab.client import GitLabClientWrapper as GitLabClient
|
python
|
import sys
sys.path.append("../src")
import numpy as np
import seaborn as sns
from scipy.stats import pearsonr
from scipy.special import binom
import matplotlib.pyplot as plt
from matplotlib import gridspec
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import gnk_model
import C_calculation
import utils
import structure_utils
import data_utils
import plot_utils
import warnings
warnings.filterwarnings("ignore")
plt.style.use(['seaborn-deep', '../paper.mplstyle'])
"""
This script produces the first row of Figure 4D, which displays the results
of various tests on the mTagBFP empirical fitness function, and prints
quantities related to this analysis. Run as:
$ python figure4_top.py
"""
q = 2
L = 13
# get WH coefficients
emp_beta = data_utils.calculate_mtagbfp_wh_coefficients()
# Calculate fraction variance explained by empirical coefficients
beta_mag_sq = emp_beta**2 / np.sum(emp_beta**2) # normalize sum of squares to one
bm_fv = utils.calc_frac_var_explained(emp_beta)
# calculate the coefficient variances corresponding to neighborhoods
gnk_beta_var_, V = data_utils.calculate_mtagbfp_gnk_wh_coefficient_vars(return_neighborhoods=True)
gnk_beta_var = gnk_beta_var_/np.sum(gnk_beta_var_) # normalize beta
gnk_sparsity = np.count_nonzero(gnk_beta_var)
pv_at_gnk = 100*bm_fv[gnk_sparsity]
num_samples = int(np.ceil(gnk_sparsity*C_calculation.C_VAL*np.log10(q**L)))
print("Sparsity of mTagBFP Structural GNK model: %i" % gnk_sparsity)
print("Number of samples to recover mTagBFP GNK: %s" % num_samples)
print("Percent variance explained by largest %i mTagBFP empirical coefficients: %.3f" % (gnk_sparsity, pv_at_gnk))
# calculate fraction variance explained by samples of GNK coefficients
gnk_fv_mean, gnk_fv_std = utils.calc_frac_var_explained_from_beta_var(gnk_beta_var_, samples=1000, up_to=76)
# Load LASSO results
results_dict = np.load("../results/mtagbfp_lasso_results.npy", allow_pickle=True).item()
ns = results_dict['n']
pearson = results_dict['pearson']
mean_r = np.mean(pearson**2, axis=1)
std_r = np.std(pearson**2, axis=1)
idx = np.where(ns==num_samples)[0][0]
r2_val = mean_r[idx]
print("LASSO R^2 at mTagBFP GNK predicted number of samples: %.3f" % r2_val)
# Load example results
example_results = np.load("../results/mtagbfp_lasso_example.npy")
########################
### Make large panel ###
########################
fig = plt.figure(figsize=(15, 3))
gs = fig.add_gridspec(1,5)
# plot neighborhoods
ax = fig.add_subplot(gs[0, 0])
plot_utils.plot_neighborhoods(ax, V, L,
data_utils.MTAGBFP_POSITIONS,
label_rotation=60, s=90)
# plot beta comparison
ax = fig.add_subplot(gs[0, 1:3])
max_order = 5
num_coeffs = int(np.sum([binom(13, i) for i in range(max_order+1)])) # up to 5th order interactions
plot_utils.plot_beta_comparison(ax, L, num_coeffs,
beta_mag_sq,
gnk_beta_var,
max_order=max_order,
use_order_labels=True)
# plot percent variance explained
colors = sns.color_palette('Set1', n_colors=2)
ax = fig.add_subplot(gs[0, 3])
plot_utils.plot_percent_variance_explained(ax, 100*bm_fv,
100*gnk_fv_mean,
100*gnk_fv_std,
gnk_sparsity,
xlim=75,
xticks=(0, 25, 50, 75)
)
###########################
### plot LASSO results ###
###########################
range1 = [0.45, 1]
range2 = [0, 0.08]
height_ratio = ((range1[1]-range1[0]) / (range2[1]-range2[0]))
gs_ = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=gs[0, 4], height_ratios=[height_ratio, 1])
ax1 = plt.subplot(gs_[0])
ax2 = plt.subplot(gs_[1])
colors = sns.color_palette('Set1')
c = colors[1]
# c = colors(0.6)
ax1.errorbar(ns, mean_r, yerr=std_r, lw=0.5, marker='o', markersize=0,
c='k', zorder=12, fmt='none', capsize=1, markeredgewidth=0.5)
ax2.errorbar(ns, mean_r, yerr=std_r, c='k', lw=0.5, marker='o', markersize=0,
zorder=12, fmt='none', capsize=1, markeredgewidth=0.5)
ax1.plot(ns, mean_r, c=c, lw=1, marker='o', markersize=3, zorder=10)
ax2.plot(ns, mean_r, c=c, lw=1, marker='o', markersize=3, zorder=10)
ax1.set_ylim(range1[0], range1[1])
ax2.set_ylim(range2[0], range2[1])
ax1.set_xlim([0, 2000])
ax2.set_xlim([0, 2000])
ax1.plot((0, 2000), (range1[0]+0.002, range1[0]+0.002), lw=0.5,c='k', alpha=0.2)
ax2.plot((0, 2000), (range2[1], range2[1]), lw=0.5,c='k', alpha=0.2)
ax1.plot((num_samples, num_samples), (0, r2_val), ls='--', c='k', lw=0.75, zorder=0)
ax1.plot((0, num_samples), (r2_val, r2_val), ls='--', c='k', lw=0.75)
ax1.scatter([num_samples], [r2_val], edgecolor='k', facecolor=colors[0],
zorder=11, s=15, linewidth=0.75)
ax2.plot((num_samples, num_samples), (0, r2_val), ls='--', c='k', lw=0.75, zorder=0)
ax2.plot((0, num_samples), (r2_val, r2_val), ls='--', c='k', lw=0.75)
ax2.set_yticklabels(["0", "0.05"])
# hide the spines between ax and ax2
ax1.spines['bottom'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax1.xaxis.tick_top()
ax1.tick_params(labeltop=False) # don't put tick labels at the top
ax2.xaxis.tick_bottom()
d = .015 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax1.transAxes, color='k', clip_on=False)
ax1.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax1.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
ax2.plot((-d, +d), (1 - height_ratio*d, 1 + height_ratio*d), **kwargs) # bottom-left diagonal
ax2.plot((1 - d, 1 + d), (1 - height_ratio*d, 1 + height_ratio*d), **kwargs)
ax1.tick_params(axis='y', which='major', right=False, labelright=False)
ax1.tick_params(axis='x', which='major', top=False, labeltop=False)
ax2.tick_params(axis='y', which='major', right=False, labelright=False)
ax2.tick_params(axis='x', which='major', top=False, labeltop=False)
ax1.set_ylabel("Prediction $R^2$", fontsize=12, labelpad=2)
ax2.set_xlabel("Number of training samples", fontsize=12, )
ax1.yaxis.set_label_coords(-0.1,0.4)
axins = inset_axes(ax1, width="80%", height="100%",
bbox_to_anchor=(0.45, 0.13, .6, .5),
bbox_transform=ax1.transAxes, loc=3)
plot_utils.plot_lasso_example_inset(axins, example_results)
plt.subplots_adjust(hspace=0.08)
plt.tight_layout()
plt.savefig("plots/figure4_tagbfp.png", dpi=500, bbox_inches='tight', transparent=True)
|
python
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import importlib
import qiime2.plugin
from q2_types.sample_data import SampleData
from q2_types.per_sample_sequences import (
SequencesWithQuality, PairedEndSequencesWithQuality,
JoinedSequencesWithQuality)
import q2_quality_filter
from q2_quality_filter._type import QualityFilterStats
from q2_quality_filter._format import (QualityFilterStatsFmt,
QualityFilterStatsDirFmt)
citations = qiime2.plugin.Citations.load(
'citations.bib', package='q2_quality_filter')
plugin = qiime2.plugin.Plugin(
name='quality-filter',
version=q2_quality_filter.__version__,
website='https://github.com/qiime2/q2-quality-filter',
package='q2_quality_filter',
user_support_text=None,
description=('This QIIME 2 plugin supports filtering and trimming of '
'sequence reads based on PHRED scores and ambiguous '
'nucleotide characters.'),
short_description='Plugin for PHRED-based filtering and trimming.',
citations=citations
)
plugin.register_formats(QualityFilterStatsFmt, QualityFilterStatsDirFmt)
plugin.register_semantic_types(QualityFilterStats)
plugin.register_semantic_type_to_format(
QualityFilterStats,
artifact_format=QualityFilterStatsDirFmt)
InputMap, OutputMap = qiime2.plugin.TypeMap({
SampleData[SequencesWithQuality | PairedEndSequencesWithQuality]:
SampleData[SequencesWithQuality],
SampleData[JoinedSequencesWithQuality]:
SampleData[JoinedSequencesWithQuality],
})
_q_score_parameters = {
'min_quality': qiime2.plugin.Int,
'quality_window': qiime2.plugin.Int,
'min_length_fraction': qiime2.plugin.Float,
'max_ambiguous': qiime2.plugin.Int
}
_q_score_input_descriptions = {
'demux': 'The demultiplexed sequence data to be quality filtered.'
}
_q_score_parameter_descriptions = {
'min_quality': 'The minimum acceptable PHRED score. All PHRED scores '
'less than this value are considered to be low PHRED '
'scores.',
'quality_window': 'The maximum number of low PHRED scores that '
'can be observed in direct succession before '
'truncating a sequence read.',
'min_length_fraction': 'The minimum length that a sequence read can '
'be following truncation and still be '
'retained. This length should be provided '
'as a fraction of the input sequence length.',
'max_ambiguous': 'The maximum number of ambiguous (i.e., N) base '
'calls. This is applied after trimming sequences '
'based on `min_length_fraction`.'
}
_q_score_output_descriptions = {
'filtered_sequences': 'The resulting quality-filtered sequences.',
'filter_stats': 'Summary statistics of the filtering process.'
}
plugin.methods.register_function(
function=q2_quality_filter.q_score,
inputs={'demux': InputMap},
parameters=_q_score_parameters,
outputs=[
('filtered_sequences', OutputMap),
('filter_stats', QualityFilterStats)
],
input_descriptions=_q_score_input_descriptions,
parameter_descriptions=_q_score_parameter_descriptions,
output_descriptions=_q_score_output_descriptions,
name='Quality filter based on sequence quality scores.',
description=('This method filters sequence based on quality scores and '
'the presence of ambiguous base calls.')
)
importlib.import_module('q2_quality_filter._transformer')
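# Illustrative sketch (not part of the original file): with the plugin installed,
# q2cli exposes the registered q_score action roughly as follows (flag spellings
# follow q2cli's standard name mangling and are stated here as an assumption):
#
#   qiime quality-filter q-score \
#     --i-demux demux.qza \
#     --p-min-quality 4 \
#     --o-filtered-sequences filtered.qza \
#     --o-filter-stats stats.qza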
|
python
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Copyright (c) The Lab of Professor Weiwei Lin ([email protected]),
# School of Computer Science and Engineering, South China University of Technology.
# A-Tune is licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# Create: 2020-01-04
x1 = 3
x2 = 1
x3 = 5
x4 = 3
x5 = 3
x6 = 3
x7 = 3
x8 = 3
x9 = 3
x10 = 2
x11 = 2
x12 = 4
x13 = 4
x14 = 2
x15 = 2
x16 = 1
x17 = 2
x18 = 5
x19 = 1
x20 = 1
x21 = 1
x22 = 1
x23 = 1
x24 = 2
x25 = 4
x26 = 2
x27 = 3
x28 = 1
x29 = 2
x30 = 4
x31 = 4
x32 = 1
x33 = 4
x34 = 1
x35 = 2
x36 = 1
x37 = 3
x38 = 2
x39 = 1
x40 = 2
x41 = 3
x42 = 3
x43 = 2
x44 = 2
x45 = 2
x46 = 4
x47 = 4
x48 = 2
x49 = 2
x50 = 2
x51 = 2
x52 = 1
x53 = 4
x54 = 3
x55 = 3
x56 = 1
x57 = 2
x58 = 3
x59 = 3
x60 = 3
x61 = 1
x62 = 3
x63 = 3
x64 = 4
x65 = 3
x66 = 2
x67 = 3
x68 = 3
x69 = 3
x70 = 2
x71 = 4
x72 = 1
x73 = 3
x74 = 2
x75 = 3
x76 = 1
x77 = 3
x78 = 1
x79 = 4
x80 = 2
x81 = 1
x82 = 1
x83 = 2
x84 = 4
x85 = 5
x86 = 3
x87 = 4
x88 = 2
x89 = 2
x90 = 1
x91 = 2
x92 = 1
x93 = 2
x94 = 1
x95 = 2
x96 = 3
x97 = 3
x98 = 2
x99 = 2
x100 = 3
x101 = 4
x102 = 3
x103 = 2
x104 = 2
x105 = 3
x106 = 5
x107 = 4
x108 = 2
x109 = 1
x110 = 4
x111 = 3
x112 = 4
x113 = 2
x114 = 2
x115 = 4
x116 = 4
x117 = 2
x118 = 3
x119 = 2
x120 = 4
x121 = 3
x122 = 2
x123 = 4
x124 = 4
x125 = 3
x126 = 4
x127 = 1
x128 = 3
x129 = 3
x130 = 5
x131 = 4
x132 = 3
x133 = 1
x134 = 2
x135 = 1
x136 = 1
x137 = 4
x138 = 4
x139 = 3
x140 = 1
x141 = 4
x142 = 1
x143 = 1
x144 = 4
x145 = 5
x146 = 4
x147 = 1
x148 = 4
x149 = 3
x150 = 3
y = 1 * x147 ** 1 + 2 * x80 ** 1 + 3 * x55 ** 1 + 4 * x81 ** 1 + 5 * x87 ** 1 + 1 * x82 ** 2 + 2 * x88 ** 2 + \
3 * x83 ** 2 + 4 * x144 ** 2 + 5 * x38 ** 2 + 1 * x135 ** 3 + 2 * x125 ** 3 + 3 * x14 ** 3 + 4 * x65 ** 3 + \
5 * x95 ** 3 + 1 * x73 ** 4 + 2 * x37 ** 4 + 3 * x105 ** 4 + 4 * x28 ** 4 + 5 * x121 ** 4 + 1 * x100 ** 5 + \
2 * x141 ** 5 + 3 * x69 ** 5 + 4 * x97 ** 5 + 5 * x53 ** 5 + 1 * x126 ** 6 + 2 * x104 ** 6 + 3 * x103 ** 6 + \
4 * x27 ** 6 + 5 * x10 ** 6 + 1 * x140 ** 7 + 2 * x54 ** 7 + 3 * x5 ** 7 + 4 * x70 ** 7 + 5 * x114 ** 7 + \
1 * x57 ** 8 + 2 * x74 ** 8 + 3 * x26 ** 8 + 4 * x19 ** 8 + 5 * x111 ** 8 + 1 * x108 ** 9 + 2 * x48 ** 9 + \
3 * x11 ** 9 + 4 * x59 ** 9 + 5 * x123 ** 9 + 1 * x61 ** 10 + 2 * x6 ** 10 + 3 * x79 ** 10 + 4 * x71 ** 10 + \
5 * x98 ** 10 + 1 * x34 ** 11 + 2 * x112 ** 11 + 3 * x25 ** 11 + 4 * x93 ** 11 + 5 * x86 ** 11 + 1 * x64 ** 12 + \
2 * x120 ** 12 + 3 * x20 ** 12 + 4 * x16 ** 12 + 5 * x94 ** 12 + 1 * x76 ** 13 + 2 * x21 ** 13 + 3 * x129 ** 13 + \
4 * x146 ** 13 + 5 * x77 ** 13 + 1 * x46 ** 14 + 2 * x91 ** 14 + 3 * x31 ** 14 + 4 * x67 ** 14 + 5 * x150 ** 14 + \
1 * x72 ** 15 + 2 * x84 ** 15 + 3 * x136 ** 15 + 4 * x15 ** 15 + 5 * x149 ** 15 + 1 * x2 ** 16 + 2 * x116 ** 16 + \
3 * x66 ** 16 + 4 * x42 ** 16 + 5 * x45 ** 16 + 1 * x63 ** 17 + 2 * x85 ** 17 + 3 * x143 ** 17 + 4 * x4 ** 17 + \
5 * x29 ** 17 + 1 * x113 ** 18 + 2 * x50 ** 18 + 3 * x132 ** 18 + 4 * x127 ** 18 + 5 * x30 ** 18 + 1 * x109 ** 19 +\
2 * x131 ** 19 + 3 * x36 ** 19 + 4 * x9 ** 19 + 5 * x43 ** 19 + 1 * x119 ** 20 + 2 * x8 ** 20 + 3 * x68 ** 20 + \
4 * x107 ** 20 + 5 * x12 ** 20 + 1 * x32 ** 21 + 2 * x122 ** 21 + 3 * x115 ** 21 + 4 * x75 ** 21 + 5 * x49 ** 21 + \
1 * x110 ** 22 + 2 * x40 ** 22 + 3 * x17 ** 22 + 4 * x134 ** 22 + 5 * x128 ** 22 + 1 * x18 ** 23 + 2 * x142 ** 23 +\
3 * x133 ** 23 + 4 * x24 ** 23 + 5 * x102 ** 23 + 1 * x145 ** 24 + 2 * x33 ** 24 + 3 * x106 ** 24 + 4 * x58 ** 24 +\
5 * x47 ** 24 + 1 * x22 ** 25 + 2 * x118 ** 25 + 3 * x44 ** 25 + 4 * x35 ** 25 + 5 * x90 ** 25 + 1 * x96 ** 26 + \
2 * x62 ** 26 + 3 * x78 ** 26 + 4 * x39 ** 26 + 5 * x99 ** 26 + 1 * x117 ** 27 + 2 * x1 ** 27 + 3 * x3 ** 27 + \
4 * x7 ** 27 + 5 * x52 ** 27 + 1 * x60 ** 28 + 2 * x124 ** 28 + 3 * x139 ** 28 + 4 * x101 ** 28 + 5 * x23 ** 28 + \
1 * x92 ** 29 + 2 * x148 ** 29 + 3 * x137 ** 29 + 4 * x89 ** 29 + 5 * x51 ** 29 + 1 * x41 ** 30 + 2 * x13 ** 30 + \
3 * x130 ** 30 + 4 * x138 ** 30 + 5 * x56 ** 30
print("y = %s" % y)
|
python
|
import os
from aws_cdk import (
core,
aws_ec2 as ec2,
aws_cloudformation as cloudformation,
aws_elasticache as elasticache,
)
class ElastiCacheStack(cloudformation.NestedStack):
def __init__(self, scope: core.Construct, id: str, **kwargs):
super().__init__(scope, id, **kwargs)
self.elasticache_security_group = ec2.CfnSecurityGroup(
self,
"ElastiCacheSecurityGroup",
vpc_id=scope.vpc.vpc_id,
group_description="ElastiCacheSecurityGroup",
security_group_ingress=[
ec2.CfnSecurityGroup.IngressProperty(
ip_protocol="tcp",
to_port=6379,
from_port=6379,
source_security_group_id=scope.vpc.vpc_default_security_group,
)
],
)
self.elasticache_subnet_group = elasticache.CfnSubnetGroup(
self,
"CfnSubnetGroup",
subnet_ids=scope.vpc.select_subnets(
subnet_type=ec2.SubnetType.ISOLATED
).subnet_ids,
description="The subnet group for ElastiCache",
)
self.elasticache = elasticache.CfnCacheCluster(
self,
"ElastiCacheClusterRedis",
cache_node_type="cache.t2.micro",
engine="redis",
num_cache_nodes=1,
vpc_security_group_ids=[
self.elasticache_security_group.get_att("GroupId").to_string()
],
cache_subnet_group_name=self.elasticache_subnet_group.ref, # noqa
)
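# Minimal usage sketch (not part of the original module); assumes a parent stack that
# exposes a `vpc` attribute with isolated subnets, since the constructor above reads
# scope.vpc. The names below are placeholders:
#
#   class ApplicationStack(core.Stack):
#       def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
#           super().__init__(scope, id, **kwargs)
#           self.vpc = ec2.Vpc(
#               self, "Vpc",
#               subnet_configuration=[
#                   ec2.SubnetConfiguration(name="public", subnet_type=ec2.SubnetType.PUBLIC),
#                   ec2.SubnetConfiguration(name="isolated", subnet_type=ec2.SubnetType.ISOLATED),
#               ],
#           )
#           self.elasticache_stack = ElastiCacheStack(self, "ElastiCacheStack")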
|
python
|
import copy
import os
import pickle as pkl
import numpy as np
from .base import BaseLoop
from .utils import EarlyStopping
class BaseTrainerLoop(BaseLoop):
def __init__(self, model, optimizer, **kwargs):
super().__init__()
self.model = model
self.optimizer = optimizer
## kwargs assignments
self.patience = kwargs.get('patience', 5)
self.saving_path = kwargs.get('saving_path', '.')
self.cuda = kwargs.get('cuda', False)
self.verbose = kwargs.get('verbose', False)
self.simulate_mini_batch = kwargs.get('simulate_mini_batch', False)
def get_state(self):
state = {'weights': copy.deepcopy(self.model.state_dict()),
'train_losses': self.train_losses,
'valid_losses': self.valid_losses,
'early_stopper': self.early_stopper}
return state
def load_state(self, state):
self.model.load_state_dict(state['weights'])
self.train_losses = state['train_losses']
self.valid_losses = state['valid_losses']
self.early_stopper = state['early_stopper']
def save_state(self):
with open(os.path.join(self.saving_path, 'checkpoint.pkl'), 'wb') as f:
pkl.dump(self.get_state(), f)
## public functions
def train(self, trainloader, validloader, nb_epochs, early_stopping=False, checkpoint=None):
if checkpoint is not None:
self.load_state(checkpoint)
else:
self.train_losses, self.valid_losses = [], []
self.early_stopper = EarlyStopping(self.patience)
for i_epoch in range(nb_epochs):
train_loss = self._BaseLoop__train_one_epoch(trainloader)
valid_loss = self._BaseLoop__validate_one_epoch(validloader)
self.train_losses.append(train_loss)
self.valid_losses.append(valid_loss)
if self.verbose:
print(' [-]: epoch {}, train loss: {:.4f}, valid loss: {:.4f}, {}'\
.format(i_epoch+1, train_loss, valid_loss, self.show_metrics()))
stop = self.early_stopper.stopping(i_epoch, valid_loss, self.model.state_dict())
self.save_state()
if stop and early_stopping:
if self.verbose:
print(' [-]: early stopping at epoch {}'.format(i_epoch+1))
break
self.model.load_state_dict(self.early_stopper.best_weights)
return self.train_losses, self.valid_losses, self.early_stopper
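# Minimal usage sketch (not part of the original module); in practice a concrete
# subclass implementing the BaseLoop epoch/metric hooks would be used, and the
# model, optimizer and data loaders below are hypothetical placeholders:
#
#   import torch
#
#   model = MyModel()
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#   loop = MyTrainerLoop(model, optimizer, patience=5, saving_path='./ckpt', verbose=True)
#   train_losses, valid_losses, stopper = loop.train(trainloader, validloader,
#                                                    nb_epochs=50, early_stopping=True)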
|
python
|
# Multiple 1D fibers (monodomain), biceps geometry
# This is similar to the fibers_emg example, but without EMG.
# To see all available arguments, execute: ./multiple_fibers settings_multiple_fibers_cubes_partitioning.py -help
#
# if fiber_file=cuboid.bin, it uses a small cuboid test example (Contrary to the "cuboid" example, this produces a real cuboid).
#
# You have to set n_subdomains such that it matches the number of processes, e.g. 2x2x1 = 4 processes.
# Decomposition is in x,y,z direction, the fibers are aligned with the z axis.
# E.g. --n_subdomains 2 2 1 which is 2x2x1 means no subdivision per fiber,
# --n_subdomains 8 8 4 means every fiber will be subdivided to 4 processes and all fibers will be computed by 8x8 processes.
#
# Example with 4 processes and end time 5, and otherwise default parameters:
# mpirun -n 4 ./multiple_fibers ../settings_multiple_fibers_cubes_partitioning.py --n_subdomains 2 2 1 --end_time=5.0
#
# Three files contribute to the settings:
# A lot of variables are set by the helper.py script, the variables and their values are defined in variables.py and this file
# creates the composite config that is needed by opendihu.
# You can provide parameter values in a custom_variables.py file in the variables subfolder. (Instead of custom_variables.py you can choose any filename.)
# This custom variables file should be the next argument on the command line after settings_fibers_emg.py, e.g.:
#
# ./multiple_fibers ../settings_multiple_fibers_cubes_partitioning.py custom_variables.py --n_subdomains 1 1 1 --end_time=5.0
#
# E.g. try
# ./multiple_fibers_57_states ../settings_multiple_fibers_cubes_partitioning.py compare_to_opencmiss.py
# mpirun -n 2 ./multiple_fibers_57_states ../settings_multiple_fibers_cubes_partitioning.py --n_subdomains 2 1 1
import sys, os
import timeit
import argparse
import importlib
import copy
# parse rank arguments
rank_no = (int)(sys.argv[-2])
n_ranks = (int)(sys.argv[-1])
# add variables subfolder to python path where the variables script is located
script_path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, script_path)
sys.path.insert(0, os.path.join(script_path,'variables'))
import variables # file variables.py, defined default values for all parameters, you can set the parameters there
from create_partitioned_meshes_for_settings import * # file create_partitioned_meshes_for_settings with helper functions about own subdomain
# if first argument contains "*.py", it is a custom variable definition file, load these values
if ".py" in sys.argv[0]:
variables_path_and_filename = sys.argv[0]
variables_path,variables_filename = os.path.split(variables_path_and_filename) # get path and filename
sys.path.insert(0, os.path.join(script_path,variables_path)) # add the directory of the variables file to python path
variables_module,_ = os.path.splitext(variables_filename) # remove the ".py" extension to get the name of the module
if rank_no == 0:
print("Loading variables from \"{}\".".format(variables_path_and_filename))
custom_variables = importlib.import_module(variables_module, package=variables_filename) # import variables module
variables.__dict__.update(custom_variables.__dict__)
sys.argv = sys.argv[1:] # remove first argument, which now has already been parsed
# define command line arguments
parser = argparse.ArgumentParser(description='fibers_emg')
parser.add_argument('--scenario_name', help='The name to identify this run in the log.', default=variables.scenario_name)
parser.add_argument('--n_subdomains', nargs=3, help='Number of subdomains in x,y,z direction.', type=int)
parser.add_argument('--n_subdomains_x', '-x', help='Number of subdomains in x direction.', type=int, default=variables.n_subdomains_x)
parser.add_argument('--n_subdomains_y', '-y', help='Number of subdomains in y direction.', type=int, default=variables.n_subdomains_y)
parser.add_argument('--n_subdomains_z', '-z', help='Number of subdomains in z direction.', type=int, default=variables.n_subdomains_z)
parser.add_argument('--diffusion_solver_type', help='The solver for the diffusion.', default=variables.diffusion_solver_type, choices=["gmres","cg","lu","gamg","richardson","chebyshev","cholesky","jacobi","sor","preonly"])
parser.add_argument('--diffusion_preconditioner_type', help='The preconditioner for the diffusion.', default=variables.diffusion_preconditioner_type, choices=["jacobi","sor","lu","ilu","gamg","none"])
parser.add_argument('--paraview_output', help='Enable the paraview output writer.', default=variables.paraview_output, action='store_true')
parser.add_argument('--adios_output', help='Enable the MegaMol/ADIOS output writer.', default=variables.adios_output, action='store_true')
parser.add_argument('--fiber_file', help='The filename of the file that contains the fiber data.', default=variables.fiber_file)
parser.add_argument('--cellml_file', help='The filename of the file that contains the cellml model.', default=variables.cellml_file)
parser.add_argument('--fiber_distribution_file', help='The filename of the file that contains the MU firing times.', default=variables.fiber_distribution_file)
parser.add_argument('--firing_times_file', help='The filename of the file that contains the cellml model.', default=variables.firing_times_file)
parser.add_argument('--end_time', '--tend', '-t', help='The end simulation time.', type=float, default=variables.end_time)
parser.add_argument('--output_timestep', help='The timestep for writing outputs.', type=float, default=variables.output_timestep)
parser.add_argument('--dt_0D', help='The timestep for the 0D model.', type=float, default=variables.dt_0D)
parser.add_argument('--dt_1D', help='The timestep for the 1D model.', type=float, default=variables.dt_1D)
parser.add_argument('--dt_splitting', help='The timestep for the splitting.', type=float, default=variables.dt_splitting)
parser.add_argument('--disable_firing_output', help='Disables the initial list of fiber firings.', default=variables.disable_firing_output, action='store_true')
parser.add_argument('--v', help='Enable full verbosity in c++ code')
parser.add_argument('-v', help='Enable verbosity level in c++ code', action="store_true")
parser.add_argument('-vmodule', help='Enable verbosity level for given file in c++ code')
parser.add_argument('-pause', help='Stop at parallel debugging barrier', action="store_true")
parser.add_argument('--n_fibers_y', help='Number of fibers when simulating a cuboid example.', type=int, default=variables.n_fibers_y)
# parse command line arguments and assign values to variables module
args, other_args = parser.parse_known_args(args=sys.argv[:-2], namespace=variables)
if len(other_args) != 0 and rank_no == 0:
print("Warning: These arguments were not parsed by the settings python file\n " + "\n ".join(other_args), file=sys.stderr)
# initialize some dependent variables
if variables.n_subdomains is not None:
variables.n_subdomains_x = variables.n_subdomains[0]
variables.n_subdomains_y = variables.n_subdomains[1]
variables.n_subdomains_z = variables.n_subdomains[2]
variables.n_subdomains = variables.n_subdomains_x*variables.n_subdomains_y*variables.n_subdomains_z
# automatically initialize partitioning if it has not been set
if n_ranks != variables.n_subdomains:
# create all possible partitionings to the given number of ranks
optimal_value = n_ranks**(1/3)
possible_partitionings = []
for i in range(1,n_ranks+1):
for j in range(1,n_ranks+1):
if i*j <= n_ranks and n_ranks % (i*j) == 0:
k = (int)(n_ranks / (i*j))
performance = (k-optimal_value)**2 + (j-optimal_value)**2 + 1.1*(i-optimal_value)**2
possible_partitionings.append([i,j,k,performance])
# if no possible partitioning was found
if len(possible_partitionings) == 0:
if rank_no == 0:
print("\n\n\033[0;31mError! Number of ranks {} does not match given partitioning {} x {} x {} = {} and no automatic partitioning could be done.\n\n\033[0m".format(n_ranks, variables.n_subdomains_x, variables.n_subdomains_y, variables.n_subdomains_z, variables.n_subdomains_x*variables.n_subdomains_y*variables.n_subdomains_z))
quit()
# select the partitioning with the lowest value of performance which is the best
lowest_performance = possible_partitionings[0][3]+1
for i in range(len(possible_partitionings)):
if possible_partitionings[i][3] < lowest_performance:
lowest_performance = possible_partitionings[i][3]
variables.n_subdomains_x = possible_partitionings[i][0]
variables.n_subdomains_y = possible_partitionings[i][1]
variables.n_subdomains_z = possible_partitionings[i][2]
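# Illustrative example (added comment): for n_ranks = 8 the search above considers all
# factorizations i*j*k = 8; the optimal value is 8**(1/3) = 2, so the partitioning
# 2 x 2 x 2 has performance (2-2)**2 + (2-2)**2 + 1.1*(2-2)**2 = 0 and is selected,
# i.e. 2 subdomains in each of the x, y and z directions.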
# output information of run
if rank_no == 0:
print("scenario_name: {}, n_subdomains: {} {} {}, n_ranks: {}, end_time: {}".format(variables.scenario_name, variables.n_subdomains_x, variables.n_subdomains_y, variables.n_subdomains_z, n_ranks, variables.end_time))
print("dt_0D: {:0.0e}, diffusion_solver_type: {}".format(variables.dt_0D, variables.diffusion_solver_type))
print("dt_1D: {:0.0e},".format(variables.dt_1D))
print("dt_splitting: {:0.0e}, paraview_output: {}".format(variables.dt_splitting, variables.paraview_output))
print("output_timestep: {:0.0e} stimulation_frequency: {} 1/ms = {} Hz".format(variables.output_timestep, variables.stimulation_frequency, variables.stimulation_frequency*1e3))
print("fiber_file: {}".format(variables.fiber_file))
print("cellml_file: {}".format(variables.cellml_file))
print("fiber_distribution_file: {}".format(variables.fiber_distribution_file))
print("firing_times_file: {}".format(variables.firing_times_file))
print("********************************************************************************")
print("prefactor: sigma_eff/(Am*Cm) = {} = {} / ({}*{})".format(variables.Conductivity/(variables.Am*variables.Cm), variables.Conductivity, variables.Am, variables.Cm))
# start timer to measure duration of parsing of this script
t_start_script = timeit.default_timer()
# initialize all helper variables
from helper import *
variables.n_subdomains_xy = variables.n_subdomains_x * variables.n_subdomains_y
variables.n_fibers_total = variables.n_fibers_x * variables.n_fibers_y
# define the config dict
config = {
"scenarioName": variables.scenario_name, # scenario name which will appear in the log file
"logFormat": "csv", # "csv" or "json", format of the lines in the log file, csv gives smaller files
"solverStructureDiagramFile": "solver_structure.txt", # output file of a diagram that shows data connection between solvers
"mappingsBetweenMeshesLogFile": "out/mappings_between_meshes.txt", # output file that contains a log about creation of mappings between meshes
"meta": { # additional fields that will appear in the log
"partitioning": [variables.n_subdomains_x, variables.n_subdomains_y, variables.n_subdomains_z]
},
"Meshes": variables.meshes,
"Solvers": {
"implicitSolver": { # solver for the implicit timestepping scheme of the diffusion time step
"maxIterations": 1e4,
"relativeTolerance": 1e-10,
"absoluteTolerance": 1e-10, # 1e-10 absolute tolerance of the residual
"solverType": variables.diffusion_solver_type,
"preconditionerType": variables.diffusion_preconditioner_type,
"dumpFilename": "", # "out/dump_"
"dumpFormat": "matlab",
},
},
"MultipleInstances": {
"logKey": "duration_subdomains_xy",
"ranksAllComputedInstances": list(range(n_ranks)),
"nInstances": variables.n_subdomains_xy,
"instances":
[{
"ranks": list(range(subdomain_coordinate_y*variables.n_subdomains_x + subdomain_coordinate_x, n_ranks, variables.n_subdomains_x*variables.n_subdomains_y)),
"StrangSplitting": {
#"numberTimeSteps": 1,
"timeStepWidth": variables.dt_splitting, # 1e-1
"logTimeStepWidthAsKey": "dt_splitting",
"durationLogKey": "duration_monodomain",
"timeStepOutputInterval": 100,
"endTime": variables.end_time,
"connectedSlotsTerm1To2": [0,1], # transfer slot 0 = state Vm from Term1 (CellML) to Term2 (Diffusion)
"connectedSlotsTerm2To1": [0,1], # transfer the same back
"Term1": { # CellML, i.e. reaction term of Monodomain equation
"MultipleInstances": {
"logKey": "duration_subdomains_z",
"nInstances": n_fibers_in_subdomain_x(subdomain_coordinate_x)*n_fibers_in_subdomain_y(subdomain_coordinate_y),
"instances":
[{
"ranks": list(range(variables.n_subdomains_z)), # these rank nos are local nos to the outer instance of MultipleInstances, i.e. from 0 to number of ranks in z direction
"Heun" : {
"timeStepWidth": variables.dt_0D, # 5e-5
"logTimeStepWidthAsKey": "dt_0D",
"durationLogKey": "duration_0D",
"initialValues": [],
"timeStepOutputInterval": 1e4,
"inputMeshIsGlobal": True,
"checkForNanInf": False,
"dirichletBoundaryConditions": {},
"dirichletOutputFilename": None, # filename for a vtp file that contains the Dirichlet boundary condition nodes and their values, set to None to disable
"nAdditionalFieldVariables": 0,
"additionalSlotNames": [],
"CellML" : {
"modelFilename": variables.cellml_file, # input C++ source file or cellml XML file
"statesInitialValues": [], # if given, the initial values for the the states of one instance
#"statesInitialValues": [-81.5938, -81.5407, 7.166, 150.916, 6.03857, 12.6618, 131.577, 132.928, 0.00751472, 0.996183, 0.0292367, 0.569413, 0.731601, 0.0075721, 0.996084, 0.0294341, 0.567101, 0.730931, 1.75811e-06, 5.75735e-06, 7.07019e-06, 3.85884e-06, 7.89791e-07, 0.879053, 0.115147, 0.00565615, 0.000123483, 1.01094e-06, -916.582, 0.0284792, 56.5564, 0.0284779, 1687.31, 2.98725, 615, 615, 811, 811, 1342.65, 17807.7, 0.107772, 0.10777, 7243.03, 7243.03, 756.867, 756.867, 956.975, 956.975, 0.0343398, 0.0102587, 0.0136058, 0.0314258, 0.0031226, 0.00249808, 0.223377, 0.264145, 1.74046e-06],
"initializeStatesToEquilibrium": False, # if the equilibrium values of the states should be computed before the simulation starts
"initializeStatesToEquilibriumTimestepWidth": 1e-4, # if initializeStatesToEquilibrium is enable, the timestep width to use to solve the equilibrium equation
# optimization parameters
"optimizationType": "vc", # "vc", "simd", "openmp" type of generated optimizated source file
"approximateExponentialFunction": True, # if optimizationType is "vc", whether the exponential function exp(x) should be approximate by (1+x/n)^n with n=1024
"compilerFlags": "-fPIC -O3 -march=native -shared ", # compiler flags used to compile the optimized model code
"maximumNumberOfThreads": 0, # if optimizationType is "openmp", the maximum number of threads to use. Default value 0 means no restriction.
# stimulation callbacks
#"libraryFilename": "cellml_simd_lib.so", # compiled library
#"setParametersFunction": set_parameters, # callback function that sets parameters like stimulation current
#"setParametersCallInterval": int(1./stimulation_frequency/dt_0D), # set_parameters should be called every 0.1, 5e-5 * 1e3 = 5e-2 = 0.05
#"setSpecificParametersFunction": set_specific_parameters, # callback function that sets parameters like stimulation current
#"setSpecificParametersCallInterval": int(1./stimulation_frequency/dt_0D), # set_parameters should be called every 0.1, 5e-5 * 1e3 = 5e-2 = 0.05
"setSpecificStatesFunction": set_specific_states, # callback function that sets states like Vm, activation can be implemented by using this method and directly setting Vm values, or by using setParameters/setSpecificParameters
"setSpecificStatesCallInterval": 0, # int(1./stimulation_frequency/dt_0D), # set_specific_states should be called every 0.1, 5e-5 * 1e3 = 5e-2 = 0.05
"setSpecificStatesCallFrequency": variables.get_specific_states_call_frequency(fiber_no, motor_unit_no), # set_specific_states should be called variables.stimulation_frequency times per ms
"setSpecificStatesFrequencyJitter": variables.get_specific_states_frequency_jitter(fiber_no, motor_unit_no), # random value to add or substract to setSpecificStatesCallFrequency every stimulation, this is to add random jitter to the frequency
"setSpecificStatesRepeatAfterFirstCall": 0.01, # simulation time span for which the setSpecificStates callback will be called after a call was triggered
"setSpecificStatesCallEnableBegin": variables.get_specific_states_call_enable_begin(fiber_no, motor_unit_no),# [ms] first time when to call setSpecificStates
"additionalArgument": fiber_no,
#"handleResultFunction": handleResult,
#"handleResultCallInterval": 2e3,
# parameters to the cellml model
"mappings": variables.mappings,
"parametersInitialValues": variables.parameters_initial_values,
"meshName": "MeshFiber_{}".format(fiber_no),
"stimulationLogFilename": "out/stimulation.log",
},
},
} for fiber_in_subdomain_coordinate_y in range(n_fibers_in_subdomain_y(subdomain_coordinate_y)) \
for fiber_in_subdomain_coordinate_x in range(n_fibers_in_subdomain_x(subdomain_coordinate_x)) \
for fiber_no in [get_fiber_no(subdomain_coordinate_x, subdomain_coordinate_y, fiber_in_subdomain_coordinate_x, fiber_in_subdomain_coordinate_y)] \
for motor_unit_no in [get_motor_unit_no(fiber_no)]],
}
},
"Term2": { # Diffusion
"MultipleInstances": {
"nInstances": n_fibers_in_subdomain_x(subdomain_coordinate_x)*n_fibers_in_subdomain_y(subdomain_coordinate_y),
"instances":
[{
"ranks": list(range(variables.n_subdomains_z)), # these rank nos are local nos to the outer instance of MultipleInstances, i.e. from 0 to number of ranks in z direction
"CrankNicolson" : {
"initialValues": [],
#"numberTimeSteps": 1,
"timeStepWidth": variables.dt_1D, # 1e-5
"timeStepWidthRelativeTolerance": 1e-10,
"logTimeStepWidthAsKey": "dt_1D",
"durationLogKey": "duration_1D",
"timeStepOutputInterval": 1e4,
"timeStepWidthRelativeTolerance": 1e-10,
"dirichletBoundaryConditions": {}, # old Dirichlet BC that are not used in FastMonodomainSolver: {0: -75.0036, -1: -75.0036},
"dirichletOutputFilename": None, # filename for a vtp file that contains the Dirichlet boundary condition nodes and their values, set to None to disable
"inputMeshIsGlobal": True,
"solverName": "implicitSolver",
"nAdditionalFieldVariables": 1, # for stress that will be transferred from CellML and then written with output writer
"additionalSlotNames": [],
"checkForNanInf": False,
"FiniteElementMethod" : {
"inputMeshIsGlobal": True,
"meshName": "MeshFiber_{}".format(fiber_no),
"prefactor": get_diffusion_prefactor(fiber_no, motor_unit_no), # resolves to Conductivity / (Am * Cm)
"solverName": "implicitSolver",
"slotName": "",
},
"OutputWriter" : [
#{"format": "Paraview", "outputInterval": int(1./variables.dt_1D*variables.output_timestep), "filename": "out/fiber_"+str(fiber_no), "binary": True, "fixedFormat": False, "combineFiles": True},
#{"format": "Paraview", "outputInterval": 1./variables.dt_1D*variables.output_timestep, "filename": "out/fiber_"+str(i)+"_txt", "binary": False, "fixedFormat": False},
#{"format": "ExFile", "filename": "out/fiber_"+str(i), "outputInterval": 1./variables.dt_1D*variables.output_timestep, "sphereSize": "0.02*0.02*0.02"},
#{"format": "PythonFile", "filename": "out/fiber_"+str(i), "outputInterval": 1./variables.dt_1D*variables.output_timestep, "binary":True, "onlyNodalValues":True},
]
},
} for fiber_in_subdomain_coordinate_y in range(n_fibers_in_subdomain_y(subdomain_coordinate_y)) \
for fiber_in_subdomain_coordinate_x in range(n_fibers_in_subdomain_x(subdomain_coordinate_x)) \
for fiber_no in [get_fiber_no(subdomain_coordinate_x, subdomain_coordinate_y, fiber_in_subdomain_coordinate_x, fiber_in_subdomain_coordinate_y)] \
for motor_unit_no in [get_motor_unit_no(fiber_no)]],
"OutputWriter" : variables.output_writer_fibers,
},
},
}
} if (subdomain_coordinate_x,subdomain_coordinate_y) == (variables.own_subdomain_coordinate_x,variables.own_subdomain_coordinate_y) else None
for subdomain_coordinate_y in range(variables.n_subdomains_y)
for subdomain_coordinate_x in range(variables.n_subdomains_x)]
},
}
# add entry for when fast_fibers with "RepeatedCall" as top solver is used
config["RepeatedCall"] = {
"timeStepWidth": variables.output_timestep, # 1e-1
"logTimeStepWidthAsKey": "dt_output_timestep",
"durationLogKey": "duration_repeated_call",
"timeStepOutputInterval": 1,
"endTime": variables.end_time,
"MultipleInstances": copy.deepcopy(config["MultipleInstances"]),
"fiberDistributionFile": variables.fiber_distribution_file, # for FastMonodomainSolver, e.g. MU_fibre_distribution_3780.txt
"firingTimesFile": variables.firing_times_file, # for FastMonodomainSolver, e.g. MU_firing_times_real.txt
"onlyComputeIfHasBeenStimulated": True, # only compute fibers after they have been stimulated for the first time
"disableComputationWhenStatesAreCloseToEquilibrium": True, # optimization where states that are close to their equilibrium will not be computed again
"valueForStimulatedPoint": variables.vm_value_stimulated, # to which value of Vm the stimulated node should be set
}
# loop over instances (fibers)
for i in range(len(config["RepeatedCall"]["MultipleInstances"]["instances"])):
#config["RepeatedCall"]["MultipleInstances"]["instances"][i]["StrangSplitting"]["endTime"] = variables.output_timestep
if config["RepeatedCall"]["MultipleInstances"]["instances"][i] is None:
continue
# loop over output writers
if config["RepeatedCall"]["MultipleInstances"]["instances"][i]:
for j in range(len(config["RepeatedCall"]["MultipleInstances"]["instances"][i]["StrangSplitting"]["Term2"]["MultipleInstances"]["OutputWriter"])):
# set outputInterval to 1
config["RepeatedCall"]["MultipleInstances"]["instances"][i]["StrangSplitting"]["Term2"]["MultipleInstances"]["OutputWriter"][j]["outputInterval"] = 1
#print(config["RepeatedCall"]["MultipleInstances"]["instances"][0]["StrangSplitting"]["Term1"]["MultipleInstances"]["instances"][0]["Heun"]["CellML"]);
# stop timer and calculate how long parsing lasted
if rank_no == 0:
t_stop_script = timeit.default_timer()
print("Python config parsed in {:.1f}s.".format(t_stop_script - t_start_script))
|
python
|
import random
from io import StringIO
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_style("darkgrid")
def str_convert_float(df):
columns = df.select_dtypes(exclude="number").columns
for col_name in columns:
unique_values = df[col_name].unique()
for i in range(len(unique_values)):
df.loc[df[col_name] == unique_values[i], col_name] = i
def train_test_split(df, test_size):
if isinstance(test_size, float):
test_size = round(test_size * len(df))
indices = df.index.tolist()
test_indices = random.sample(population=indices, k=test_size)
test_df = df.loc[test_indices]
train_df = df.drop(test_indices)
return train_df, test_df
def generate_data(n, specific_outliers=None, n_random_outliers=None):
# create data
if specific_outliers is None:
specific_outliers = []
data = np.random.random(size=(n, 2)) * 10
data = data.round(decimals=1)
df = pd.DataFrame(data, columns=["x", "y"])
df["label"] = df.x <= 5
# add specific outlier data points
for outlier_coordinates in specific_outliers:
df = df.append({"x": outlier_coordinates[0],
"y": outlier_coordinates[1],
"label": True},
ignore_index=True)
# add random outlier data points
if n_random_outliers:
outlier_x_values = (6 - 5) * np.random.random(size=n_random_outliers) + 5 # value between 5 and 6
outlier_y_values = np.random.random(size=n_random_outliers) * 10
df_outliers = pd.DataFrame({"x": outlier_x_values.round(decimals=2),
"y": outlier_y_values.round(decimals=2),
"label": [True] * n_random_outliers})
        df = pd.concat([df, df_outliers], ignore_index=True)
return df
def plot_decision_boundaries(tree, x_min, x_max, y_min, y_max):
color_keys = {True: "orange", False: "blue"}
# recursive part
if isinstance(tree, dict):
question = list(tree.keys())[0]
yes_answer, no_answer = tree[question]
feature, _, value = question.split()
if feature == "x":
plot_decision_boundaries(yes_answer, x_min, float(value), y_min, y_max)
plot_decision_boundaries(no_answer, float(value), x_max, y_min, y_max)
else:
plot_decision_boundaries(yes_answer, x_min, x_max, y_min, float(value))
plot_decision_boundaries(no_answer, x_min, x_max, float(value), y_max)
# "tree" is a leaf
else:
plt.fill_between(x=[x_min, x_max], y1=y_min, y2=y_max, alpha=0.2, color=color_keys[tree])
def create_plot(df, tree=None, title=None):
sns.lmplot(data=df, x="x", y="y", hue="label", fit_reg=False, height=4, aspect=1.5, legend=False)
plt.title(title)
    if tree or tree is False:  # root of the tree might just be a leaf with "False"
x_min, x_max = round(df.x.min()), round(df.x.max())
y_min, y_max = round(df.y.min()), round(df.y.max())
plot_decision_boundaries(tree, x_min, x_max, y_min, y_max)
def tree_graph(tree, class_names):
out_graph = StringIO()
out_graph.write('digraph Tree {\n')
# Specify node aesthetics
out_graph.write('node [shape=box] ;\n')
# Specify graph & edge aesthetics
out_graph.write('edge [fontname=helvetica] ;\n')
tree_graph_recurse(tree, out_graph, class_names)
out_graph.write("}")
return out_graph.getvalue()
def tree_graph_recurse(tree, out_graph, class_names, parent=None):
question = list(tree.keys())[0]
node_id = tree['node_id']
left_child, right_child = tree[question]
out_graph.write('%d [label="%s"] ;\n' % (node_id, question))
# if not isinstance(left_child, dict):
# out_graph.write('\nclass = %s' % left_child)
# if not isinstance(right_child, dict):
# out_graph.write('\nclass = %s' % right_child)
if parent is not None:
# Add edge to parent
out_graph.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
out_graph.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_graph.write('45, headlabel="True"]')
else:
out_graph.write('45, headlabel="False"]')
out_graph.write(' ;\n')
else:
global max_node_id
max_node_id = 999
if isinstance(left_child, dict):
tree_graph_recurse(left_child, out_graph, class_names, node_id)
else:
out_graph.write('%d [label="class = %s"] ;\n' % (max_node_id, class_names[left_child]))
out_graph.write('%d -> %d ;\n' % (node_id, max_node_id))
max_node_id += 1
if isinstance(right_child, dict):
tree_graph_recurse(right_child, out_graph, class_names, node_id)
else:
out_graph.write('%d [label="class = %s"] ;\n' % (max_node_id, class_names[right_child]))
out_graph.write('%d -> %d ;\n' % (node_id, max_node_id))
max_node_id += 1
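# Hypothetical usage sketch (not part of the original helpers): build a toy dataset,
# split it, and plot the decision regions of a hand-written one-node "tree".
# A decision node is {"<feature> <= <value>": [yes_subtree, no_subtree]} and a leaf is
# just a class label, as assumed by plot_decision_boundaries above.
if __name__ == "__main__":
    example_df = generate_data(n=100, n_random_outliers=5)
    train_df, test_df = train_test_split(example_df, test_size=0.2)
    toy_tree = {"x <= 5": [True, False]}
    create_plot(train_df, tree=toy_tree, title="Toy decision boundary")
    plt.show()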
|
python
|
# Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url
from openstack_dashboard.dashboards.project.vpn import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^addikepolicy$',
views.AddIKEPolicyView.as_view(), name='addikepolicy'),
url(r'^update_ikepolicy/(?P<ikepolicy_id>[^/]+)/$',
views.UpdateIKEPolicyView.as_view(), name='update_ikepolicy'),
url(r'^addipsecpolicy$',
views.AddIPSecPolicyView.as_view(), name='addipsecpolicy'),
url(r'^update_ipsecpolicy/(?P<ipsecpolicy_id>[^/]+)/$',
views.UpdateIPSecPolicyView.as_view(), name='update_ipsecpolicy'),
url(r'^addipsecsiteconnection$',
views.AddIPSecSiteConnectionView.as_view(),
name='addipsecsiteconnection'),
url(r'^update_ipsecsiteconnection/(?P<ipsecsiteconnection_id>[^/]+)/$',
views.UpdateIPSecSiteConnectionView.as_view(),
name='update_ipsecsiteconnection'),
url(r'^addvpnservice$',
views.AddVPNServiceView.as_view(), name='addvpnservice'),
url(r'^update_vpnservice/(?P<vpnservice_id>[^/]+)/$',
views.UpdateVPNServiceView.as_view(), name='update_vpnservice'),
url(r'^ikepolicy/(?P<ikepolicy_id>[^/]+)/$',
views.IKEPolicyDetailsView.as_view(), name='ikepolicydetails'),
url(r'^ipsecpolicy/(?P<ipsecpolicy_id>[^/]+)/$',
views.IPSecPolicyDetailsView.as_view(), name='ipsecpolicydetails'),
url(r'^vpnservice/(?P<vpnservice_id>[^/]+)/$',
views.VPNServiceDetailsView.as_view(), name='vpnservicedetails'),
url(r'^ipsecsiteconnection/(?P<ipsecsiteconnection_id>[^/]+)/$',
views.IPSecSiteConnectionDetailsView.as_view(),
name='ipsecsiteconnectiondetails'),
]
|
python
|
import json
from pyramid.view import view_config
@view_config(route_name='home')
def home(request):
# https://docs.pylonsproject.org/projects/pyramid/en/latest/api/request.html
# Response
response = request.response
response.content_type = 'application/json'
response.status_code = 200
# body
    set_json_to_response(response, {'project': 'BackHP'})
return response
def set_json_to_response(response, json_data):
    response.text = json.dumps(json_data)
    return response
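# Minimal wiring sketch (assumed; the actual project presumably configures this elsewhere):
#   from wsgiref.simple_server import make_server
#   from pyramid.config import Configurator
#   with Configurator() as config:
#       config.add_route('home', '/')
#       config.scan()                  # picks up the @view_config above
#       app = config.make_wsgi_app()
#   make_server('0.0.0.0', 6543, app).serve_forever()
# A GET to '/' then returns HTTP 200 with body {"project": "BackHP"} as application/json.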
|
python
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
def btcSupplyAtBlock(b):
if b >= 33 * 210000:
return 20999999.9769
else:
reward = 50e8
supply = 0
        y = 210000  # the reward halves every y blocks
while b > y - 1:
supply = supply + y * reward
reward = int(reward / 2.0)
b = b - y
supply = supply + b * reward
return (supply + reward) / 1e8
if __name__ == "__main__":
    block = 1000000  # block height at which to evaluate the supply
print(btcSupplyAtBlock(block))
|
python
|
import sys, os
from osgeo import gdal
from osgeo.gdalconst import GA_ReadOnly
# Get georeferencing information from a raster file and print to text file
src = sys.argv[1]
fname_out = sys.argv[2]
ds = gdal.Open(src, GA_ReadOnly)
if ds is None:
print('Content-Type: text/html\n')
print('Could not open ' + src)
sys.exit(1)
# Get the geotransform, the georeferencing, and the dimensions of the raster to match
transform = ds.GetGeoTransform()
wkt = ds.GetProjection()
rows = ds.RasterYSize
cols = ds.RasterXSize
ulx = transform[0]
uly = transform[3]
pixelWidth = transform[1]
pixelHeight = transform[5]
lrx = ulx + (cols * pixelWidth)
lry = uly + (rows * pixelHeight)
f_out = open(fname_out, 'w')
f_out.write(str(pixelWidth) + '\n')
f_out.write(str(pixelHeight) + '\n')
f_out.write(str(ulx) + '\n')
f_out.write(str(uly) + '\n')
f_out.write(str(lrx) + '\n')
f_out.write(str(lry))
f_out.close()
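# Example invocation (script and file names are placeholders): writes the pixel width/height
# and the upper-left / lower-right corner coordinates of input.tif, one value per line, to extent.txt:
#   python raster_extent.py input.tif extent.txt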
|
python
|
"""
Base functions and classes for linked/joined operations.
"""
from typing import Mapping, Callable, Union, MutableMapping, Any
import operator
from inspect import signature
from functools import partialmethod
EmptyMappingFactory = Union[MutableMapping, Callable[[], MutableMapping]]
BinaryOp = Callable[[Any, Any], Any]
Neutral = object()
def key_aligned_val_op(
x: Mapping,
y: Mapping,
op: BinaryOp,
dflt_val_for_x=Neutral,
dflt_val_for_y=Neutral,
empty_mapping_factory: EmptyMappingFactory = dict,
):
"""
>>> from operator import add, sub, mul, truediv, and_, or_, xor
>>> x = {'a': 8, 'b': 4}
>>> y = {'b': 2, 'c': 1}
If no default vals are given, only those keys that are in both mappings will be in the output.
>>> key_aligned_val_op(x, y, add)
{'b': 6}
If we specify a default for ``x`` then all items of ``y`` can be used.
>>> key_aligned_val_op(x, y, add, dflt_val_for_x=0)
{'b': 6, 'c': 1}
If we specify a default for ``y`` then all items of ``x`` can be used.
>>> key_aligned_val_op(x, y, add, dflt_val_for_y=0)
{'a': 8, 'b': 6}
"""
result_dict = empty_mapping_factory()
for k, v1 in x.items():
v2 = y.get(k, dflt_val_for_y)
if v2 is not Neutral:
result_dict[k] = op(v1, v2)
# else, don't include the key
if dflt_val_for_x is not Neutral:
for k in set(y).difference(result_dict):
result_dict[k] = op(dflt_val_for_x, y[k])
return result_dict
def key_aligned_val_op_with_forced_defaults(
x: Mapping,
y: Mapping,
op: BinaryOp,
dflt_val_for_x,
dflt_val_for_y,
empty_mapping_factory: EmptyMappingFactory = dict,
) -> Mapping:
"""Apply an operator to the key-aligned values of two dictionaries, using specified defaults for each dictionary.
The output's keys will be the union of the keys of the input dictionaries.
    The type of the output will be the type returned by empty_mapping_factory.
>>> from operator import add, sub, mul, truediv, and_, or_, xor
>>> x = {'a': 8, 'b': 4}
>>> y = {'b': 2, 'c': 1}
>>> key_aligned_val_op_with_forced_defaults(x, y, add, 0, 0)
{'a': 8, 'b': 6, 'c': 1}
>>> key_aligned_val_op_with_forced_defaults(x, y, sub, 0, 0)
{'a': 8, 'b': 2, 'c': -1}
>>> key_aligned_val_op_with_forced_defaults(x, y, mul, 1, 1)
{'a': 8, 'b': 8, 'c': 1}
>>> key_aligned_val_op_with_forced_defaults(x, y, truediv, 1, 1)
{'a': 8.0, 'b': 2.0, 'c': 1.0}
>>> x = {'a': [8], 'b': [4]}
>>> y = {'b': [2], 'c': [1]}
>>> key_aligned_val_op_with_forced_defaults(x, y, add, [], [])
{'a': [8], 'b': [4, 2], 'c': [1]}
>>> x = {'a': True, 'b': False}
>>> y = {'b': True, 'c': False}
>>> key_aligned_val_op_with_forced_defaults(x, y, and_, True, True)
{'a': True, 'b': False, 'c': False}
>>> key_aligned_val_op_with_forced_defaults(x, y, or_, True, True)
{'a': True, 'b': True, 'c': True}
>>> key_aligned_val_op_with_forced_defaults(x, y, xor, True, True)
{'a': False, 'b': True, 'c': True}
"""
result_dict = empty_mapping_factory()
for k, v1 in x.items():
v2 = y.get(k, dflt_val_for_y)
result_dict[k] = op(v1, v2)
for k in set(y).difference(result_dict):
result_dict[k] = op(dflt_val_for_x, y[k])
return result_dict
def map_op_val(x: Mapping, val, op: BinaryOp, items_to_mapping=dict):
"""Apply operation op(v, val) to every value v of mapping x.
>>> from operator import add, sub, mul, truediv
>>> x = dict(a=2, b=3)
>>> map_op_val(x, 2, add)
{'a': 4, 'b': 5}
>>> map_op_val(x, 2, sub)
{'a': 0, 'b': 1}
>>> map_op_val(x, 2, mul)
{'a': 4, 'b': 6}
>>> map_op_val(x, 2, truediv)
{'a': 1.0, 'b': 1.5}
"""
return items_to_mapping(((k, op(v, val)) for k, v in x.items()))
startswith_dunder = lambda x: x.startswith('__')
def gen_attrname_func_for_module(module, name_filter=startswith_dunder):
module_obj_names_that_are_particular_to_module = set(dir(module)) - set(dir(dict))
module_obj_names = filter(
name_filter, module_obj_names_that_are_particular_to_module
)
for name in module_obj_names:
func = getattr(module, name)
if callable(func):
yield name, func
operator_name_funcs_1 = set(
filter(
lambda name_func: len(signature(name_func[1]).parameters) == 1,
gen_attrname_func_for_module(operator),
)
)
operator_name_funcs_2 = set(
filter(
lambda name_func: len(signature(name_func[1]).parameters) == 2,
gen_attrname_func_for_module(operator),
)
)
class OperableMapping(dict):
"""
>>> d = OperableMapping({'a': 8, 'b': 4})
>>> dd = OperableMapping(b=2, c=1) # you can make one this way too
>>> d + dd
{'a': 8, 'b': 6, 'c': 1}
>>> d - dd
{'a': 8, 'b': 2, 'c': -1}
>>> d * dd
{'a': 8, 'b': 8, 'c': 1}
>>> d / dd
{'a': 8.0, 'b': 2.0, 'c': 1.0}
    The results of the operations are themselves OperableMapping instances, so you can compose several.
>>> d * (dd + d) / d # notice that this is equivalent to d + dd (but with numbers cast to floats)
{'a': 8.0, 'b': 6.0, 'c': 1.0}
    You can also use plain values (which will be broadcast to all values of the mapping).
>>> d + 1
{'a': 9, 'b': 5}
>>> d * 10
{'a': 80, 'b': 40}
"""
def __add__(self, y):
if isinstance(y, dict):
return key_aligned_val_op_with_forced_defaults(
self, y, operator.__add__, 0, 0, __class__
)
else:
return map_op_val(self, y, operator.__add__)
def __sub__(self, y):
if isinstance(y, dict):
return key_aligned_val_op_with_forced_defaults(
self, y, operator.__sub__, 0, 0, __class__
)
else:
return map_op_val(self, y, operator.__sub__)
def __mul__(self, y):
if isinstance(y, dict):
return key_aligned_val_op_with_forced_defaults(
self, y, operator.__mul__, 1, 1, __class__
)
else:
return map_op_val(self, y, operator.__mul__)
def __truediv__(self, y):
if isinstance(y, dict):
return key_aligned_val_op_with_forced_defaults(
self, y, operator.__truediv__, 1, 1, __class__
)
else:
return map_op_val(self, y, operator.__truediv__)
# TODO: Inject the methods inside the class itself?
class OperableMappingNoDflts(dict):
"""OperableMapping with ALL operators of operator module (but without defaults)
>>> from linkup.base import *
>>> d = OperableMappingNoDflts({'a': 8, 'b': 4, 'c': 3})
>>> dd = OperableMappingNoDflts(b=2, c=1, d=0) # you can make one this way too
>>>
>>> d + 1
{'a': 9, 'b': 5, 'c': 4}
>>> d / dd
{'b': 2.0, 'c': 3.0}
>>> (d + 1) / dd
{'b': 2.5, 'c': 4.0}
"""
def _binary_operator_method_template(self, y, op, factory):
""""""
if isinstance(y, Mapping):
return key_aligned_val_op(self, y, op, empty_mapping_factory=factory)
else:
return map_op_val(self, y, op, factory)
# TODO: Make unary tools and inject to OperableMappingNoDflts
# for name, func in operator_name_funcs_1:
# setattr(OperableMappingNoDflts, name, partialmethod(_binary_operator_method_template,
# op=func, factory=OperableMappingNoDflts))
for name, func in operator_name_funcs_2:
setattr(
OperableMappingNoDflts,
name,
partialmethod(
_binary_operator_method_template, op=func, factory=OperableMappingNoDflts,
),
)
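# Quick usage sketch mirroring the doctests above; run the module directly to see the output.
if __name__ == "__main__":
    d = OperableMapping({"a": 8, "b": 4})
    dd = OperableMapping(b=2, c=1)
    print(d + dd)  # {'a': 8, 'b': 6, 'c': 1} -- values aligned on keys, missing keys default to 0
    print(d * 10)  # {'a': 80, 'b': 40}       -- a plain value is broadcast to every value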
|
python
|
import os
import re
import boto3
s3 = boto3.resource('s3')
s3_client = boto3.client('s3')
def maybe_makedirs(path, exist_ok=True):
"""Don't mkdir if it's a path on S3"""
if path.startswith('s3://'):
return
os.makedirs(path, exist_ok=exist_ok)
def smart_ls(path):
"""Get a list of files from `path`, either S3 or local."""
if path.startswith('s3://'):
return _smart_ls_s3(path)
else:
return _smart_ls_local(path)
def _smart_ls_s3(path):
bucket_name, prefix = re.match(r"s3:\/\/(.+?)\/(.+)", path).groups()
if not prefix.endswith('/'):
prefix += '/'
results = []
paginator = s3_client.get_paginator('list_objects')
for resp in paginator.paginate(Bucket=bucket_name, Prefix=prefix, Delimiter='/'):
        if 'CommonPrefixes' not in resp:
break
for common_prefix in resp['CommonPrefixes']:
dirname = common_prefix['Prefix'][len(prefix):] # strip root prefix
dirname = dirname.rstrip('/')
results.append(dirname)
return results
def _smart_ls_local(path):
if os.path.exists(path):
return os.listdir(path)
return []
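# Usage sketch with hypothetical paths: local paths fall through to os, while anything
# starting with 's3://' is routed to the boto3-based helpers above.
if __name__ == '__main__':
    maybe_makedirs('/tmp/smart_ls_demo')        # created locally
    maybe_makedirs('s3://some-bucket/prefix/')  # no-op for S3 paths
    print(smart_ls('/tmp'))                     # local directory listing (empty list if missing)
    # smart_ls('s3://some-bucket/some/prefix') would list the "subdirectories" under that prefix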
|
python
|
from sqlalchemy import Column, TIMESTAMP, Float, Integer, ForeignKey, String
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta, declared_attr
'''SQLAlchemy models used for representing the database schema as Python objects'''
Base: DeclarativeMeta = declarative_base()
class Watered(Base):
'''Model to hold the data for when the plant was last watered.'''
__tablename__: str = 'watered_tab'
id: Column = Column(Integer, primary_key=True, autoincrement=True)
date_watered: Column = Column(TIMESTAMP, nullable=False)
@declared_attr
def plant_id(cls) -> Column:
return Column(Integer, ForeignKey('plants_tab.id'), primary_key=True)
def __repr__(self) -> str:
return f'Watered(plant={self.plant_id}, date_watered={self.date_watered})'
class Schedule(Base):
'''Model to hold the values collected from the moisture sensors for a given plant.'''
__tablename__: str = 'schedule_tab'
id: Column = Column(Integer, primary_key=True, autoincrement=True)
datetime: Column = Column(TIMESTAMP, nullable=False)
water_level: Column = Column(Float, nullable=False)
@declared_attr
def plant_id(cls) -> Column:
return Column(Integer, ForeignKey('plants_tab.id'), primary_key=True)
def __repr__(self) -> str:
return f'Schedule(plant={self.plant_id}, datetime={self.datetime})'
class Plant(Base):
'''Model to represent a plant.'''
__tablename__: str = 'plants_tab'
id: Column = Column(Integer, primary_key=True, autoincrement=True)
room: Column = Column(String, nullable=False)
name: Column = Column(String, nullable=False)
def __repr__(self) -> str:
return f'Plant(id={self.id}, name={self.name}, room={self.room})'
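# Minimal wiring sketch (assumed in-memory SQLite URL; the real application presumably
# configures its own engine and session elsewhere):
if __name__ == '__main__':
    from sqlalchemy import create_engine
    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)  # creates plants_tab, watered_tab and schedule_tab
    print(Plant(id=1, name='Monstera', room='office'))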
|
python
|
import math
from .settings import pi, PI_HALF, nearly_eq
from .point import Point
from .basic import GeometryEntity
class LinearEntity(GeometryEntity):
def __new__(cls, p1, p2=None, **kwargs):
if p1 == p2:
raise ValueError(
"%s.__new__ requires two unique Points." % cls.__name__)
if len(p1) != len(p2):
raise ValueError(
"%s.__new__ requires two Points of equal dimension." % cls.__name__)
return GeometryEntity.__new__(cls, p1, p2, **kwargs)
def __len__(self):
"""
Treat a line as a two item container with length '2'.
:return: 2
"""
return 2
    def __contains__(self, other):
        """Return a definitive answer, or raise an error if it cannot
        be determined whether `other` lies on the boundaries of `self`."""
        result = self.contains(other)
        if result is not None:
            return result
        else:
            raise ValueError("can't decide whether '%s' contains '%s'" % (self, other))
    def __eq__(self, other):
        result = self.equals(other)
        if result is not None:
            return result
        raise ValueError("can't decide whether '%s' equals '%s'" % (self, other))
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.args)
def contains(self, other):
raise NotImplementedError()
def _span_test(self, other):
"""Test whether the point `other` lies in the positive span of `self`."""
if self.p1 == other:
return 0
rel_pos = other - self.p1
d = self.direction
if d.dot(rel_pos) > 0:
return 1
return -1
@property
    def ambient_dimention(self):
        """Return the dimension of this line"""
return len(self.p1)
def angle_between(self, line):
"""
get the angle between 'self' and 'line' in radians
:param line: Line
:return: angle in radians
"""
a = self.direction
b = line.direction
return math.acos(a.dot(b)/(abs(a)*abs(b)))
def smallest_angle_between(self, line):
"""Return the smallest angle between two lines.
if angle > pi/2, return angle = angle - pi/2
"""
angle = self.angle_between(line)
if angle > PI_HALF:
return angle - PI_HALF
else:
return angle
    def is_parallel(self, line):
        """
        Whether two linear entities are parallel.
        :param line: Line
        """
        # Note: "parallel" here includes collinear lines; keep that in mind when using it.
if not isinstance(self, LinearEntity) and not isinstance(line, LinearEntity):
raise TypeError('Must pass only LinearEntity objects')
a = self.direction
b = line.direction
if isinstance(line, LinearEntity2D):
return a.cross(b) == 0
        # TODO: parallel test for 3D lines is not implemented yet
raise NotImplementedError()
def is_perpendicular(self, line):
"""
        Whether two linear entities are perpendicular.
:param line: Line
"""
if not isinstance(line, LinearEntity) and not isinstance(self, LinearEntity):
raise TypeError('Must pass only LinearEntity objects')
return self.direction.dot(line.direction) == 0
    def is_intersection(self, line):
        """Do the two lines intersect?
Parameters
==========
other : Point or LinearEntity
"""
return not self.is_parallel(line)
def is_similar(self, line):
"""
Return True if self and other are contained in the same line.
:param line: Line
:return:
"""
return line in self
@property
def direction(self):
"""Return the direction of line"""
return self.p2 - self.p1
@property
def normal_direction(self):
"""Return the normal direction of a line
The normal direction here is the right hand direction
"""
d = self.direction
# to get the left hand direction: return Point(-d.y, d.x)
return Point(d.y, -d.x)
@property
def p1(self):
"""The first point of the line
:return: Point object
"""
return self.args[0]
@property
def p2(self):
return self.args[1]
@property
def points(self):
return (self.p1, self.p2)
@property
def unit(self):
"""Translates the vector self to the origin and scales the length
of the vector to 1.
:return Returns a Point() whose distance from the origin is 1.
"""
dis = self.direction
return dis * (1/abs(dis))
def parallel_line(self, pt):
"""
Create a new Line parallel to this linear entity which passes
through the point `p`.
:param pt: Point
:return:
"""
pt = Point._convert(pt)
return Line(pt, pt + self.direction)
def perpendicular_line(self, pt):
"""Create a new line that perpendicular to self
and through the point pt
"""
pt = Point._convert(pt)
return Line(pt, pt + self.normal_direction)
def perpendicular_segment(self, p):
"""Create a perpendicular line segment from `p` to this line.
        The segment's other endpoint lies on this line.
"""
p = Point._convert(p)
if p in self:
            return ("the point {} is already on the line".format(p))
p2 = self.projection(p)
return Segment(p, p2)
def projection(self, other):
"""Project a point, line, ray, or segment onto this linear entity.
:param other: point, line, ray, segment
:return: projection : Point or LinearEntity (Line, Ray, Segment)
"""
other = Point._convert(other)
# caution : project is not from (0, 0), so must add self.p1.
return (other - self.p1).project(self.direction) + self.p1
    def are_concurrent(self, *lines):
        """Are two or more linear entities concurrent?
Concurrent means lines all intersect at a single point.
"""
raise NotImplementedError()
def intersection(self, other):
def intersect_parallel_segments(seg1, seg2):
if seg1.contains(seg2):
return [seg2]
if seg2.contains(seg1):
return [seg1]
# direct the segments so they're oriented the same way
if seg1.direction.dot(seg2.direction) < 0:
seg2 = Segment(seg2.p2, seg2.p1)
# order the segments so seg1 is "behind" seg2
if seg1._span_test(seg2.p1) < 0:
seg1, seg2 = seg2, seg1
if seg2._span_test(seg1.p2) < 0:
return []
return [Segment(seg2.p1, seg1.p2)]
def intersect_parallel_rays(ray1, ray2):
if ray1.direction.dot(ray2.direction) > 0:
return [ray2] if ray1._span_test(ray2.p1) else [ray1]
st = ray1._span_test(ray2.p1)
if st > 0:
return [Segment(ray1.p1, ray2.p1)]
elif st == 0:
return [ray1.p1]
return []
def intersect_parallel_segment_and_ray(seg, ray):
st1, st2 = ray._span_test(seg.p1), ray._span_test(seg.p2)
if st1 < 0 and st2 < 0:
return []
elif st1 >= 0 and st2 >= 0:
return [seg]
elif st1 >= 0 and st2 < 0:
return [Segment(ray.p1, seg.p1)]
elif st1 <= 0 and st2 > 0:
return [Segment(ray.p1, seg.p2)]
if not isinstance(other, GeometryEntity):
other = Point._convert(other)
if self.contains(other):
return [other]
else:
return []
if isinstance(other, Point):
if self.contains(other):
return [other]
return []
if isinstance(other, LinearEntity):
if self.p1.is_collinear(self.p2, other.p1, other.p2):
if isinstance(other, Line):
return [self]
if isinstance(self, Line):
return [other]
if isinstance(other, Segment) and isinstance(self, Segment):
return intersect_parallel_segments(self, other)
if isinstance(self, Ray) and isinstance(other, Ray):
return intersect_parallel_rays(self, other)
if isinstance(self, Segment) and isinstance(other, Ray):
return intersect_parallel_segment_and_ray(self, other)
if isinstance(self, Ray) and isinstance(other, Segment):
return intersect_parallel_rays(other, self)
if self.is_parallel(other):
return []
else:
l1 = Line(self.p1, self.p2)
l2 = Line(other.p1, other.p2)
if isinstance(l1, LinearEntity2D) and isinstance(l2, LinearEntity2D):
a1, b1, c1 = l2.coefficients
a2, b2, c2 = l1.coefficients
d = a1*b2 - a2*b1
p_inter = Point((b1*c2 - b2*c1)/d, (a2*c1 - a1*c2)/d)
if isinstance(self, Line2D) and isinstance(other, Line2D):
return [p_inter]
elif isinstance(self, Line2D) and isinstance(other, Ray2D):
if other._span_test(p_inter) >= 0:
return [p_inter]
return []
elif isinstance(self, Line2D) and isinstance(other, Segment2D):
if p_inter in other:
return [p_inter]
return []
elif isinstance(self, Segment2D) and isinstance(other, Segment2D):
if p_inter in self and p_inter in other:
return [p_inter]
return []
elif isinstance(self, Segment2D) and isinstance(other, Ray2D):
if p_inter in self and other._span_test(p_inter) >=0:
return [p_inter]
return []
elif isinstance(self, Segment2D) and isinstance(other, Line2D):
if p_inter in self:
return [p_inter]
return []
elif isinstance(self, Ray2D) and isinstance(other, Segment2D):
if p_inter in other and self._span_test(p_inter) >= 0:
return [p_inter]
return []
elif isinstance(self, Ray2D) and isinstance(other, Ray2D):
if other._span_test(p_inter) >= 0 and self._span_test(p_inter) >= 0:
return [p_inter]
return []
elif isinstance(self, Ray2D) and isinstance(other, Line2D):
if self._span_test(p_inter) >=0:
return [p_inter]
return []
return []
        # TODO: the 3D case still needs to be implemented
return other.intersection(self)
class Line(LinearEntity):
    """A 2D or 3D line. An n-dimensional line may be supported in the future.
"""
def __new__(cls, p1, p2=None, **kwargs):
if isinstance(p1, LinearEntity):
if p2:
raise ValueError(r"p1 is a Linear Entity, can't have p2")
dim = len(p1.p1)
else:
p1, p2 = Point._convert(p1), Point._convert(p2)
dim = len(p1)
if dim == 2:
return Line2D(p1, p2, **kwargs)
elif dim == 3:
return Line3D(p1, p2, **kwargs)
return LinearEntity.__new__(cls, p1, p2, **kwargs)
def contains(self, item):
if not isinstance(item, GeometryEntity):
item = Point._convert(item)
if isinstance(item, Point):
return self.p1.is_collinear(item, self.p2)
if isinstance(item, Line):
if not self.p1.is_collinear(item.p1, item.p2):
return False
return self.p2.is_collinear(item.p1, item.p2)
@property
def length(self):
return ("A line doesn't have length.")
def equals(self, other):
"""Return whether two lines are collinear"""
if not isinstance(other, Line):
return False
return self.p1.is_collinear(self.p2, other.p1, other.p2)
def distance(self, p):
"""
Distance between a line and a point
:param p: Point-(0, 1)
:return:
"""
p = Point._convert(p)
if self.contains(p):
return 0.0
if isinstance(self, Line2D):
d = self.direction
A_point = p - self.p1
n = d.cross(A_point) / self.p1.distance(self.p2)
return abs(n)
        # TODO: the 3D case still needs to be implemented
        raise NotImplementedError()
class LinearEntity2D(LinearEntity):
@property
def bounds(self):
"""Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
rectangle for the geometric figure.
"""
verts = self.args
xs = [p.x for p in verts]
ys = [p.y for p in verts]
return (min(xs), min(ys), max(xs), max(ys))
@property
def slope(self):
"""
        Get the slope of a line. If the line is vertical, return "line is vertical".
:return: float. slope of a line
"""
d_x, d_y = self.direction.args
if d_x == 0:
return ("line is vertical!", self.is_vertical)
return d_y/d_x
@property
def normal(self):
"""
Get the normal line of self(a line)
:return: the normal line of a line
"""
d = self.direction
return Line([-d.y, d.x], [d.y, -d.x])
@property
def is_vertical(self):
"""
Determine whether the line is vertical.
:return: True or False
"""
return self.p1.x == self.p2.x
@property
def is_horizontal(self):
"""
        Determine whether the line is horizontal.
:return: True or False
"""
return self.p1.y == self.p2.y
class Line2D(LinearEntity2D, Line):
    def __new__(cls, p1, p2=None, **kwargs):
        """No need to validate p1 and p2 here; they have already been validated in LinearEntity.
        Skipping that validation speeds up construction considerably (up to ~10x).
"""
return LinearEntity.__new__(cls, p1, p2, **kwargs)
@property
def coefficients(self):
"""The coefficients (`a`, `b`, `c`) for a standard linear equation `ax + by + c = 0`."""
p1, p2 = self.args
p1_x, p1_y = p1.args
p2_x, p2_y = p2.args
if p1_x == p2_x:
return (1, 0, -p1_x)
elif p1_y == p2_y:
return (0, 1, -p1_y)
return (p1_y - p2_y, p2_x - p1_x, p1_x*p2_y - p1_y*p2_x)
    def equation(self, x='x', y='y'):
        """Return the equation 'ax + by + c' as a string.
        :return: the equation as a string.
        """
        # This function is arguably redundant (coefficients() is enough),
        # but it is kept for consistency with sympy.
a, b, c = self.coefficients
        if b < 0 and c < 0:
return ("{0} {1} {2}".
format(str(a) + x, str(b) + y, str(c)))
elif b < 0:
return ("{0} {1} + {2}".
format(str(a) + x, str(b) + y, str(c)))
elif c < 0:
return ("{0} + {1} {2}".
format(str(a) + x, str(b) + y, str(c)))
return ("{0} + {1} + {2}".
format(str(a) + x, str(b) + y, str(c)))
class Segment(LinearEntity):
def __new__(cls, p1, p2=None, **kwargs):
if isinstance(p1, LinearEntity):
if p2:
raise ValueError(r"p1 is a Linear Entity, can't have p2")
dim = len(p1.p1)
else:
p1, p2 = Point._convert(p1), Point._convert(p2)
dim = len(p1)
if dim == 2:
return Segment2D(p1, p2, **kwargs)
elif dim == 3:
return Segment3D(p1, p2, **kwargs)
return LinearEntity.__new__(cls, p1, p2, **kwargs)
@property
def vertices(self):
return list(self.args)
def contains(self, item):
if not isinstance(item, GeometryEntity):
item = Point._convert(item)
if isinstance(item, Point):
if self.p1.is_collinear(item, self.p2):
d = self.direction
d1, d2 = self.p1 - item, self.p2 - item
return (abs(d) - abs(d1) - abs(d2)) == 0
if isinstance(item, Segment):
return item.p1 in self and item.p2 in self
return False
def equals(self, other):
if not isinstance(other, Segment):
raise ValueError("{} is not a segment".format(other))
sp1, sp2, op1, op2 = self.p1, self.p2, other.p1, other.p2
        if op1 == sp1 and op2 == sp2:
return True
if op1 == sp2 and op2 == sp1:
return True
return False
@property
def length(self):
return self.p1.distance(self.p2)
@property
def midpoint(self):
return self.p1.midpoint(self.p2)
def distance(self, p):
"""Return the shortest distance from a point to a segment"""
p = Point._convert(p)
d = self.direction
op1, op2 = self.p1 - p, self.p2 - p
sign1, sign2 = op1.dot(d), op2.dot(d)
if sign1 >= 0 and sign2 > 0:
return abs(op1)
if sign1 < 0:
if sign2 <= 0:
return abs(op2)
else:
return Line(self.p1, self.p2).distance(p)
        return ("Can't get distance")
def perpendicular_bisector(self):
"""The perpendicular bisector of this segment."""
return self.perpendicular_line(self.midpoint)
class Segment2D(Segment, LinearEntity2D):
def __new__(cls, p1, p2, **kwargs):
p1 = Point._convert(p1)
p2 = Point._convert(p2)
if p1 == p2:
return p1
return LinearEntity.__new__(cls, p1, p2, **kwargs)
class Ray(LinearEntity):
def __new__(cls, p1, p2=None, **kwargs):
if isinstance(p1, LinearEntity):
if p2:
raise ValueError(r"p1 is a Linear Entity, can't have p2")
dim = len(p1.p1)
else:
p1, p2 = Point._convert(p1), Point._convert(p2)
dim = len(p1)
if dim == 2:
return Ray2D(p1, p2, **kwargs)
elif dim == 3:
return Ray3D(p1, p2, **kwargs)
return LinearEntity.__new__(cls, p1, p2, **kwargs)
@property
def source(self):
return self.p1
def equals(self, other):
if not isinstance(other, Ray):
return False
return self.source == other.source and other.p2 in self
def contains(self, other):
if not isinstance(other, GeometryEntity):
other = Point._convert(other)
if isinstance(other, Point):
if self.p1.is_collinear(self.p2, other):
return self.direction.dot(other - self.p1) >= 0
return False
if isinstance(other, Ray):
if self.p1.is_collinear(self.p2, other.p1, other.p2):
return self.direction.dot(other.direction) > 0
return False
if isinstance(other, Segment):
return other.p1 in self and other.p2 in self
class Ray2D(Ray, LinearEntity2D):
def __new__(cls, p1, p2, **kwargs):
return LinearEntity.__new__(cls, p1, p2, **kwargs)
class Line3D(Line):
def __new__(cls, p1, p2, **kwargs):
return LinearEntity.__new__(cls, p1, p2, **kwargs)
class Ray3D(Ray):
def __new__(cls, p1, p2, **kwargs):
return LinearEntity.__new__(cls, p1, p2, **kwargs)
class Segment3D(Segment):
def __new__(cls, p1, p2, **kwargs):
return LinearEntity.__new__(cls, p1, p2, **kwargs)
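# Usage sketch (comments only, since this module relies on relative imports and the external
# Point class; coordinate tuples are assumed to be accepted via Point._convert, as used above):
#   line_h = Line((0, 0), (1, 0))              # horizontal line -> Line2D instance
#   line_v = Line((0, 0), (0, 1))              # vertical line
#   line_h.is_perpendicular(line_v)            # -> True  (dot product of directions is 0)
#   line_h.is_parallel(Line((0, 1), (1, 1)))   # -> True  (cross product of directions is 0)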
|
python
|
'''
This program reads the message data from an OVI backup file
(sqlite) created for a NOKIA N95 phone and writes them formatted
as XML to stdout.
The backup file is usually located in:
C:\Users\<user>\AppData\Local\Nokia\Nokia Suite/ \
Messages/Database/msg_db.sqlite
Note: the exported XML only contains elements with a valid value (not None)
Example:
> python messages2xml.py msg_db.sqlite
<?xml version="1.0" ?>
<messages>
<message id="0">
<msg_txt>Lorem ipsum dolor sit amet, consectetur </msg_txt>
<msg_address>123456789</msg_address>
<msg_folder>1</msg_folder>
<msg_time>1297268424</msg_time>
<msg_imei>789456123</msg_imei>
<msg_status>36</msg_status>
<msg_uid>{1c58fef3-932e-4106-86ee-f9fe0a50363d}</msg_uid>
<msg_address_substr>3572583</msg_address_substr>
</message>
<message id="1">
<msg_txt>odio ultrices nunc semper iaculis. Fusce nibh</msg_txt>
<msg_address>987654321</msg_address>
<msg_folder>1</msg_folder>
<msg_time>1300101857</msg_time>
<msg_imei>358064010341612</msg_imei>
<msg_status>36</msg_status>
<msg_uid>{e93839af-a899-4de8-a14c-92e136d13ccd}</msg_uid>
<msg_address_substr>3572583</msg_address_substr>
</message>
...
</messages>
'''
import sqlite3 as sqlite3
import sys, os
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from xml.dom import minidom
def prettify(elem):
"""Return a pretty-printed XML string for the Element.
found here: http://pymotw.com/2/xml/etree/ElementTree/create.html
"""
rough_string = ElementTree.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
def ovi_n95_messages_backup_2_xml(db_file):
    ''' read the messages table from an OVI sqlite backup file and
    write the content formatted as XML to stdout
'''
try:
con = sqlite3.connect(db_file)
cur = con.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
all_tables = cur.fetchall()
if (u'messages',) not in all_tables:
raise Exception('missing table \'messages\' - not a valid ' \
'OVI backup database file?!?')
cur.execute("SELECT * FROM messages")
rows = cur.fetchall()
if len(rows) == 0:
raise Exception('no messages found in database.')
col_names = [col[0] for col in cur.description]
root = Element('messages')
i = 0
for entry in rows:
message = SubElement(root, 'message', id="%d" % (i))
for idx, col in enumerate(entry):
if col:
SubElement(message, col_names[idx]).text = "%s" % (col)
i += 1
print u''.join(prettify(root)).encode('utf-8', \
errors='xmlcharrefreplace').strip()
except sqlite3.Error, err:
        sys.stderr.write('failed to open sqlite database cause: %s\n' % err)
sys.exit(-1)
except Exception as err:
sys.stderr.write('%s\n' % (err))
sys.exit(-1)
if __name__ == '__main__':
try:
if os.path.isfile(sys.argv[1]) is False:
sys.stderr.write('given database file \'%s\' not found.\n' \
% (sys.argv[1]))
sys.exit(-1)
ovi_n95_messages_backup_2_xml(sys.argv[1])
except IndexError:
sys.stderr.write('missing parameter <db-file-name>.\n')
sys.exit(-1)
|
python
|
import torch.nn as nn
import math
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channels, out_channels, stride=1):
super().__init__()
self.residual_function = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
nn.Conv2d(out_channels, out_channels * BasicBlock.expansion, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(out_channels * BasicBlock.expansion),
)
self.shortcut = nn.Sequential()
self.relu = nn.ReLU()
if stride != 1 or in_channels != BasicBlock.expansion * out_channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, out_channels * BasicBlock.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_channels * BasicBlock.expansion)
)
def forward(self, x):
x = self.residual_function(x) + self.shortcut(x)
x = self.relu(x)
return x
class BottleNeck(nn.Module):
expansion = 4
def __init__(self, in_channels, out_channels, stride=1):
super().__init__()
self.residual_function = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
nn.Conv2d(out_channels, out_channels * BottleNeck.expansion, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(out_channels * BottleNeck.expansion),
)
self.shortcut = nn.Sequential()
self.relu = nn.ReLU()
if stride != 1 or in_channels != out_channels * BottleNeck.expansion:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, out_channels*BottleNeck.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_channels*BottleNeck.expansion)
)
def forward(self, x):
x = self.residual_function(x) + self.shortcut(x)
x = self.relu(x)
return x
class ResNet(nn.Module):
def __init__(self, block, num_block, num_classes=2, init_weights=True):
super().__init__()
self.in_channels=64
self.conv1 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
)
self.conv2_x = self._make_layer(block, 64, num_block[0], 1)
self.conv3_x = self._make_layer(block, 128, num_block[1], 2)
self.conv4_x = self._make_layer(block, 256, num_block[2], 2)
self.conv5_x = self._make_layer(block, 512, num_block[3], 2)
self.avg_pool = nn.AdaptiveAvgPool2d((1,1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
if init_weights:
self._initialize_weights()
def _make_layer(self, block, out_channels, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_channels, out_channels, stride))
self.in_channels = out_channels * block.expansion
return nn.Sequential(*layers)
def forward(self,x):
        x = self.conv1(x)
        x = self.conv2_x(x)
        x = self.conv3_x(x)
x = self.conv4_x(x)
x = self.conv5_x(x)
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def resnet18():
return ResNet(BasicBlock, [2,2,2,2])
def resnet34():
return ResNet(BasicBlock, [3, 4, 6, 3])
def resnet50():
return ResNet(BottleNeck, [3,4,6,3])
def resnet101():
return ResNet(BottleNeck, [3, 4, 23, 3])
def resnet152():
return ResNet(BottleNeck, [3, 8, 36, 3])
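# Quick shape check (assumed 224x224 RGB input; num_classes defaults to 2 above):
if __name__ == "__main__":
    import torch
    model = resnet18()
    dummy = torch.randn(1, 3, 224, 224)
    print(model(dummy).shape)  # expected: torch.Size([1, 2])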
|
python
|
import unittest
from parameterized import parameterized as p
from solns.karatsubaMultiply.karatsubaMultiply import *
class UnitTest_KaratsubaMultiply(unittest.TestCase):
@p.expand([
[3141592653589793238462643383279502884197169399375105820974944592,
2718281828459045235360287471352662497757247093699959574966967627,
8539734222673567065463550869546574495034888535765114961879601127067743044893204848617875072216249073013374895871952806582723184]
])
def test_naive(self,a,b,c):
self.assertEqual(Solution.naive(a,b),c)
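# Allow the test module to be run directly as a script (standard unittest entry point):
if __name__ == '__main__':
    unittest.main()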
|
python
|
média = 0
idademaisvelho = 0
nomemaisvelho = ''
totmulher20 = 0
for p in range(1, 5):
    print('-' * 10, 'PERSON #{}'.format(p), '-' * 10)
    nome = str(input('Name: ').strip())
    idade = int(input('Age: '))
    sexo = str(input('Sex [M/F]: ').strip())
média = (média*(p-1)+idade)/p
if p == 1 and sexo in 'Mm':
idademaisvelho = idade
nomemaisvelho = nome
if idade > idademaisvelho and sexo in 'Mm':
idademaisvelho = idade
nomemaisvelho = nome
if sexo in 'Ff' and idade < 20:
totmulher20 += 1
print('The average age of the group is {}'.format(média))
print('The oldest man is {} years old and his name is {}.'.format(idademaisvelho, nomemaisvelho))
print('In total there are {} women under 20 years old.'.format(totmulher20))
|
python
|
import logging
import numpy as np
X1, Y1, X2, Y2 = range(4)
class ImageSegmentation(object):
"""
Data Structure to hold box segments for images. Box segments are defined by two points:
upper-left & bottom-right.
"""
def __init__(self, width, height):
self.height = height
self.width = width
self.segments = []
def add_segmentation(self, x1, y1, x2, y2):
"""
Adds a segmentation array to the ImageSegmentation object
:param x1, y1: upper-left point of the segment box
:param x2, y2: bottom-right point of the segment box
:return: null
"""
# if (any(value < 0 for value in [x1, y1, x2, y2]) | any(x > self.height for x in [x1, x2]) |
# any(y > self.width for y in [y1, y2]) | (x1 > x2) | (y1 > y2)):
# logging.error("Invalid image segment: %s of image sized %s", (x1, y1, x2, y2), (self.width, self.height))
# raise InvalidSegmentError()
# else:
if [x1, y1, x2, y2] not in self.segments:
self.segments.append([x1, y1, x2, y2])
def get_scaled_segments(self, width, height, r, maxr, border=0, offset=1, chunks=None):
"""
:param width: pixel width of image to be scaled to.
:param height: pixel height of image to be scaled to.
:param r: resolution level that is being scaled to.
:param maxr: total number of resolution levels.
:return: An array of segment boxes scaled to the dimensions width x height
https://stackoverflow.com/questions/42000601/resize-an-image-with-offset-in-python
"""
scalefactor = int(np.sqrt((width * height) / (self.width * self.height)))
#Because of data chunking, height and width of resolution[-1] is padded to be a multiple of 128.
#This means instead of being eg. 593 wide it is 640 wide, meaning scaling relative to this value is incorrect.
#It appears to be exponential scaling with each resolution level though so I will try this instead.
exp_scalefactor = [1,2,4,8,16,32,64,128,256,512,1024]
#scale_width = (width / self.width)
#scale_height = (height / self.height)
#if scale_width == 1 or scale_height == 1:
# scale_width = scalefactor
# scale_height = scalefactor
scale_width = exp_scalefactor[(maxr - 1 - r)]
scale_height = exp_scalefactor[(maxr - 1 - r)]
msg = 'Scalefactors: wx%d hx%d [%d x %d] with border=%d to initial [%d x %d] offset=%s' % (
scale_width, scale_height, width, height, border, self.width, self.height, offset)
print(msg)
logging.info(msg)
# make into 2d Array
matrix = np.array(self.segments)[:]
msg = 'Orig xy: {}'.format(matrix)
print(msg)
logging.debug(msg)
# Total segment boundary
total_seg_h = matrix[-1][X2] - matrix[0][X1]
total_seg_w = matrix[-1][Y2] - matrix[0][Y1]
msg = 'Segmented area: %d x %d [%0.4f %0.4f]' % (
total_seg_w, total_seg_h, (total_seg_h / self.height), (total_seg_w / self.width))
print(msg)
logging.info(msg)
if border:
border_factor = int((border / 100) * np.sqrt(self.width * self.height))
matrix[:, [X1, Y1]] = np.subtract(matrix[:, [X1, Y1]], border_factor)
matrix[:, [X2, Y2]] = np.add(matrix[:, [X2, Y2]], border_factor)
msg = 'Border xy:{}'.format(matrix)
print(msg)
logging.debug(msg)
# Apply scaling
#this was the original - but I think the width/height scaling was accidentally reversed
#matrix[:, [X1, X2]] = np.multiply(matrix[:, [X1, X2]], scale_width)
#matrix[:, [Y1, Y2]] = np.multiply(matrix[:, [Y1, Y2]], scale_height)
matrix[:, [X1, X2]] = np.multiply(matrix[:, [X1, X2]], scale_height)
matrix[:, [Y1, Y2]] = np.multiply(matrix[:, [Y1, Y2]], scale_width)
msg = 'Scaled xy:{}'.format(matrix)
print(msg)
logging.debug(msg)
# replace with boundaries
matrix[matrix < 0] = 0
# TODO Large images in IMS are chunked so need to adjust offset for large images only
# chunks = 128x128x16 (this may change with diff image acquisition)
# https://www.oreilly.com/library/view/python-and-hdf5/9781491944981/ch04.html
if chunks is not None and width/chunks[-1] > 50:
# allow user to add an offset between 0-2, 0 will not apply ANY correction
if offset >= 0:
for i in range(len(matrix)):
numchunks = matrix[i][Y1] / chunks[-1]
bytefactor = (numchunks + 1 ) * 8 # UINT8 bits per px
offsetfactor = int(bytefactor * offset)
msg ='Offset factor: [%d] %d' % (i, offsetfactor)
logging.info(msg)
print(msg)
matrix[i][Y1] = np.add(matrix[i][Y1], offsetfactor)
matrix[i][Y2] = np.add(matrix[i][Y2], offsetfactor)
matrix[i][X1] = np.add(matrix[i][X1], offsetfactor)
matrix[i][X2] = np.add(matrix[i][X2], offsetfactor)
msg = 'Offset xy: {}'.format(matrix)
print(msg)
logging.info(msg)
# check width not greater than max width
mw = matrix[:, [X2]] - matrix[:, [X1]]
mw[mw > height] = height
matrix[:, [X1]] = matrix[:, [X2]] - mw
# max height
mw = matrix[:, [Y2]] - matrix[:, [Y1]]
mw[mw > width] = width
matrix[:, [Y1]] = matrix[:, [Y2]] - mw
# Total segment boundary after
total_seg_w = matrix[-1][X2] - matrix[0][X1]
total_seg_h = matrix[-1][Y2] - matrix[0][Y1]
msg = 'Segmented area: %d x %d [%0.4f %0.4f]' % (total_seg_w, total_seg_h, (total_seg_w / height), (total_seg_h / width))
print(msg)
logging.info(msg)
return matrix.astype(int)
    def get_relative_segments(self):
        """
        :return: An array of segment boxes returned as stored (scale factor 1),
            clamped to the image bounds.
        """
        # get_scaled_segments now also requires r and maxr; maxr=1, r=0 yields a scale factor of 1.
        return self.get_scaled_segments(self.width, self.height, 0, 1)
def get_max_segment(self):
"""
:return: Returns the segment [x1, y1, x2, y2] with the largest area
"""
max_segment = self.segments[0]
max_area = self.segment_area(max_segment)
for segment in self.segments:
segment_area = self.segment_area(segment)
if segment_area > max_area:
max_segment = segment
max_area = segment_area
return max_segment
def segment_area(self, segment):
"""
:param segment: Bounding box of form [x1, y1, x2, y2]
:return: area of bounding box
"""
return (segment[X2] - segment[X1]) * (segment[Y2] - segment[Y1])
def change_segment_bounds(self, factor):
"""
TODO: Fix for border - currently crashing with 'argument out of range' error
:param factor: percent of pixels to include as border eg 5
:return: an ImageSegmentation object with bounding boxes increased/decreased by the given factor or nothing
"""
if factor > 0 and factor < 100:
border = int((factor / 100) * np.sqrt(self.width * self.height))
msg = "Applying border of {} px".format(border)
logging.info(msg)
print(msg)
new_image_segmentation = ImageSegmentation(self.width, self.height)
for bounding_box in self.segments:
pos_x1 = max(0, round(bounding_box[0] - border))
pos_y1 = max(0, round(bounding_box[1] - border))
pos_x2 = min(self.width, round(bounding_box[2] + border))
pos_y2 = min(self.height, round(bounding_box[3] + border))
new_image_segmentation.add_segmentation(pos_x1, pos_y1, pos_x2, pos_y2)
# new_image_segmentation.add_segmentation(bounding_box[0],bounding_box[1],bounding_box[2],bounding_box[3])
return new_image_segmentation
else:
return self
class InvalidSegmentError(Exception):
pass
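# Usage sketch with hypothetical numbers: two boxes on a 100x80 image;
# get_max_segment returns the box with the largest area.
if __name__ == '__main__':
    seg = ImageSegmentation(width=100, height=80)
    seg.add_segmentation(10, 10, 30, 40)   # area 20 * 30 = 600
    seg.add_segmentation(50, 20, 90, 70)   # area 40 * 50 = 2000
    print(seg.get_max_segment())           # expected: [50, 20, 90, 70]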
|
python
|
# -*- coding: utf-8 -*-
from app import app, db, bbcode_parser, redis_store, mail #, q
from flask import request, render_template, redirect, url_for, send_from_directory, abort, flash, g, jsonify
import datetime
import os
from PIL import Image
import simplejson
import traceback
from werkzeug.utils import secure_filename
import imghdr
from models import User, Picture, Post, Meta, Forum, ForumTopic, ForumPost, Breadcrumb, Comment
from services import ForumPostService, ForumTopicService, CommentService, PostService
from forms import BlogPostForm, UserForm, SettingsForm, ForumTopicForm, ForumPostForm, CommentForm
from flask.ext.security import login_required, roles_required, current_user
from flask.ext.security.utils import verify_password, encrypt_password
from unidecode import unidecode
from sqlalchemy.orm import joinedload, load_only, undefer_group
import random
#from flask.ext.mail import Message
#from tasks import xprocess
@app.route('/')
#@login_required
def home():
#app.logger.debug('Process call.')
#job = q.enqueue_call(func=xprocess, args=('FFFF',), result_ttl=5000)
#app.logger.debug(job.get_id())
#j = process.delay(3)
#app.logger.debug(j)
# msg = Message("Hello from SFDEV.net", recipients=["[email protected]"])
# msg.body = "testing from Home"
# msg.html = "<b>testing from Home</b>"
# mail.send(msg)
# p = redis_store.get('potato')
# if p is None:
# redis_store.set('potato', 'XXXXX')
# app.logger.debug(p)
last_posts = Post.query.filter(Post.published_at != None).order_by(Post.published_at.desc()).limit(5)
for post in last_posts:
if post.thumbnail:
post.thumbnail = post.thumbnail.replace('/pictures/', '/pictures/thumbs/thumb256_')
else:
post.thumbnail = '/static/no-image.jpg'
return render_template('home.html', last_posts=last_posts)
# ------------- PICTURES ------------------
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
IGNORED_FILES = set(['.gitignore'])
@app.route('/uploads', methods=['GET', 'POST'])
@roles_required('editor')
def pictures():
if request.method == 'POST':
file = request.files['file']
if file:
filename = secure_filename(unidecode(file.filename))
filename = str(current_user.id+1024)+'_'+filename
filename = gen_file_name(filename)
mimetype = file.content_type
file_ext = imghdr.what(file)
app.logger.debug(file_ext)
            if file_ext not in ALLOWED_EXTENSIONS:
result = {"error": "Filetype not allowed",
"name": filename,
"type": mimetype,
"size": 0,}
else:
# save file to disk
uploaded_file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(uploaded_file_path)
p = Picture(filename=filename, user=current_user)
db.session.add(p)
# create thumbnail after saving
if mimetype.startswith('image'):
create_thumbnail(filename, 256)
# get file size after saving
size = os.path.getsize(uploaded_file_path)
# return json for js call back
result = {"name": p.filename,
"url": p.picture_path(),
"thumbnailUrl": p.thumb_path(256),
}
db.session.commit()
return simplejson.dumps({"files": [result]})
if request.method == 'GET':
pics = Picture.query.filter_by(user_id=current_user.id).all()
inline = request.args.get('inline', 'false')
type = request.args.get('type', '')
if inline == 'true':
pic_layout='layout_empty.html'
else:
pic_layout='layout.html'
return render_template('pictures.html', pics=pics, pic_layout=pic_layout, type=type)
return redirect(url_for('home'))
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def gen_file_name(filename):
i = 1
while os.path.exists(os.path.join(app.config['UPLOAD_FOLDER'], filename)):
name, extension = os.path.splitext(filename)
filename = '%s_%s%s' % (name, str(i), extension)
i = i + 1
return filename
def create_thumbnail(image, basewidth):
try:
size = (basewidth, basewidth)
img = Image.open(os.path.join(app.config['UPLOAD_FOLDER'], image))
# wpercent = (basewidth/float(img.size[0]))
# hsize = int((float(img.size[1])*float(wpercent)))
# img = img.resize((basewidth,hsize), PIL.Image.ANTIALIAS)
width, height = img.size
if width > height:
delta = width - height
left = int(delta/2)
upper = 0
right = height + left
lower = height
else:
delta = height - width
left = 0
upper = int(delta/2)
right = width
lower = width + upper
img = img.crop((left, upper, right, lower))
img.thumbnail(size, Image.ANTIALIAS)
img.save(os.path.join(app.config['THUMBNAIL_FOLDER'], 'thumb'+str(basewidth)+'_'+image), quality=90, dpi=(72,72))
return True
except:
print traceback.format_exc()
return False
# -------------- GET PICTURE for DEV SERVER ----------
if app.config['DEBUG'] == True:
@app.route("/pictures/<string:filename>", methods=['GET'])
def get_picture(filename):
return send_from_directory(os.path.join(app.config['UPLOAD_FOLDER']), filename=secure_filename(filename))
@app.route("/pictures/thumbs/<string:filename>", methods=['GET'])
def get_thumbnail(filename):
return send_from_directory(os.path.join(app.config['THUMBNAIL_FOLDER']), filename=secure_filename(filename))
@app.route("/pictures/avatars/<string:filename>", methods=['GET'])
def get_avatars(filename):
return send_from_directory(os.path.join(app.config['AVATAR_FOLDER']), filename=secure_filename(filename))
# ------------- EDITOR -------------------------------
@app.route("/blog")
@app.route("/blog/page/<int:page>")
def blog_list(page=1):
g.breadcrumbs = []
g.breadcrumbs.append(Breadcrumb('/', 'Salesforce-developer.net'))
g.breadcrumbs.append(Breadcrumb('/blog', 'Blog'))
posts = Post.query.options(joinedload('user')).\
filter(Post.published_at != None).\
order_by(Post.published_at.desc()).\
paginate(page, 10, True)
total_drafts = None
if current_user.has_role('editor'):
total_drafts = Post.query.options(load_only('id')).filter(Post.user_id == current_user.id, Post.published_at == None).count()
meta = Meta(title='Blog | Salesforce-Developer.net',
description='Blog for Salesforce Developers with main technical information and examples of apex code.',
keywords='salesforce blog, apex blog, visualforce blog'
)
return render_template('blog_list.html', posts=posts, meta=meta, total_drafts=total_drafts)
@app.route("/blog/author/<string:user_slug>")
@app.route("/blog/author/<string:user_slug>/page/<int:page>")
def blog_by_author(user_slug, page=1):
user = User.query.filter_by(slug=user_slug).first_or_404()
g.breadcrumbs = []
g.breadcrumbs.append(Breadcrumb('/', 'Salesforce-developer.net'))
g.breadcrumbs.append(Breadcrumb('/blog', 'Blog'))
g.breadcrumbs.append(Breadcrumb('', user.fullname()))
posts = Post.query.options(joinedload('user')).\
filter(Post.user_id == user.id, Post.published_at != None).\
order_by(Post.published_at.desc()).\
paginate(page, 10, True)
total_drafts = None
if current_user.has_role('editor'):
total_drafts = Post.query.options(load_only('id')).filter(Post.user_id == current_user.id, Post.published_at == None).count()
meta = Meta(title='Articles by '+user.fullname()+' | Salesforce-Developer.net',
                description='All articles by '+user.fullname()+' published on Salesforce-Developer.net',
keywords='salesforce articles, '+user.fullname()
)
return render_template('blog_list.html', posts=posts, meta=meta, author=user, total_drafts=total_drafts)
@app.route("/blog/drafts/")
@app.route("/blog/drafts/page/<int:page>")
@roles_required('editor')
def blog_my_drafts(page=1):
g.breadcrumbs = []
g.breadcrumbs.append(Breadcrumb('/', 'Salesforce-developer.net'))
g.breadcrumbs.append(Breadcrumb('/blog', 'Blog'))
g.breadcrumbs.append(Breadcrumb('', 'My Drafts'))
posts = Post.query.options(joinedload('user')).\
filter(Post.user_id == current_user.id, Post.published_at == None).\
order_by(Post.created_at.desc()).\
paginate(page, 10, True)
total_drafts = None
if current_user.has_role('editor'):
total_drafts = Post.query.options(load_only('id')).filter(Post.user_id == current_user.id, Post.published_at == None).count()
meta = Meta(title='My Drafts | Salesforce-Developer.net',
description='My drafts on Salesforce-Developer.net',
keywords='my drafts'
)
return render_template('blog_list.html', posts=posts, meta=meta, my_drafts=True, total_drafts=total_drafts)
@app.route("/blog/new", methods=['GET', 'POST'])
@roles_required('editor')
def blog_new_post():
g.breadcrumbs = []
g.breadcrumbs.append(Breadcrumb('/', 'Salesforce-developer.net'))
g.breadcrumbs.append(Breadcrumb('/blog', 'Blog'))
g.breadcrumbs.append(Breadcrumb('', 'New Article'))
if request.method == 'POST':
form = BlogPostForm(request.form)
if form.validate():
post = Post()
post.title = form.title.data
post.body = form.body.data
post.meta_keywords = form.meta_keywords.data
post.meta_description = form.meta_description.data
post.thumbnail = form.thumbnail.data
post.user_id = current_user.id
PostService.insert(post)
flash('Post created successfully. You can continue editing or return to Article List.', 'success')
return redirect(url_for('blog_edit_post', slug=post.slug))
else:
flash('There are errors on the form. Please fix them before continuing.', 'error')
else:
form = BlogPostForm()
return render_template('blog_edit.html', form=form, post=None)
@app.route("/blog/bb/preview", methods=['POST'])
@roles_required('editor')
def blog_preview_post():
body = request.form['data']
return render_template('blog_bb_preview.html', body=bbcode_parser.format(body))
@app.route("/blog/<string:slug>/edit/", methods=['GET', 'POST'])
@roles_required('editor')
def blog_edit_post(slug):
post = Post.query.filter_by(slug=slug).first_or_404()
g.breadcrumbs = []
g.breadcrumbs.append(Breadcrumb('/', 'Salesforce-developer.net'))
g.breadcrumbs.append(Breadcrumb('/blog', 'Blog'))
g.breadcrumbs.append(Breadcrumb('', post.title))
if post.user_id != current_user.id:
abort(403)
if request.method == 'POST':
form = BlogPostForm(request.form)
if form.validate():
post.title = form.title.data
post.body = form.body.data
post.meta_keywords = form.meta_keywords.data
post.meta_description = form.meta_description.data
post.thumbnail = form.thumbnail.data
if form.published.data == True and post.published_at == None:
post.published_at = datetime.datetime.now()
if form.published.data == False and post.published_at != None:
post.published_at = None
PostService.update(post)
if request.form['submit'] == 'Save & Exit':
flash('Post updated successfully.', 'success')
return redirect(url_for('top_slug', slug=post.slug))
flash('Post updated successfully. You can continue editing or return to Article List.', 'success')
return redirect(url_for('blog_edit_post', slug=post.slug))
else:
flash('There are errors on the form. Please fix them before continuing.', 'error')
else:
form = BlogPostForm(obj=post)
if post.published_at != None:
form.published.data = True
return render_template('blog_edit.html', form=form, post=post)
# ------------ COMMENTS ----------------
@app.route('/blog/action/new-comment/<string:slug>', methods=['POST'])
@login_required
def blog_new_comment(slug):
post = Post.query.filter_by(slug=slug).first_or_404()
form = CommentForm(obj=request.json)
if form.validate():
comment = Comment()
comment.body = form.body.data
comment.post_id = post.id
comment.user_id = current_user.id
CommentService.insert(comment)
        return jsonify(status='ok', body=comment.body, body_html=comment.body_html, comment_id=comment.id)
return jsonify(status='error', errors=form.errors), 400
@app.route('/blog/action/save-comment/<int:comment_id>', methods=['POST'])
@login_required
def blog_save_comment(comment_id):
comment = Comment.query.get_or_404(comment_id)
if comment.user_id != current_user.id:
abort(403)
form = CommentForm(obj=request.json)
if form.validate():
comment.body = form.body.data
CommentService.update(comment)
return jsonify(status='ok', body=comment.body, body_html=comment.body_html)
return jsonify(status='error', errors=form.errors), 400
# ------------ USER --------------------
@app.route("/user/<string:slug>")
def user_view(slug):
user = User.query.options(undefer_group('full')).filter_by(slug=slug).first_or_404()
g.breadcrumbs = []
g.breadcrumbs.append(Breadcrumb('/', 'Salesforce-developer.net'))
g.breadcrumbs.append(Breadcrumb('/community', 'Community'))
g.breadcrumbs.append(Breadcrumb('', user.fullname()))
return render_template('user_view.html', user=user)
@app.route("/user/<string:slug>/edit", methods=['GET', 'POST'])
@login_required
def user_edit(slug):
user = User.query.filter_by(slug=slug).first_or_404()
g.breadcrumbs = []
g.breadcrumbs.append(Breadcrumb('/', 'Salesforce-developer.net'))
g.breadcrumbs.append(Breadcrumb('/community', 'Community'))
g.breadcrumbs.append(Breadcrumb('/user/'+slug, user.fullname()))
g.breadcrumbs.append(Breadcrumb('', 'Edit My Profile'))
if user.id != current_user.id:
abort(403)
if request.method == 'POST':
form = UserForm(request.form)
if form.validate():
user.type = form.type.data
user.geo_lat = form.geo_lat.data
user.geo_lng = form.geo_lng.data
user.geo_address = form.geo_address.data
if user.type == 'developer':
if user.first_name != form.first_name.data or user.last_name != form.last_name.data:
app.logger.debug('reset slug')
user.slug = None
user.first_name = form.first_name.data
user.last_name = form.last_name.data
user.birthdate = form.birthdate.data
if form.sfdc_start.data == '':
form.sfdc_start.data = None
user.sfdc_start = form.sfdc_start.data
user.sfdc_skills = form.sfdc_skills.data
user.sfdc_certificates = form.sfdc_certificates.data
user.other_skills = form.other_skills.data
user.google_plus = form.google_plus.data
user.linkedin = form.linkedin.data
user.facebook = form.facebook.data
user.personal_site = form.personal_site.data
elif user.type == 'company':
if user.company_name != form.company_name.data:
user.slug = None
user.company_name = form.company_name.data
user.company_info = form.company_info.data
user.google_plus = form.google_plus.data
user.linkedin = form.linkedin.data
user.facebook = form.facebook.data
user.personal_site = form.personal_site.data
elif user.type == 'other':
user.first_name = form.first_name.data
user.last_name = form.last_name.data
user.birthdate = form.birthdate.data
user.about_myself = form.about_myself.data
db.session.commit()
flash('Profile updated successfully', 'success')
return redirect(url_for('user_view', slug=user.slug))
else:
flash('There are errors on the form. Please fix them before continuing.', 'error')
else:
form = UserForm(obj=user)
return render_template('user_edit.html', form=form, user=user)
# ------------ ACCOUNT SETTINGS ---------
@app.route('/account/settings', methods=['GET', 'POST'])
@login_required
def account_settings():
user = current_user
g.breadcrumbs = []
g.breadcrumbs.append(Breadcrumb('/', 'Salesforce-developer.net'))
g.breadcrumbs.append(Breadcrumb('/community', 'Community'))
g.breadcrumbs.append(Breadcrumb('/user/'+user.slug, user.fullname()))
g.breadcrumbs.append(Breadcrumb('', 'Account Settings'))
if request.method == 'POST':
form = SettingsForm(request.form)
if form.validate():
            user.gravatar = form.avatar_type.data == 'gravatar'
if form.new_password.data:
if form.new_password.data != form.old_password.data:
if verify_password(form.old_password.data, user.password):
user.password = encrypt_password(form.new_password.data)
else:
form.old_password.errors.append('Wrong old password')
else:
form.new_password.errors.append('Old and New passwords are the same')
file = request.files['avatar_file']
file_error = False
if file:
filename = 'avatar_'+str(current_user.id+1024)
file_ext = imghdr.what(file)
if file_ext in ALLOWED_EXTENSIONS:
full_filename = filename+'.'+file_ext
size = (200, 200)
img = Image.open(file)
width, height = img.size
if width > height:
delta = width - height
left = int(delta/2)
upper = 0
right = height + left
lower = height
else:
delta = height - width
left = 0
upper = int(delta/2)
right = width
lower = width + upper
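                    # e.g. a hypothetical 400x300 upload: delta = 100, so the crop
                    # box becomes (50, 0, 350, 300) -- a centered 300x300 square
                    # that img.thumbnail() then scales down to 200x200.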
img = img.crop((left, upper, right, lower))
img.thumbnail(size, Image.ANTIALIAS)
img.save(os.path.join(app.config['AVATAR_FOLDER'], full_filename), quality=90, dpi=(72,72))
user.avatar_link = full_filename
else:
form.avatar_file.errors.append('File type is not allowed.')
flash('There are errors on the form. Please fix them before continuing.', 'error')
file_error = True
            if not file_error:
db.session.commit()
flash('Settings updated successfully.', 'success')
else:
flash('There are errors on the form. Please fix them before continuing.', 'error')
else:
form = SettingsForm()
        form.avatar_type.data = 'gravatar' if user.gravatar else 'avatar'
form.username.data = user.username
form.email.data = user.email
form.timezone.data = user.timezone
return render_template('settings.html', form=form, user=user, rnd=int(random.random()*1000))
# ------------ FORUM --------------------
@app.route('/forum')
def forum_list():
g.breadcrumbs = []
g.breadcrumbs.append(Breadcrumb('/', 'Salesforce-developer.net'))
g.breadcrumbs.append(Breadcrumb('/forum', 'Forums'))
forums = Forum.query.order_by('sequence').all()
meta = Meta(title='Forum | Salesforce-Developer.net',
description='Forum for Salesforce Developers. Join us and start to discuss dev topics.',
keywords='salesforce forum, sfdc forum, about salesforce, salesforce development, salesforce integration'
)
return render_template('forum_list.html', forums=forums, meta=meta)
@app.route('/forum/<string:slug>')
@app.route("/forum/<string:slug>/page/<int:page>")
def forum_view(slug, page=1):
forum = Forum.query.filter_by(slug=slug).first_or_404()
g.breadcrumbs = []
g.breadcrumbs.append(Breadcrumb('/', 'Salesforce-developer.net'))
g.breadcrumbs.append(Breadcrumb('/forum', 'Forums'))
g.breadcrumbs.append(Breadcrumb('/forum/'+forum.slug, forum.title))
topics = ForumTopic.query.filter_by(forum_id=forum.id).\
order_by(ForumTopic.created_at.desc()).\
paginate(page, 20, True)
meta = Meta(title=forum.title + ' | Salesforce-Developer.net',
description=forum.description,
keywords=forum.title
)
return render_template('forum_view.html', forum=forum, topics=topics, meta=meta)
@app.route('/forum/action/new-topic/<string:forum_slug>', methods=['GET'])
@login_required
def forum_new_topic(forum_slug):
forum = Forum.query.filter_by(slug=forum_slug).first_or_404()
g.breadcrumbs = []
g.breadcrumbs.append(Breadcrumb('/', 'Salesforce-developer.net'))
g.breadcrumbs.append(Breadcrumb('/forum', 'Forums'))
g.breadcrumbs.append(Breadcrumb('/forum/'+forum.slug, forum.title))
g.breadcrumbs.append(Breadcrumb('', 'New Topic'))
return render_template('topic_view.html', forum=forum, new_topic=True)
@app.route('/forum/action/new-topic/<string:slug>', methods=['POST'])
@login_required
def forum_create_topic(slug):
forum = Forum.query.filter_by(slug=slug).first_or_404()
form = ForumTopicForm(obj=request.json)
if form.validate():
topic = ForumTopic()
topic.title = form.title.data
topic.body = form.body.data
topic.forum_id = forum.id
topic.user_id = current_user.id
ForumTopicService.insert(topic)
return jsonify(status='ok', title=topic.title, body=topic.body, body_html=topic.body_html, slug=topic.slug)
return jsonify(status='error', errors=form.errors), 400
@app.route('/forum/action/save-topic/<string:slug>', methods=['POST'])
@login_required
def forum_save_topic(slug):
topic = ForumTopic.query.filter_by(slug=slug).first_or_404()
if topic.user_id != current_user.id:
abort(403)
form = ForumTopicForm(obj=request.json)
if form.validate():
topic.title = form.title.data
topic.body = form.body.data
ForumTopicService.update(topic)
return jsonify(status='ok', title=topic.title, body=topic.body, body_html=topic.body_html, slug=topic.slug)
return jsonify(status='error', errors=form.errors), 400
@app.route('/forum/action/new-post/<string:slug>', methods=['POST'])
@login_required
def forum_new_post(slug):
topic = ForumTopic.query.filter_by(slug=slug).first_or_404()
form = ForumPostForm(obj=request.json)
if form.validate():
post = ForumPost()
post.body = form.body.data
post.forum_id = topic.forum_id
post.topic_id = topic.id
post.user_id = current_user.id
ForumPostService.insert(post)
return jsonify(status='ok', body=post.body, body_html=post.body_html, post_id=post.id)
return jsonify(status='error', errors=form.errors), 400
@app.route('/forum/action/save-post/<int:post_id>', methods=['POST'])
@login_required
def forum_save_post(post_id):
post = ForumPost.query.get_or_404(post_id)
if post.user_id != current_user.id:
abort(403)
form = ForumPostForm(obj=request.json)
if form.validate():
post.body = form.body.data
ForumPostService.update(post)
return jsonify(status='ok', body=post.body, body_html=post.body_html)
return jsonify(status='error', errors=form.errors), 400
# ------------ COMMUNITY ----------------
@app.route('/community')
def community_map():
g.breadcrumbs = []
g.breadcrumbs.append(Breadcrumb('/', 'Salesforce-developer.net'))
g.breadcrumbs.append(Breadcrumb('/community', 'Community'))
g.breadcrumbs.append(Breadcrumb('', 'Map'))
users = User.query.filter(User.type != None, User.active == True, User.id != 1, User.geo_lat != None, User.geo_lng != None).order_by(User.login_count.desc())
return render_template('community_map.html', users=users)
@app.route('/community/list')
def community_list():
g.breadcrumbs = []
g.breadcrumbs.append(Breadcrumb('/', 'Salesforce-developer.net'))
g.breadcrumbs.append(Breadcrumb('/community', 'Community'))
g.breadcrumbs.append(Breadcrumb('', 'List'))
users = User.query.filter(User.type != None, User.active == True, User.id != 1).order_by(User.login_count.desc())
return render_template('community_list.html', users=users)
# ------------ GET by SLUG --------------
@app.route('/<string:slug>')
def top_slug(slug):
post = Post.query.filter(Post.slug==slug).first()
if post:
if post.published_at is None and post.user_id != current_user.id:
abort(404)
g.breadcrumbs = []
g.breadcrumbs.append(Breadcrumb('/', 'Salesforce-developer.net'))
g.breadcrumbs.append(Breadcrumb('/blog', 'Blog'))
g.breadcrumbs.append(Breadcrumb('', post.title))
if post.published_at is None:
flash('This post is not published. Only you can see it.', 'warning')
comments = Comment.query.filter(Comment.post_id==post.id).order_by(Comment.created_at)
meta = Meta(title=post.title+' | Salesforce-Developer.net',
description=post.meta_description,
keywords=post.meta_keywords
)
return render_template('blog_view.html', post=post, meta=meta, comments=comments)
topic = ForumTopic.query.filter(ForumTopic.slug==slug).first()
if topic:
#ForumTopicService.update(topic)
g.breadcrumbs = []
g.breadcrumbs.append(Breadcrumb('/', 'Salesforce-developer.net'))
g.breadcrumbs.append(Breadcrumb('/forum', 'Forums'))
g.breadcrumbs.append(Breadcrumb('/forum/'+topic.forum.slug, topic.forum.title))
g.breadcrumbs.append(Breadcrumb('', topic.title))
posts = ForumPost.query.filter_by(topic_id=topic.id).order_by(ForumPost.created_at)
#for post in posts:
# ForumPostService.update(post)
meta = Meta(title=topic.title+' | Salesforce-Developer.net',
description=topic.title,
keywords=topic.forum.title)
return render_template('topic_view.html', topic=topic, posts=posts, meta=meta)
abort(404)
# ------------ ERROR HANDLERS ----------
@app.errorhandler(404)
def page_not_found(e):
flash('Requested resource not found.', 'error')
if request.mimetype == 'application/json':
return 'Requested resource not found.', 404
return render_template('error.html'), 404
@app.errorhandler(403)
def no_permissions(e):
flash('You don\'t have the permission to access the requested resource', 'error')
if request.mimetype == 'application/json':
return 'You don\'t have the permission to access the requested resource', 403
return render_template('error.html'), 403
|
python
|
# Generated by Django 2.2.4 on 2019-09-10 13:37
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('export_readiness', '0055_auto_20190910_1242'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='how_dit_helps_columns',
field=wagtail.core.fields.StreamField([('columns', wagtail.core.blocks.StructBlock([('heading', wagtail.core.blocks.CharBlock()), ('content', wagtail.core.blocks.RichTextBlock()), ('icon', wagtail.images.blocks.ImageChooserBlock(required=False)), ('image_alt', wagtail.core.blocks.CharBlock(required=False))]))], blank=True, null=True),
),
]
|
python
|
import opentrons.execute
import requests
import json
import import_ipynb
import csv
from labguru import Labguru
experiment_id = 512
lab = Labguru(login='[email protected]', password='123password')
# get Labguru elements
experiment = lab.get_experiment(experiment_id)
plate_element = lab.get_elements_by_type(experiment_id, 'plate')[0].get_data()
sample_element = lab.get_elements_by_type(experiment_id, 'samples')[0]
forms_elements = lab.get_elements_by_type(experiment_id, 'form')
steps_element = lab.get_elements_by_type(experiment_id, 'steps')[0]
attachments_element = lab.get_elements_by_type(experiment_id, 'attachments')[0]
# get data from elements
samples = sample_element.get_data()
data = forms_elements[1].get_data()
# opentrons protocol
protocol = opentrons.execute.get_protocol_api('2.8')
protocol.home()
plate1 = protocol.load_labware(data['source_plate'], 4)
plate2 = protocol.load_labware(data['destination_plate'], 7)
tiprack_1 = protocol.load_labware('opentrons_96_tiprack_300ul', 10)
p50_multi = protocol.load_instrument('p50_multi', 'right', tip_racks=[tiprack_1])
# iterating samples and stocks from Labguru
for sample_index, sample in enumerate(samples):
p50_multi.pick_up_tip()
p50_multi.aspirate(data['transfer_volume'], plate1.rows()[0][sample_index])
for stock_index, stock in enumerate(sample['stocks']):
p50_multi.dispense(10, plate2.rows()[0][stock_index])
# update stocks volume in Labguru
sample_element.update_stock_amount(sample['id'], stock['id'], data['transfer_volume'], 'volume', data['unit'])
# add step to Labguru with some tracking information
steps_element.add_step(f'transfer {data["transfer_volume"]} {data["unit"]}, of {stock["name"]} id {stock["id"]}')
p50_multi.drop_tip()
# create csv file from Labguru plate
file_name = 'Cell culture - Assay Preparation.csv'
with open(file_name, 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(['id', 'coordinates', 'samples', 'stock', 'concentration'])
for cell in plate_element:
writer.writerow([cell['id'], cell['coordinates'], cell['samples'][0],
cell['samples_metadata'][str(cell['samples'][0])]['stocks'],
cell['samples_metadata'][str(cell['samples'][0])]['concentration']])
with open(file_name, 'rb') as file:
attachment = file.read()
# upload file to Labguru
attachment_response = requests.post(f'{lab.session.url}/api/v1/attachments',
data={
'item[title]': file_name,
'item[attachable_type]': 'Knowledgebase::AbstractDocument',
'item[attach_to_uuid]': experiment.uuid,
'token': lab.session.token,
},
files={'item[attachment]': (file_name, attachment)},
)
# add attachment to attachments_element
attachments_element.add_attachment(attachment_response.json()['id'])
protocol.home()
|
python
|
import collections
import os
from strictdoc.helpers.sorting import alphanumeric_sort
class FileOrFolderEntry:
def get_full_path(self):
raise NotImplementedError
def get_level(self):
raise NotImplementedError
def is_folder(self):
raise NotImplementedError
def mount_folder(self):
raise NotImplementedError
class File(FileOrFolderEntry):
def __init__(self, level, full_path):
assert os.path.isfile(full_path)
assert os.path.isabs(full_path)
self.level = level
self.full_path = full_path
self.root_path = full_path
self.files = [self]
self.subfolder_trees = []
def __repr__(self):
return "File: {}".format(self.full_path)
def is_folder(self):
return False
def get_full_path(self):
return self.full_path
def get_level(self):
return self.level
def get_file_name(self):
return os.path.basename(self.full_path)
def get_folder_path(self):
return os.path.dirname(self.full_path)
def mount_folder(self):
return os.path.basename(os.path.dirname(self.root_path))
class Folder(FileOrFolderEntry):
def __init__(self, root_path, level):
assert os.path.isdir(root_path)
assert os.path.isabs(root_path)
self.root_path = root_path
self.level = level
self.files = []
self.subfolder_trees = []
self.parent_folder = None
self.has_sdoc_content = False
def __repr__(self):
return "FileTree: (root_path: {}, files: {})".format(
self.root_path, self.files
)
def is_folder(self):
return True
def get_full_path(self):
return self.root_path
def get_level(self):
return self.level
def get_folder_name(self):
return os.path.basename(os.path.normpath(self.root_path))
def mount_folder(self):
return os.path.basename(self.root_path)
def set(self, files):
for file in files:
full_file_path = os.path.join(self.root_path, file)
self.files.append(File(self.level + 1, full_file_path))
def add_subfolder_tree(self, subfolder_tree):
assert isinstance(subfolder_tree, Folder)
self.subfolder_trees.append(subfolder_tree)
def set_parent_folder(self, parent_folder):
assert isinstance(parent_folder, Folder)
self.parent_folder = parent_folder
def dump(self):
print(self)
for subfolder in self.subfolder_trees:
subfolder.dump()
class FileTree:
def __init__(self, root_folder_or_file):
self.root_folder_or_file = root_folder_or_file
@staticmethod
def create_single_file_tree(root_path):
single_file = File(0, root_path)
return FileTree(single_file)
def iterate(self):
file_tree_mount_folder = self.root_folder_or_file.mount_folder()
task_list = [self.root_folder_or_file]
while len(task_list) > 0:
current_tree = task_list.pop(0)
for doc_file in current_tree.files:
yield self.root_folder_or_file, doc_file, file_tree_mount_folder
task_list.extend(current_tree.subfolder_trees)
def iterate_directories(self):
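        # Depth-first, pre-order traversal: files and subfolder trees are pushed
        # onto the front of the deque (reversed() keeps their original order),
        # so a folder's children are visited before its siblings.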
task_list = collections.deque([self.root_folder_or_file])
while task_list:
file_tree_or_file = task_list.popleft()
if isinstance(file_tree_or_file, File):
yield file_tree_or_file
elif isinstance(file_tree_or_file, Folder):
if not file_tree_or_file.has_sdoc_content:
continue
yield file_tree_or_file
task_list.extendleft(reversed(file_tree_or_file.files))
task_list.extendleft(
reversed(file_tree_or_file.subfolder_trees)
)
class FileFinder:
@staticmethod
def find_files_with_extensions(root_path, extensions):
assert os.path.isdir(root_path)
assert os.path.isabs(root_path)
assert isinstance(extensions, set)
root_level = root_path.count(os.sep)
root_folder = Folder(root_path, 0)
folder_map = {root_path: root_folder}
for current_root_path, dirs, files in os.walk(root_path, topdown=True):
dirs[:] = [
d
for d in dirs
if (
not d.startswith(".")
and not d.startswith("_")
and "tests" not in d
)
]
dirs.sort(key=alphanumeric_sort)
current_root_path_level = (
current_root_path.count(os.sep) - root_level
)
current_tree = folder_map.setdefault(
current_root_path,
Folder(current_root_path, current_root_path_level),
)
def filter_source_files(_files):
_source_files = []
for file in _files:
_, file_extension = os.path.splitext(file)
if file_extension in extensions:
_source_files.append(file)
return _source_files
files = filter_source_files(files)
files.sort(key=alphanumeric_sort)
current_tree.set(files)
if len(files) > 0:
current_tree.has_sdoc_content = True
if current_root_path == root_path:
continue
current_parent_path = os.path.dirname(current_root_path)
# top-down search assumes we have seen the parent before.
assert current_parent_path in folder_map
current_parent_folder = folder_map[current_parent_path]
current_tree.set_parent_folder(current_parent_folder)
if current_tree.has_sdoc_content:
parent_folder_cursor = current_parent_folder
while (
parent_folder_cursor
and not parent_folder_cursor.has_sdoc_content
):
parent_folder_cursor.has_sdoc_content = True
parent_folder_cursor = parent_folder_cursor.parent_folder
current_parent_folder.add_subfolder_tree(current_tree)
file_tree_structure = FileTree(folder_map[root_path])
return file_tree_structure
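# A minimal usage sketch (the absolute path and extension set are hypothetical):
#
#   tree = FileFinder.find_files_with_extensions("/abs/path/docs", {".sdoc"})
#   for root, doc_file, mount_folder in tree.iterate():
#       print(doc_file.get_full_path())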
class PathFinder:
@staticmethod
def find_directories(root_path, directory):
assert os.path.isdir(root_path)
assert os.path.isabs(root_path)
directories = []
for current_root_path, dirs, _ in os.walk(root_path, topdown=True):
dirs[:] = [
d
for d in dirs
if not d.startswith(".") and d != "output" and d != "tests"
]
if os.path.basename(current_root_path) == directory:
directories.append(current_root_path)
return directories
|
python
|
#!/usr/bin/env python2
import sys
sys.path.insert(0, '..')
from cde_test_common import *
def checker_func():
assert os.path.isfile('cde-package/cde-root/home/pgbovine/CDE/tests/readlink_abspath_test/libc.so.6')
generic_test_runner(["python", "readlink_abspath_test.py"], checker_func)
|
python
|
import numpy as np
import pysam
import subprocess
import argparse
import utils
import os, pdb
MIN_MAP_QUAL = 10
def parse_args():
parser = argparse.ArgumentParser(description=" convert bam data format to bigWig data format, "
" for ribosome profiling and RNA-seq data ")
parser.add_argument("--dtype",
choices=("rnaseq","riboseq"),
default="riboseq",
help="specifies the type of assay (default: riboseq)")
parser.add_argument("bam_file",
action="store",
help="path to bam input file")
options = parser.parse_args()
options.bgzip = which("bgzip")
options.tabix = which("tabix")
return options
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def convert_rnaseq(options):
# file names and handles
count_file = os.path.splitext(options.bam_file)[0]
sam_handle = pysam.Samfile(options.bam_file, "rb")
count_handle = open(count_file, 'w')
for cname,clen in zip(sam_handle.references,sam_handle.lengths):
# fetch reads in chromosome
sam_iter = sam_handle.fetch(reference=cname)
# initialize count array
counts = dict()
for read in sam_iter:
# skip read if unmapped
if read.is_unmapped:
continue
# skip read, if mapping quality is low
if read.mapq < MIN_MAP_QUAL:
continue
if read.is_reverse:
site = read.pos + read.alen - 1
else:
site = read.pos
try:
counts[site] += 1
except KeyError:
counts[site] = 1
# write counts to output file
indices = np.sort(counts.keys())
for i in indices:
count_handle.write('\t'.join([cname,'%d'%i,'%d'%(i+1),'%d'%counts[i]])+'\n')
print "completed %s"%cname
sam_handle.close()
count_handle.close()
# compress count file
pipe = subprocess.Popen("%s -f %s"%(options.bgzip, count_file), \
stdout=subprocess.PIPE, shell=True)
stdout = pipe.communicate()[0]
# index count file
pipe = subprocess.Popen("%s -f -b 2 -e 3 -0 %s.gz"%(options.tabix, count_file), \
stdout=subprocess.PIPE, shell=True)
stdout = pipe.communicate()[0]
print "Compressed file with RNA-seq counts is %s"%(count_file+'.gz')
def convert_riboseq(options):
# file names and handles
fwd_count_file = os.path.splitext(options.bam_file)[0]+'_fwd'
rev_count_file = os.path.splitext(options.bam_file)[0]+'_rev'
sam_handle = pysam.Samfile(options.bam_file, "rb")
fwd_handle = dict([(r,open(fwd_count_file+'.%d'%r, 'w')) for r in utils.READ_LENGTHS])
rev_handle = dict([(r,open(rev_count_file+'.%d'%r, 'w')) for r in utils.READ_LENGTHS])
for cname,clen in zip(sam_handle.references,sam_handle.lengths):
# fetch reads in chromosome
sam_iter = sam_handle.fetch(reference=cname)
# initialize count arrays
fwd_counts = dict([(r,dict()) for r in utils.READ_LENGTHS])
rev_counts = dict([(r,dict()) for r in utils.READ_LENGTHS])
for read in sam_iter:
# skip reads not of the appropriate length
if read.rlen not in utils.READ_LENGTHS:
continue
# skip read if unmapped
if read.is_unmapped:
continue
# skip read, if mapping quality is low
if read.mapq < MIN_MAP_QUAL:
continue
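            # estimated ribosome site: ~12 nt downstream of the footprint's
            # 5' end (the rightmost aligned base for reverse-strand reads)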
if read.is_reverse:
asite = int(read.positions[-13])
try:
rev_counts[read.rlen][asite] += 1
except KeyError:
rev_counts[read.rlen][asite] = 1
else:
asite = int(read.positions[12])
try:
fwd_counts[read.rlen][asite] += 1
except KeyError:
fwd_counts[read.rlen][asite] = 1
# write counts to output files
for r in utils.READ_LENGTHS:
indices = np.sort(fwd_counts[r].keys())
for i in indices:
fwd_handle[r].write('\t'.join([cname, '%d'%i, '%d'%(i+1), '%d'%fwd_counts[r][i]])+'\n')
indices = np.sort(rev_counts[r].keys())
for i in indices:
rev_handle[r].write('\t'.join([cname, '%d'%i, '%d'%(i+1), '%d'%rev_counts[r][i]])+'\n')
print "completed %s"%cname
sam_handle.close()
for r in utils.READ_LENGTHS:
fwd_handle[r].close()
rev_handle[r].close()
for r in utils.READ_LENGTHS:
# compress count file
pipe = subprocess.Popen("%s -f %s.%d"%(options.bgzip, fwd_count_file, r), \
stdout=subprocess.PIPE, shell=True)
stdout = pipe.communicate()[0]
pipe = subprocess.Popen("%s -f %s.%d"%(options.bgzip, rev_count_file, r), \
stdout=subprocess.PIPE, shell=True)
stdout = pipe.communicate()[0]
# index count file
pipe = subprocess.Popen("%s -f -b 2 -e 3 -0 %s.%d.gz"%(options.tabix, fwd_count_file, r), \
stdout=subprocess.PIPE, shell=True)
stdout = pipe.communicate()[0]
pipe = subprocess.Popen("%s -f -b 2 -e 3 -0 %s.%d.gz"%(options.tabix, rev_count_file, r), \
stdout=subprocess.PIPE, shell=True)
stdout = pipe.communicate()[0]
print "Compressed file with ribosome footprint counts on forward strand is %s"%(fwd_count_file+'.%d.gz'%r)
print "Compressed file with ribosome footprint counts on reverse strand is %s"%(rev_count_file+'.%d.gz'%r)
if __name__=="__main__":
options = parse_args()
if options.dtype=="rnaseq":
convert_rnaseq(options)
elif options.dtype=="riboseq":
convert_riboseq(options)
|
python
|
class Solution(object):
    def rotate(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        if k is None or k <= 0:
            return
        k = k % len(nums)
        end = len(nums) - 1
        # reverse the first n-k elements, then the last k, then the whole list
        self.swap(nums, 0, end - k)
        self.swap(nums, end - k + 1, end)
        self.swap(nums, 0, end)

    def swap(self, nums, start, end):
        # reverse nums[start..end] in place
        while start < end:
            nums[start], nums[end] = nums[end], nums[start]
            start = start + 1
            end = end - 1
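# Example trace for a hypothetical call Solution().rotate(nums, 3)
# with nums = [1, 2, 3, 4, 5, 6, 7]:
#   swap(nums, 0, 3) -> [4, 3, 2, 1, 5, 6, 7]   (first n-k elements reversed)
#   swap(nums, 4, 6) -> [4, 3, 2, 1, 7, 6, 5]   (last k elements reversed)
#   swap(nums, 0, 6) -> [5, 6, 7, 1, 2, 3, 4]   (whole list reversed)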
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Spam email classification using our own SVM implementation.
"""
import re
import numpy as np
import scipy.io as scio
from nltk.stem import porter
from svm import SVC
def get_vocabs():
vocabs = {}
    # total number of words in the vocabulary
n = 1899
f = open('vocab.txt', 'r')
for i in range(n):
line = f.readline()
idx = int(re.search(r'\d+', line).group(0)) - 1
word = re.search(r'[a-zA-Z]+', line).group(0)
vocabs[word] = idx
f.close()
return vocabs
def process_email(email_content: str):
vocabs = get_vocabs()
word_indices = []
email_content = email_content.lower()
email_content = re.sub(r'<[^<>]+>', ' ', email_content)
email_content = re.sub(r'[0-9]+', 'number', email_content)
email_content = re.sub(r'(http|https)://[^\s]*', 'httpaddr', email_content)
email_content = re.sub(r'[^\s]+@[^\s]+', 'emailaddr', email_content)
email_content = re.sub(r'[$]+', 'dollar', email_content)
print('\n==== Processed Email ====\n')
l = 0
    tokens = re.split(r'[@$/#.\-:&*+=\[\]?!\(\){},\'">_<;% ]', email_content)
token_pattern = re.compile(r'[^a-zA-Z0-9]')
stemmer = porter.PorterStemmer()
for token in tokens:
token = token_pattern.sub('', token)
token = stemmer.stem(token)
if len(token) < 1:
continue
if token in vocabs:
word_indices.append(vocabs[token])
if l + len(token) + 1 > 78:
print()
l = 0
print(token + ' ', end='')
l = l + len(token) + 1
print('\n\n=========================')
return word_indices
def email_features(word_indices: list):
n = 1899
m = len(word_indices)
x = np.zeros((n,))
for i in range(m):
x[word_indices[i]] = 1
return x
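# For instance, word_indices = [0, 5] (hypothetical indices) produces a
# 1899-dimensional vector with x[0] == x[5] == 1 and zeros everywhere else.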
if __name__ == '__main__':
'Part 1: Email Preprocessing'
print('\nPreprocessing sample email (emailSample1.txt)')
with open('emailSample1.txt', 'r') as f:
file_contents = f.read()
word_indices = process_email(file_contents)
print(word_indices, '\n')
input('Program paused. Press enter to continue.')
'Part 2: Feature Extraction'
print('\nExtracting features from sample email (emailSample1.txt)')
file_contents = open('emailSample1.txt', 'r').read()
word_indices = process_email(file_contents)
features = email_features(word_indices)
print('Length of feature vector: %d' % len(features))
print('Number of non-zero entries: %d' % sum(features > 0))
input('Program paused. Press enter to continue.')
'Part 3: Train Linear SVM for Spam Classification'
data = scio.loadmat('spamTrain.mat')
    X = (data['X']).astype(int)
    y = (data['y'].ravel()).astype(int)
print('\nTraining Linear SVM (Spam Classification)')
print('(this may take 1 to 2 minutes) ...')
c = 0.1
svc = SVC(labels=[1, 0], c=c, kernel='linear', tol=1e-5)
svc.train(X, y)
p = svc.predict(X)
print(y[:20])
print(p[:20])
print('\nTraining Accuracy: %f' % (np.mean(p == y) * 100))
'Part 4: Test Spam Classification'
data = scio.loadmat('spamTest.mat')
    Xtest = (data['Xtest']).astype(int)
    ytest = (data['ytest'].ravel()).astype(int)
print('\nEvaluating the trained Linear SVM on a test set ...')
p = svc.predict(Xtest)
print('Test Accuracy: %f' % (np.mean(p == ytest) * 100))
input('Program paused. Press enter to continue.')
'Part5: Test own email'
    filename = input('\nEnter an email file name (q to quit): ')
while filename != 'q':
        print('Testing whether email %s is spam' % filename)
with open(filename, 'r') as f:
file_contents = f.read()
word_indices = process_email(file_contents)
x = email_features(word_indices)
x = x.reshape((1, x.shape[0]))
p = svc.predict(x)
        print('Classified email %s with the SVM. Spam?' % filename, p == 1)
        filename = input('\nEnter an email file name (q to quit): ')
|
python
|
# File: C (Python 2.4)
from direct.fsm.FSM import FSM
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from pirates.piratesgui.GuiButton import GuiButton
from pirates.piratesgui import PiratesGuiGlobals
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.piratesgui.CannonDefenseScorePanelBase import RoundCompleteFlags
from pirates.piratesgui.CannonDefenseEndOfWavePanel import CannonDefenseEndOfWavePanel
from pirates.piratesgui.CannonDefenseGameStatsPanel import CannonDefenseGameStatsPanel
class CannonDefenseScoreBoard(FSM, DirectFrame):
def __init__(self, waveNumber, bonusRound, maxWaves, roundComplete):
FSM.__init__(self, 'CannonDefenseScoreBoardFSM')
        DirectFrame.__init__(self, frameSize = (0, 2.406, 0, 1.5), pos = (-1.2, 0, -0.7), sortOrder = 0)
try:
binOrder = 0
if roundComplete:
binOrder = 10
self.setBin('gui-cannonDefense', binOrder)
self.loadBackGround()
actualWaveNumber = waveNumber + bonusRound * maxWaves
            self.panel1 = CannonDefenseEndOfWavePanel(actualWaveNumber, roundComplete, 1, 2, parent = self, frameColor = (0, 0, 0, 0), frameSize = (0, 2.406, 0, 1.5))
if roundComplete:
self.panel1.nextButton['command'] = self.request
self.panel1.nextButton['extraArgs'] = [
'Panel3']
            self.panel3 = CannonDefenseGameStatsPanel(roundComplete, 2, 2, parent = self, frameColor = (0, 0, 0, 0), frameSize = (0, 2.406, 0, 1.5))
self.panel3.prevButton['command'] = self.request
self.panel3.prevButton['extraArgs'] = [
'Panel1']
self.request('Panel1')
except:
self.destroy()
raise
def loadBackGround(self):
self.backgroundModel = loader.loadModel('models/gui/pir_m_gui_can_reportPanel')
self.backgroundModel.setPos(0, 0, 0)
self.backgroundModel.wrtReparentTo(self)
        self.backgroundModel.setScale(0.4)
self.backgroundModel.setDepthWrite(False)
def enterPanel1(self):
self.panel1.show()
def exitPanel1(self):
self.panel1.hide()
def enterPanel3(self):
self.panel3.show()
def exitPanel3(self):
self.panel3.hide()
def destroy(self):
DirectFrame.destroy(self)
self.backgroundModel = None
self.panel1 = None
self.panel3 = None
def setupPanel1(self, endOfWaveData):
self.panel1.setTreasureStats(endOfWaveData.treasureStolen, endOfWaveData.treasureRemaining)
playerIndex = self.panel1.setNames(endOfWaveData.playerNames)
self.panel1.setSunkShips(endOfWaveData.shipsSunkWave, playerIndex)
self.panel1.setDamageDealt(endOfWaveData.damgeDealtWave, playerIndex)
self.panel1.setAccuracy(endOfWaveData.accuracyWave, playerIndex)
self.panel1.setShotsFired(endOfWaveData.shotsFiredWave, playerIndex)
self.panel1.setGoldAwarded(endOfWaveData.myGoldEarned, endOfWaveData.myGoldBonus)
self.panel1.setTreasureAwarded(endOfWaveData.treasureEarned)
def setupPanel3(self, endOfWaveData):
playerIndex = self.panel3.setNames(endOfWaveData.playerNames)
self.panel3.setTimePlayed(endOfWaveData.timePlayed, playerIndex)
self.panel3.setSunkShips(endOfWaveData.shipsSunkOverall, playerIndex)
self.panel3.setDamageDealt(endOfWaveData.damgeDealtOverall, playerIndex)
self.panel3.setAccuracy(endOfWaveData.accuracyOverall, playerIndex)
self.panel3.setShotsFired(endOfWaveData.shotsFiredOverall, playerIndex)
self.panel3.setGoldEarned(endOfWaveData.goldPaidOverall, playerIndex)
|
python
|
from .constants.google_play import Sort
from .features.app import app
from .features.reviews import reviews
__version__ = "0.0.2.3"
|
python
|
class memoize(object):
"""
Memoize the result of a property call.
>>> class A(object):
>>> @memoize
>>> def func(self):
>>> return 'foo'
"""
def __init__(self, func):
self.__name__ = func.__name__
self.__module__ = func.__module__
self.__doc__ = func.__doc__
self.func = func
def __get__(self, obj, type=None):
if obj is None:
return self
d, n = vars(obj), self.__name__
if n not in d:
value = self.func(obj)
d[n] = value
return value
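# Usage sketch: with class A from the docstring above, the first access to
# a.func calls func() and stores 'foo' in a.__dict__['func']; because memoize
# is a non-data descriptor, later lookups read the cached attribute directly
# and never reach __get__ again.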
|
python
|
# -*- encoding: utf-8 -*-
# Copyright (c) 2019 Stephen Bunn <[email protected]>
# ISC License <https://opensource.org/licenses/isc>
"""
"""
import pathlib
# the path to the data directory included in the modules source
DATA_DIR = pathlib.Path(__file__).parent / "data"
# the path to the store-locations file located in the data directory
STORE_LOCATIONS_PATH = DATA_DIR / "store-locations.csv"
|
python
|
"""Prometheus collector for Apache Traffic Server's stats_over_http plugin."""
import logging
import re
import time
import requests
import yaml
from prometheus_client import Metric
CACHE_VOLUMES = re.compile("^proxy.process.cache.volume_([0-9]+)")
LOG = logging.getLogger(__name__)
def _get_float_value(data, keys):
"""Fetch a value using a list of keys. First present key wins.
Used for backwards compatibility with older ATS versions.
"""
for key in keys:
try:
value = float(data[key])
except KeyError:
pass
else:
return value
raise KeyError("Keys not found in data: {}".format(",".join(keys)))
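# A small illustration (hypothetical keys): with data = {"proxy.node.foo": "2"},
# _get_float_value(data, ["proxy.process.foo", "proxy.node.foo"]) returns 2.0,
# falling back to the second key because the first one is absent.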
class StatsPluginCollector(object):
"""Collector for metrics from the stats_over_http plugin."""
def __init__(self, endpoint, metrics_config_file, max_retries=0, ssl_verify=True):
"""Instantiate a new Collector for ATS stats."""
self._endpoint = endpoint
self._ssl_verify = ssl_verify
self.log = LOG
self.session = requests.Session()
http_adapter = requests.adapters.HTTPAdapter(max_retries=max_retries)
for prefix in ("http://", "https://"):
self.session.mount(prefix, http_adapter)
with open(metrics_config_file, "rb") as metrics_file:
self._metrics = yaml.safe_load(metrics_file.read())
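        # A minimal sketch of the config shape this collector expects
        # (hypothetical metric and stat names), matching how parse_metrics()
        # reads it below:
        #
        #   trafficserver_example_total:
        #     documentation: "Example counter."
        #     type: counter
        #     values:
        #       - value: proxy.process.http.some_stat
        #         labels: {protocol: http}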
def get_json(self):
"""Query the ATS stats endpoint, return parsed JSON."""
r = self.session.get(self._endpoint, verify=self._ssl_verify)
return r.json()["global"]
def collect(self):
"""Generator used to gather and return all metrics."""
start_time = time.time()
self.log.debug("Beginning collection")
self.log.debug("Fetching JSON: {0}".format(self._endpoint))
data = self.get_json()
self.log.debug("Gathering metrics")
for metric in self.parse_metrics(data):
yield metric
self.log.debug("Collection complete")
yield self._get_scrape_duration_metric(start_time)
def _get_scrape_duration_metric(self, start_time):
metric = Metric(
"trafficserver_scrape_duration_seconds",
"Time the Traffic Server scrape took, in seconds.",
"gauge",
)
metric.add_sample(
"trafficserver_scrape_duration_seconds",
value=time.time() - start_time,
labels={},
)
return metric
def parse_metrics(self, data):
"""Generator for trafficserver metrics."""
for metric_name, metric_cfg in self._metrics.items():
metric = Metric(
metric_name, metric_cfg["documentation"], metric_cfg["type"]
)
for metric_value in metric_cfg["values"]:
if isinstance(metric_value["value"], float):
value = metric_value["value"]
else:
try:
value = float(data[metric_value["value"]])
                    except ValueError:
                        self.log.warning(
                            "Unable to convert metric %s value %s to float",
                            metric_name,
                            metric_value["value"],
                        )
                        continue
except KeyError:
self.log.debug(
"Metric %s value %s not found",
metric_name,
metric_value["value"],
)
continue
metric.add_sample(
metric_name, value=value, labels=metric_value["labels"]
)
yield metric
for rt in ("request", "response"):
metric_name = "trafficserver_{}_size_bytes_total".format(rt)
metric = Metric(
metric_name, "{} size in bytes.".format(rt.capitalize()), "counter"
)
try:
user_bytes = _get_float_value(
data,
[
"proxy.process.http.user_agent_total_{}_bytes".format(rt),
"proxy.node.http.user_agent_total_{}_bytes".format(rt),
],
)
except KeyError:
# TS v8 with missing total.
header_total = float(
data[
"proxy.process.http.user_agent_{}_header_total_size".format(rt)
]
)
doc_total = float(
data[
"proxy.process.http.user_agent_{}_document_total_size".format(
rt
)
]
)
user_bytes = header_total + doc_total
metric.add_sample(
metric_name,
value=user_bytes,
labels={"source": "user_agent", "protocol": "http"},
)
try:
origin_bytes = _get_float_value(
data,
[
"proxy.process.http.origin_server_total_{}_bytes".format(rt),
"proxy.node.http.origin_server_total_{}_bytes".format(rt),
],
)
except KeyError:
# TS v8 with missing total.
header_total = float(
data[
"proxy.process.http.origin_server_{}_header_total_size".format(
rt
)
]
)
doc_total = float(
data[
"proxy.process.http.origin_server_{}_document_total_size".format(
rt
)
]
)
origin_bytes = header_total + doc_total
metric.add_sample(
metric_name,
value=origin_bytes,
labels={"source": "origin_server", "protocol": "http"},
)
metric.add_sample(
metric_name,
value=_get_float_value(
data,
[
"proxy.process.http.parent_proxy_{}_total_bytes".format(rt),
"proxy.node.http.parent_proxy_total_{}_bytes".format(rt),
],
),
labels={"source": "parent_proxy", "protocol": "http"},
)
yield metric
#
# Cache
#
# Gather all cache volumes for cache statistics
volumes = set()
for key in data:
if key.startswith("proxy.process.cache.volume_"):
m = CACHE_VOLUMES.match(key)
volumes.add(int(m.group(1)))
# Create all cache volume metrics
for volume in volumes:
for metric in self._parse_volume_metrics(data, volume):
yield metric
def _parse_volume_metrics(self, data, volume):
metric = Metric(
"trafficserver_ram_cache_hits_total", "RAM cache hit count.", "counter"
)
metric.add_sample(
"trafficserver_ram_cache_hits_total",
value=float(data["proxy.process.cache.ram_cache.hits"]),
labels={"volume": str(volume)},
)
yield metric
metric = Metric(
"trafficserver_cache_avail_size_bytes_total",
"Total cache available.",
"gauge",
)
metric.add_sample(
"trafficserver_cache_avail_size_bytes_total",
value=float(
data["proxy.process.cache.volume_{0}.bytes_total".format(volume)]
),
labels={"volume": str(volume)},
)
yield metric
metric = Metric(
"trafficserver_cache_used_bytes_total",
"Total cache used in bytes.",
"gauge",
)
metric.add_sample(
"trafficserver_cache_used_bytes_total",
value=float(
data["proxy.process.cache.volume_{0}.bytes_used".format(volume)]
),
labels={"volume": str(volume)},
)
yield metric
metric = Metric(
"trafficserver_cache_direntries", "Total cache direntries.", "gauge"
)
metric.add_sample(
"trafficserver_cache_direntries",
value=float(
data["proxy.process.cache.volume_{0}.direntries.total".format(volume)]
),
labels={"volume": str(volume)},
)
yield metric
metric = Metric(
"trafficserver_cache_used_direntries", "Cache direntries used.", "gauge"
)
metric.add_sample(
"trafficserver_cache_used_direntries",
value=float(
data["proxy.process.cache.volume_{0}.direntries.used".format(volume)]
),
labels={"volume": str(volume)},
)
yield metric
metric = Metric(
"trafficserver_cache_operations_total", "Cache operation count.", "counter"
)
for op in (
"lookup",
"read",
"write",
"update",
"remove",
"evacuate",
"scan",
"read_busy",
):
for result in ("success", "failure"):
k = "proxy.process.cache.volume_{volume}.{op}.{result}".format(
volume=volume, op=op, result=result
)
metric.add_sample(
"trafficserver_cache_operations_total",
value=float(data[k]),
labels={"volume": str(volume), "operation": op, "result": result},
)
yield metric
|
python
|
#
# DeepRacer Guru
#
# Version 3.0 onwards
#
# Copyright (c) 2021 dmh23
#
from src.personalize.reward_functions.follow_centre_line import reward_function
NEW_REWARD_FUNCTION = reward_function
DISCOUNT_FACTORS = [0.999, 0.99, 0.97, 0.95, 0.9]
DISCOUNT_FACTOR_MAX_STEPS = 300
TIME_BEFORE_FIRST_STEP = 0.2
|
python
|
# terrascript/provider/kubernetes.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:20:43 UTC)
#
# For imports without namespace, e.g.
#
# >>> import terrascript.provider.kubernetes
#
# instead of
#
# >>> import terrascript.provider.hashicorp.kubernetes
#
# This is only available for 'official' and 'partner' providers.
from terrascript.provider.hashicorp.kubernetes import *
|
python
|
import os
import sys
from pathlib import Path
import win32api
from dotenv import load_dotenv
from kivy.config import Config
from loguru import logger
import stackprinter
import trio
load_dotenv()
logger.remove()
logger.add(
sys.stdout,
colorize=True,
format="[ <lr>Wallpaper</> ]"
"[<b><fg #3b3b3b>{level: ^8}</></>]"
"[{name}.{function}:{line}]"
"[ {message} ]",
level=os.getenv("WE_DEBUG_LEVEL"),
)
async def launch_menu():
command = f"{sys.executable} {Path(__file__).parent / 'libs' / 'menu.py'}"
await trio.open_process(command)
stackprinter.set_excepthook(style="darkbg2")
if __name__ == "__main__":
try:
engine_debug = False
logger.debug("Starting Menu")
trio.run(launch_menu)
Config.read(str(Path(__file__).parent / "data" / "kivy_backend_config"))
Config.write()
Config.set("kivy", "log_level", "warning")
if not engine_debug:
Config.set("graphics", "borderless", "1")
Config.set("graphics", "resizable", "0")
Config.set("graphics", "width", f"{win32api.GetSystemMetrics(0)}")
Config.set("graphics", "height", f"{win32api.GetSystemMetrics(1)}")
else:
Config.set("graphics", "borderless", "0")
Config.set("graphics", "resizable", "1")
Config.set("graphics", "window_state", "hidden")
Config.write()
from kivy.resources import resource_add_path
from .libs.kivy_manager import WallpaperEngine
from .utils.common import project_dir
resource_add_path(str((project_dir / "libs" / "kv")))
resource_add_path(str((project_dir / "wallpapers" / "kv")))
logger.debug("Starting Wallpaper Engine App")
app = WallpaperEngine(engine_debug=engine_debug)
app.run()
app.window_manager.reset_wallpaper()
except KeyboardInterrupt:
from kivy.app import App
App.get_running_app().window_manager.reset_wallpaper()
logger.info("Exiting...")
|
python
|
from rest_framework import serializers
from users.api.serializers import UserSerializer
from ..models import Message
class MessageSerializer(serializers.ModelSerializer):
sender = UserSerializer(read_only=True)
receiver = UserSerializer(read_only=True)
class Meta:
model = Message
fields = ['pk', 'created', 'sender', 'receiver', 'message']
|
python
|
from typing import List, Tuple
import difflib
from paukenator.nlp import Text, Line
from .common import CmpBase
class CmpTokens(CmpBase):
'''Algorithm that compares texts at word level and detects differences in
tokenization (splitting). Comparison works in line-by-line fashion.
USAGE:
comparer = CmpTokens()
comparer(List[str], Text)
if comparer.has_diff():
print(comparer.diff_as_string())
'''
def __init__(self):
super().__init__()
self.diff_lines: List[Tuple[str, 'Line']] = None
self.diff_tokens: List['CmpToken'] = None
def __call__(self, expected: List[str], observed: Text) -> bool:
assert isinstance(expected, list), \
"Expecting list but got {}".format(type(expected))
assert isinstance(observed, Text), \
"Expecting Text but got {}".format(type(observed))
return self._compare_lines(expected, observed)
def _compare_lines(self, expected: List[str], observed: Text) -> bool:
'''Compare line by line and collect different lines'''
exp_lines = [line.strip() for line in expected if line.strip()]
obs_lines = [line for line in observed.lines()
if not line.is_blank()]
# if self.debug:
# print(f"+++ Expected +++\n{exp_lines}\n---")
# print(f"+++ Observed +++\n{obs_lines}\n---")
self._validate(exp_lines, obs_lines)
self._select_different_lines(exp_lines, obs_lines)
self.equal = not self.diff_lines
return self.equal
# TODO:
# 1. check how this works for two or three adjacent words that were
# tokenized differently
def diff_as_string(self):
'''Serialize difference as one string'''
lines = ['']
for exp, obs in self.diff_lines:
# whole lines, the old and new ones
lines.append("< {}".format(exp))
lines.append("> {}".format(obs.tokenized()))
# compute tokens that were resplit and differ in tokenization
exp_tokens = exp.split()
obs_tokens = obs.tokenized().split()
self._select_different_tokens(exp_tokens, obs_tokens)
# add changed tokens to the output
for dtok in self.diff_tokens:
lines.append(dtok.as_one_line())
lines.append('')
return "\n".join(lines)
def _select_different_tokens(
self, lwords: List[str], rwords: List[str]):
'''Compare two lists of words, <lwords> and <rwords>:
<lwords> is a list of words in a sentence (old tokenization)
<rwords> is a list of words in the same sentence but retokenized
'''
if self.debug:
print("<", " ".join(lwords))
print(">", " ".join(rwords))
        # Ensure that a group with a difference is surrounded by cmp_tokens
        # without a difference; that way the left/right context printed by
        # CmpToken.as_one_line() reads as a natural run of text.
        # Compare the two word lists and build a list of CmpTokens:
        # - if a word did not change, it becomes its own CmpToken
        # - if a word changed, its old and new states go into one CmpToken
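        # A worked sketch (hypothetical words): for lwords = ['is', 'bzw', '.']
        # and rwords = ['is', 'bzw.'], difflib.ndiff yields '  is', '- bzw',
        # '+ bzw.', '- .' (plus '? ' hint lines, which fall through the branches
        # below), so 'is' stays an unchanged CmpToken while the resplit pieces
        # collapse into one CmpToken with old_parts == ['bzw', '.'] and
        # new_parts == ['bzw.'].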
cmp_tokens = []
for cmp in difflib.ndiff(lwords, rwords):
opcode, word = cmp[0], cmp[2:]
if opcode == ' ':
# unchanged word: ' word'
cmp_tokens.append(CmpToken(word))
elif opcode == '-':
# old state: '- bzw' '- .'
if not cmp_tokens[-1].has_diff():
cmp_tokens.append(CmpToken())
cmp_tokens[-1].oadd(word)
elif opcode == '+':
# new state: '+ bzw.'
if not cmp_tokens[-1].has_diff():
cmp_tokens.append(CmpToken())
cmp_tokens[-1].nadd(word)
# Link adjacent tokens to each other. This will be helpful later
# when knowledge of context words is required.
if CmpToken.WITH_CONTEXT:
for idx in range(1, len(cmp_tokens)):
cmp_tokens[idx-1].link_to(cmp_tokens[idx])
# print(cmp_tokens)
# print([cmpt for cmpt in cmp_tokens if cmpt.has_diff()])
# for grp in cmp_tokens:
# if grp.has_diff():
# print(grp.as_one_line())
# keep those tokens only that changed.
self.diff_tokens = [cmpt for cmpt in cmp_tokens if cmpt.has_diff()]
def _select_different_lines(self, exp: List[str], obs: List[Line]) -> None:
'''Build pairs of expected and observed lines and select only those
that differ in tokenization.
Sets self.diff_lines: List[Tuple[str, Line]]
'''
self.diff_lines = []
for e, o in zip(exp, obs):
if e != o.tokenized():
self.diff_lines.append((e, o))
if self.debug:
print(f"--- Pairs that differ: {len(self.diff_lines)} ---")
for e, o in self.diff_lines:
print("<", e)
print(">", o.tokenized())
print()
def _validate(self, litems: List, ritems: List):
'''Validate input'''
llen, rlen = len(litems), len(ritems)
assert llen == rlen, \
f"Number of lines does not match: {llen} vs {rlen}"
class CmpToken(object):
'''A token from the text that represents old and new states thereof.
If the token splitting changed, it has old and new states and .old_parts
and .new_parts are filled in.
Otherwise, if the token splitting did not change, only .unchanged_parts
will be filled.
'''
    WITH_CONTEXT = True
def __init__(self, word=None):
self.old_parts = []
self.new_parts = []
self.unchanged_parts = []
if word:
self.add(word)
# links to surrounding groups to provide context
self._previous = None
self._next = None
def add(self, token: str):
'''Add given <token> as unchanged.'''
self.unchanged_parts.append(token)
def oadd(self, token: str):
'''Add given <token> as an old state'''
self.old_parts.append(token)
def nadd(self, token: str):
'''Add given <token> as a new state'''
self.new_parts.append(token)
def __repr__(self):
msg = "<{}: unchanged={}, old={}, new={}>".format(
self.__class__.__name__, self.unchanged_parts, self.old_parts,
self.new_parts)
return msg
def has_diff(self):
return len(self.old_parts) or len(self.new_parts)
def __str__(self):
        '''Simplistic, but produces the correct result when the current
        object carries no diff (has_diff() is False).'''
return self._join(self.unchanged_parts + self.old_parts + self.new_parts)
def as_one_line(self) -> str:
'''Serialize current object as *one* line'''
if self.has_diff():
string = "{} <> {}".format(
self._join(self.old_parts), self._join(self.new_parts))
if self.WITH_CONTEXT:
if self._previous:
string = "{}\t{}".format(str(self._previous), string)
if self._next:
string = "{}\t{}".format(string, str(self._next))
else:
string = "= {}".format(self._join(self.unchanged_parts))
return string
# def _xxx(self):
# # on two lines, provides better context in case context item(s)
# # is/are also groups with difference (has_diff = True)
# # < context litems
# # > content ritems
# # string = "< {}\n> {}".format(self._join(self.old_parts),
# # self._join(self.new_parts))
# return str
def _join(self, items: List[str]) -> str:
'''Serialize given list of strings to a tokenized string'''
return " ".join(items)
def link_to(self, other: 'CmpToken'):
'''Link current token and <other> token together'''
self._next = other
other._previous = self
|
python
|
from django.contrib import admin
from .models import subCategory
admin.site.register(subCategory)
# Register your models here.
|
python
|
import requests
import convertapi
from io import BytesIO
from .exceptions import *
class Client:
def get(self, path, params = {}, timeout = None):
timeout = timeout or convertapi.timeout
r = requests.get(self.url(path), params = params, headers = self.headers(), timeout = timeout)
return self.handle_response(r)
def post(self, path, payload, timeout = None):
timeout = timeout or convertapi.timeout
r = requests.post(self.url(path), data = payload, headers = self.headers(), timeout = timeout)
return self.handle_response(r)
def upload(self, io, filename):
url = convertapi.base_uri + 'upload'
encoded_filename = requests.utils.quote(filename)
headers = self.headers()
headers.update({
'Content-Disposition': "attachment; filename*=UTF-8''" + encoded_filename,
})
r = requests.post(url, data = io, headers = headers, timeout = convertapi.upload_timeout)
return self.handle_response(r)
def download(self, url, path):
r = requests.get(url, stream = True, timeout = convertapi.download_timeout)
with open(path, 'wb') as f:
for chunk in r.iter_content(chunk_size = 1024):
if chunk:
f.write(chunk)
return path
def download_io(self, url):
response = requests.get(url, timeout = convertapi.download_timeout)
return BytesIO(response.content)
def handle_response(self, r):
try:
r.raise_for_status()
except requests.RequestException as e:
try:
raise ApiError(r.json())
except ValueError:
raise e
return r.json()
def url(self, path):
return "%s%s?Secret=%s" % (convertapi.base_uri, path, convertapi.api_secret)
def headers(self):
return {
'User-Agent': convertapi.user_agent,
}
|
python
|
from django.urls import reverse
from django.shortcuts import render
from django.utils import timezone, crypto
from django.core.paginator import Paginator
from django.http.response import HttpResponse, HttpResponseRedirect, JsonResponse, FileResponse
from django.views.decorators.http import require_http_methods
from django.template.loader import get_template
from django.contrib import messages
from django.contrib.auth import authenticate, login as auth_login, logout as auth_logout, update_session_auth_hash
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from xhtml2pdf import pisa
from functools import reduce
from Easynote import forms, models
from Easynote.lib import const, aes
import binascii, bleach, io, json
# Create your views here.
@require_http_methods("GET")
def handler403(request, *args, **kwargs):
"""
handler403 view. Handle 403 Http error and render 403 error page.
:param request: HttpRequest instance. Must be HttpRequest class.
:param *args: List instance. Must be a List class.
:param **kwargs: Dict instance. Must be a Dict class.
:rtype: HttpResponse class.
:return: HttpResponse.
"""
return render(request, "errors/403.html", {})
@require_http_methods("GET")
def handler404(request, *args, **kwargs):
"""
handler404 view. Handle 404 Http error and render 404 error page.
:param request: HttpRequest instance. Must be HttpRequest class.
:param *args: List instance. Must be a List class.
:param **kwargs: Dict instance. Must be a Dict class.
:rtype: HttpResponse class.
:return: HttpResponse.
"""
return render(request, "errors/404.html", {})
@require_http_methods("GET")
def handler500(request, *args, **kwargs):
"""
handler500 view. Handle 500 Http error and render 500 error page.
:param request: HttpRequest instance. Must be HttpRequest class.
:param *args: List instance. Must be a List class.
:param **kwargs: Dict instance. Must be a Dict class.
:rtype: HttpResponse class.
:return: HttpResponse.
"""
return render(request, "errors/500.html", {})
@require_http_methods("GET")
def index(request):
"""
index view. Handle index call GET then return to login page.
:param request: Must be <HttpRequest>
:return: <HttpResponse> instance
:rtype: <HttpResponse>
"""
return HttpResponseRedirect(reverse("login", args=()))
@require_http_methods(["GET","POST"])
def login(request):
"""
login view. Handle login user.
:param request: Must be <HttpRequest>
:return: <HttpResponse> instance
:rtype: <HttpResponse>
"""
# check whether user is already logged
if request.user.is_authenticated:
return HttpResponseRedirect(reverse("dashboard", args=()))
# GET call
if request.method == "GET":
context = { "form" : forms.AuthenticationForm() }
else:
# POST call
form = forms.AuthenticationForm(data=request.POST)
if form.is_valid():
username = form.cleaned_data["username"]
password = form.cleaned_data["password"]
user = authenticate(request, username=username, password=password)
if user is None:
context = { "form" : form }
messages.error(request, "Invalid username or password.", extra_tags="login")
else:
profile = models.Profiles.objects.get(user=User.objects.get(username=username))
master_key = crypto.hashlib.pbkdf2_hmac("sha256", password.encode(), profile.salt.encode(), 100000)
request.session["master_key"] = master_key.hex()
auth_login(request, user)
return HttpResponseRedirect(reverse("dashboard", args=()))
else:
context = { "form" : form }
# form errors
# errors displayed in form
for k,v in form.errors.items():
messages.error(request, v.data[0].message, extra_tags=k)
return render(request, "registration/login.html", context)
@login_required
@require_http_methods("GET")
def logout(request):
"""
logout view. Handle logout user.
:param request: Must be <HttpRequest>
:return: <HttpResponse> instance
:rtype: <HttpResponse>
"""
request.session["master_key"] = ""
auth_logout(request)
return HttpResponseRedirect(reverse("login", args=()))
@require_http_methods(["GET","POST"])
def register(request):
"""
register view. Handle register user.
:param request: Must be <HttpRequest>
:return: <HttpResponse> instance
:rtype: <HttpResponse>
"""
# check whether user is already logged
if request.user.is_authenticated:
return HttpResponseRedirect(reverse("dashboard", args=()))
# GET call
if request.method == "GET":
context = { "form" : forms.RegisterForm() }
else:
# POST call
form = forms.RegisterForm(data=request.POST)
if form.is_valid():
username = bleach.clean(form.cleaned_data["username"], tags=[], strip=True)
password = bleach.clean(form.cleaned_data["password"], tags=[], strip=True)
confirm_password = bleach.clean(form.cleaned_data["confirm_password"], tags=[], strip=True)
if password == confirm_password:
users = User.objects.filter(username=username)
if users.exists():
# do not view error message for security
context = { "form" : form }
else:
user = form.save(commit=False)
user.set_password(password)
user.save()
salt = crypto.get_random_string(const.PROFILES["salt"])
profile = models.Profiles.objects.create(salt=salt, user=User.objects.get(username=username))
# automatically log the user in after creating the account
auth_login(request, user)
master_key = crypto.hashlib.pbkdf2_hmac("sha256", password.encode(), salt.encode(), 100000)
request.session["master_key"] = master_key.hex()
return HttpResponseRedirect(reverse("dashboard", args=()))
else:
messages.error(request, "Password does not match.", extra_tags="register")
context = { "form" : form }
else:
context = { "form" : form }
# form errors
# errors displayed in form
for k,v in form.errors.items():
if k != "username":
messages.error(request, v.data[0].message, extra_tags=k)
return render(request, "registration/register.html", context)
@login_required
@require_http_methods("GET")
def dashboard(request):
"""
dashboard view. Handle a GET call to the dashboard page.
:param request: Must be <HttpRequest>
:return: <HttpResponse> instance
:rtype: <HttpResponse>
"""
return render(request, "dashboard.html", {})
@login_required
@require_http_methods("GET")
def view_available_notes(request):
"""
view_available_notes view. Show all available notes for the user.
:param request: Must be <HttpRequest>
:return: <HttpResponse> instance
:rtype: <HttpResponse>
"""
# GET method
# Paginator builds the pagination and handles the GET parameter ?page=
# paginator.get_page returns the notes belonging to the current page
queryset = models.Notes.objects.filter(user=User.objects.get(username=request.user.username)).order_by("name")
paginator = Paginator(queryset, const.NOTES["items_per_page"])
page = request.GET.get("page")
notes = paginator.get_page(page)
context = { "notes":notes }
return render(request, "notes/notes.html", context)
@login_required
@require_http_methods(["GET","POST"])
def create_new_note(request):
"""
create_new_note view. Create a new <Notes> entry in the database from form input, then redirect to the notes/ page.
:param request: Must be <HttpRequest>
:return: <HttpResponse> instance
:rtype: <HttpResponse>
"""
if request.method == "GET":
# GET call
# create Note form
context = { "form":forms.NewNoteForm() }
else:
# POST call
# create Note form with POST inputs
form = forms.NewNoteForm(data=request.POST)
if form.is_valid():
name = bleach.clean(form.cleaned_data["name"], tags=[], strip=True)
summary = bleach.clean(form.cleaned_data["summary"], tags=const.BLEACH["AUTHORIZED_TAGS"],
attributes=const.BLEACH["AUTHORIZED_ATTRIBUTES"], styles=const.BLEACH["AUTHORIZED_STYLES"],
strip=True)
notes = models.Notes.objects.filter(name=name, user=User.objects.get(username=request.user.username))
if notes.exists():
# add error message in request
# tag : notes
# errors displayed with topper.js
messages.error(request, "A note with this name already in use.", extra_tags="notes")
elif len(name) > 0 and len(summary) > 0:
# generate a random symmetric key
# encrypt the summary with the random symmetric key
key = crypto.get_random_string(const.KEYS["key"])
algorithm = aes.AdvancedEncryptionStandard(key.encode())
ciphersummary = algorithm.encrypt(summary.encode())
# create note entry in database
note = form.save(commit=False)
note.name = name
note.summary = ciphersummary.hex()
note.published_date = timezone.now()
note.updated_date = timezone.now()
note.changes = 0
note.views = 0
note.exports = 0
note.user = User.objects.get(username=request.user.username)
note.save()
# encrypt the symmetric key with the master_key
master_key = binascii.unhexlify(request.session["master_key"])
algorithm = aes.AdvancedEncryptionStandard(master_key)
cipherkey = algorithm.encrypt(key.encode())
# create key entry
models.Keys.objects.create(key=cipherkey.hex(), note=note)
return HttpResponseRedirect(reverse("view_available_notes", args=()))
else:
# add error message in request
# tag : notes
# errors displayed with topper.js
messages.error(request, "NAME or SUMMARY field is empty.", extra_tags="notes")
else:
# form errors
# errors displayed in form
for k,v in form.errors.items():
messages.error(request, v.data[0].message, extra_tags=k)
context = { "form":form }
return render(request, "notes/new-note.html", context)
@login_required
@require_http_methods(["GET","POST"])
def edit_current_note(request, name):
"""
edit_current_note view. Edit the current <Notes> entry and update it from form input.
:param request: Must be <HttpRequest>
:return: <HttpResponse> instance
:rtype: <HttpResponse>
"""
if request.method == "GET":
# GET call
try:
# recover the encrypted symmetric key
# convert the hexadecimal string to bytes
note = models.Notes.objects.get(name=name, user=User.objects.get(username=request.user.username))
key = models.Keys.objects.get(note=note)
cipherkey = binascii.unhexlify(key.key)
# decrypt key with master key
master_key = binascii.unhexlify(request.session["master_key"])
algorithm = aes.AdvancedEncryptionStandard(master_key)
plaintext_key = algorithm.decrypt(cipherkey)
# convert the hexadecimal string to bytes
# decrypt the summary with the symmetric key
algorithm = aes.AdvancedEncryptionStandard(plaintext_key)
ciphersummary = binascii.unhexlify(note.summary)
summary = algorithm.decrypt(ciphersummary)
data = { "name":note.name, "summary":summary.decode() }
form = forms.EditNoteForm(initial=data)
context = { "note":note, "form":form }
return render(request, "notes/edit-note.html", context)
except models.Notes.DoesNotExist as err:
# raised when the note does not exist
# add error message in request
# tag : notes
# errors displayed with topper.js
messages.error(request, err, extra_tags="notes")
return HttpResponseRedirect(reverse("view_available_notes",args=()))
else:
# POST call
form = forms.EditNoteForm(data=request.POST)
if form.is_valid():
summary = bleach.clean(form.cleaned_data["summary"], tags=const.BLEACH["AUTHORIZED_TAGS"],
attributes=const.BLEACH["AUTHORIZED_ATTRIBUTES"], styles=const.BLEACH["AUTHORIZED_STYLES"],
strip=True)
if len(summary) > 0:
try:
# recover the encrypted symmetric key
# convert the hexadecimal string to bytes
note = models.Notes.objects.get(name=name, user=User.objects.get(username=request.user.username))
key = models.Keys.objects.get(note=note)
cipherkey = binascii.unhexlify(key.key)
# decrypt key with master key
master_key = binascii.unhexlify(request.session["master_key"])
algorithm = aes.AdvancedEncryptionStandard(master_key)
plaintext_key = algorithm.decrypt(cipherkey)
# encrypt the new summary with the symmetric key
algorithm = aes.AdvancedEncryptionStandard(plaintext_key)
ciphersummary = algorithm.encrypt(summary.encode())
note.summary = ciphersummary.hex()
note.updated_date = timezone.now()
note.changes = note.changes + 1
note.save()
except models.Notes.DoesNotExist as err:
# raised when the note does not exist
# add error message in request
# tag : notes
# errors displayed with topper.js
messages.error(request, err, extra_tags="notes")
return HttpResponseRedirect(reverse("view_available_notes", args=()))
else:
# shown when the SUMMARY field is empty
# add error message in request
# tag : notes
# errors displayed with topper.js
messages.error(request, "SUMMARY field is empty.", extra_tags="notes")
else:
# form errors
# errors displayed in form
for k,v in form.errors.items():
messages.error(request, v.data[0].message, extra_tags=k)
return HttpResponseRedirect(reverse("edit_current_note", args=(name,)))
@login_required
@require_http_methods("GET")
def view_current_note(request, name):
"""
view_current_note view. Handle a GET call to notes/view/.
:param request: Must be <HttpRequest>
:return: <HttpResponse> instance
:rtype: <HttpResponse>
"""
try:
# recover the encrypted symmetric key
# convert the hexadecimal string to bytes
note = models.Notes.objects.get(name=name, user=User.objects.get(username=request.user.username))
key = models.Keys.objects.get(note=note)
cipherkey = binascii.unhexlify(key.key)
# decrypt key with master key
master_key = binascii.unhexlify(request.session["master_key"])
algorithm = aes.AdvancedEncryptionStandard(master_key)
plaintext_key = algorithm.decrypt(cipherkey)
# convert the hexadecimal string to bytes
# decrypt the summary with the symmetric key
algorithm = aes.AdvancedEncryptionStandard(plaintext_key)
ciphersummary = binascii.unhexlify(note.summary)
summary = algorithm.decrypt(ciphersummary)
# update note stats
note.views = note.views + 1
note.save()
context = { "note":note, "summary":summary.decode() }
return render(request, "notes/view-note.html", context)
except models.Notes.DoesNotExist as err:
# raised when the note does not exist
# add error message in request
# tag : notes
# errors displayed with topper.js
messages.error(request, err, extra_tags="notes")
return HttpResponseRedirect(reverse("view_available_notes",args=()))
@login_required
@require_http_methods("POST")
def delete_current_note(request):
"""
delete_current_note view. Handle a POST call to notes/delete/ and delete the note named in the POST form for the user.
:param request: Must be <HttpRequest>
:return: <HttpResponse> instance
:rtype: <HttpResponse>
"""
try:
name = request.POST["name"]
note = models.Notes.objects.get(name=name, user=User.objects.get(username=request.user.username))
note.delete()
except models.Notes.DoesNotExist as err:
# raised when the note does not exist
# add error message in request
# tag : notes
# errors displayed with topper.js
messages.error(request, err, extra_tags="notes")
except KeyError as err:
# raised when the name field is missing from the request
# add error message in request
# tag : notes
# errors displayed with topper.js
messages.error(request, "NAME field is mandatory.", extra_tags="notes")
return HttpResponseRedirect(reverse("view_available_notes", args=()))
@login_required
@require_http_methods("GET")
def export_current_note(request, name):
"""
export_current_note view. Handle a GET call to notes/export/ and export the note as a PDF file.
:param request: Must be <HttpRequest>
:return: PDF file.
:rtype: <HttpResponse>
"""
def render_to_pdf(template, context):
"""
render_to_pdf. Render the given template with the context and convert the HTML to a PDF.
:param template: Template path.
:param context: Template context dict.
:return: PDF bytes, or None if the PDF could not be generated.
:rtype: bytes
"""
template = get_template(template)
html = template.render(context)
result = io.BytesIO()
pdf = pisa.pisaDocument(io.BytesIO(html.encode("ISO-8859-1")), result)
if pdf.err:
return None
else:
return result.getvalue()
try:
# recover the encrypted symmetric key
# convert the hexadecimal string to bytes
note = models.Notes.objects.get(name=name, user=User.objects.get(username=request.user.username))
key = models.Keys.objects.get(note=note)
cipherkey = binascii.unhexlify(key.key)
# decrypt key with master key
master_key = binascii.unhexlify(request.session["master_key"])
algorithm = aes.AdvancedEncryptionStandard(master_key)
plaintext_key = algorithm.decrypt(cipherkey)
# convert the hexadecimal string to bytes
# decrypt the summary with the symmetric key
algorithm = aes.AdvancedEncryptionStandard(plaintext_key)
ciphersummary = binascii.unhexlify(note.summary)
summary = algorithm.decrypt(ciphersummary)
context = { "note":note, "summary":summary.decode() }
result = render_to_pdf("notes/export-note.html", context)
if result:
# update note stats
note.exports = note.exports + 1
note.save()
return HttpResponse(result, content_type="application/pdf")
else:
messages.error(request, "Unable to generate the PDF file.", extra_tags="notes")
return HttpResponseRedirect(reverse("view_current_note", args=(name,)))
except models.Notes.DoesNotExist as err:
# raised when the note does not exist
# add error message in request
# tag : notes
# errors displayed with topper.js
messages.error(request, err, extra_tags="notes")
return HttpResponseRedirect(reverse("view_available_notes", args=()))
@login_required
@require_http_methods("GET")
def search_note(request):
"""
search_note view. Get all <Notes> whose name contains the query.
:param request: HttpRequest instance. Must be a HttpRequest class.
:return: JSONResponse instance.
:rtype: JSONResponse class.
"""
if request.is_ajax():
try:
query = request.GET["query"]
# check whether query is not empty
if len(query) > 0:
notes = models.Notes.objects.filter(name__contains=query, user=User.objects.get(username=request.user.username)).order_by("name")
else:
notes = models.Notes.objects.filter(user=User.objects.get(username=request.user.username)).order_by("name")
notes = [ note.as_dict() for note in notes ]
context = { "notes" : notes }
return JsonResponse(context)
except KeyError:
# add error message in request
# tag : notes
# errors displayed with topper.js
messages.error(request, "QUERY field is required.", extra_tags="notes")
return HttpResponseRedirect(reverse("view_available_notes",args=()))
@login_required
@require_http_methods("GET")
def view_current_profile(request):
"""
view_current_profile view. Display the current user profile; the page also hosts the password change form.
:param request: HttpRequest instance. Must be a HttpRequest class.
:return: HttpResponse instance.
:rtype: HttpResponse class.
"""
return render(request, "account/profile.html", {})
@login_required
@require_http_methods("POST")
def change_current_password(request):
"""
change_current_password view. Decrypt current keys then encrypt keys with new master key,
update current user session and change the user password.
:param request: HttpRequest instance. Must be a HttpRequest class.
:return: HttpResponse instance.
:rtype: HttpResponse class.
"""
try:
form = forms.ChangePasswordForm(data=request.POST)
if form.is_valid():
username = bleach.clean(form.cleaned_data["username"], tags=[], strip=True)
current_password = bleach.clean(form.cleaned_data["current_password"], tags=[], strip=True)
new_password = bleach.clean(form.cleaned_data["new_password"], tags=[], strip=True)
confirm_password = bleach.clean(form.cleaned_data["confirm_password"], tags=[], strip=True)
if username == request.user.username:
user = models.User.objects.get(username=request.user.username)
if user.check_password(current_password):
if new_password == confirm_password:
notes = models.Notes.objects.filter(user=user)
current_master_key = binascii.unhexlify(request.session["master_key"])
profile = models.Profiles.objects.get(user=user)
new_master_key = crypto.hashlib.pbkdf2_hmac("sha256", new_password.encode(), profile.salt.encode(), 100000)
old_algorithm = aes.AdvancedEncryptionStandard(current_master_key)
new_algorithm = aes.AdvancedEncryptionStandard(new_master_key)
for note in notes:
key = models.Keys.objects.get(note=note)
cipherkey = binascii.unhexlify(key.key)
# decrypt the note key with the current master key
plaintext_key = old_algorithm.decrypt(cipherkey)
# re-encrypt the note key with the new master key
cipherkey = new_algorithm.encrypt(plaintext_key)
key.key = cipherkey.hex()
key.save()
user.set_password(new_password)
user.save()
update_session_auth_hash(request, user)
request.session["master_key"] = new_master_key.hex()
else:
messages.error(request, "Password does not match.", extra_tags="profiles")
else:
messages.error(request, "Invalid password.", extra_tags="profiles")
else:
messages.error(request, "Invalid username.", extra_tags="profiles")
else:
# form errors
# errors displayed in form
for k,v in form.errors.items():
messages.error(request, v.data[0].message, extra_tags=k)
except models.User.DoesNotExist as err:
messages.error(request, err, extra_tags="profiles")
return HttpResponseRedirect(reverse("view_current_profile", args=()))
@login_required
@require_http_methods("GET")
def export_all_notes(request):
"""
export_all_notes view. Export all notes as a JSON file.
:param request: HttpRequest instance. Must be a HttpRequest class.
:return: HttpResponse instance.
:rtype: HttpResponse class.
"""
notes = models.Notes.objects.filter(user=User.objects.get(username=request.user.username))
if len(notes) > 0:
data = {}
data["notes"] = []
master_key = binascii.unhexlify(request.session["master_key"])
for note in notes:
key = models.Keys.objects.get(note=note)
cipherkey = binascii.unhexlify(key.key)
# decrypt the note key with the master key
algorithm = aes.AdvancedEncryptionStandard(master_key)
plaintext_key = algorithm.decrypt(cipherkey)
# convert the hexadecimal string to bytes
# decrypt the summary with the symmetric key
algorithm = aes.AdvancedEncryptionStandard(plaintext_key)
ciphersummary = binascii.unhexlify(note.summary)
summary = algorithm.decrypt(ciphersummary)
data["notes"].append({ "name":note.name,
"summary":bleach.clean(summary.decode(), tags=[], strip=True),
"published_date":timezone.datetime.strftime(note.published_date, format="%b. %d, %Y, %H:%m %p."),
"updated_date":timezone.datetime.strftime(note.updated_date, format="%b. %d, %Y, %H:%m %p."),
"author":request.user.username })
date = timezone.datetime.strftime(timezone.now(), format="%Y_%m_%d_%H_%M_%S")
filename = "-".join([ request.user.username,"notes",date ])
response = HttpResponse(json.dumps(data), content_type="application/json")
response["Content-Disposition"] = "attachment; filename={}.json".format(filename)
return response
else:
# shown when the user has no notes
# add error message in request
# tag : profiles
# errors displayed with topper.js
messages.error(request, "No notes available." , extra_tags="profiles")
return HttpResponseRedirect(reverse("view_current_profile", args=()))
@login_required
@require_http_methods("POST")
def delete_current_account(request):
"""
delete_current_account view. Delete current user account and all related data in database (Profile, Notes, etc.)
:param request: HttpRequest instance. Must be a HttpRequest class.
:return: HttpResponse instance.
:rtype: HttpResponse class.
"""
try:
username = request.POST["username"]
if username == request.user.username:
user = User.objects.get(username=request.user.username)
user.delete()
return HttpResponseRedirect(reverse("login", args=()))
except KeyError as err:
# raised when the username field is missing from the request
# add error message in request
# tag : profiles
# errors displayed with topper.js
messages.error(request, "Username field is mandatory.", extra_tags="profiles")
else:
# shown when the username is invalid
# add error message in request
# tag : profiles
# errors displayed with topper.js
messages.error(request, "Invalid username.", extra_tags="profiles")
return HttpResponseRedirect(reverse("view_current_profile", args=()))
@login_required
@require_http_methods("GET")
def get_statistics(request):
"""
get_statistics view. Get notes statistics (changes, views, exports, etc.)
:param request: HttpRequest instance. Must be a HttpRequest class.
:return: JsonResponse instance.
:rtype: JsonResponse class.
"""
if request.is_ajax():
try:
months = request.GET["months"]
notes = models.Notes.objects.filter(user=User.objects.get(username=request.user.username))
changes, views, exports = 0,0,0
if len(notes) > 0:
changes = reduce(lambda x,y: x+y, [ el.changes for el in notes ])
views = reduce(lambda x,y: x+y, [ el.views for el in notes ])
exports = reduce(lambda x,y: x+y, [ el.exports for el in notes ])
if months == "1":
notes = [ el.as_dict() for el in notes if (timezone.now() - el.published_date).days <= 28 ]
elif months == "3":
notes = [ el.as_dict() for el in notes if (timezone.now() - el.published_date).days <= 84 ]
elif months == "6":
notes = [ el.as_dict() for el in notes if (timezone.now() - el.published_date).days <= 168 ]
else:
notes = [ el.as_dict() for el in notes if (timezone.now() - el.published_date).days <= 336 ]
context = { "notes_count":len(notes),
"changes":changes,
"views":views,
"exports":exports,
"notes": notes }
return JsonResponse(context)
except KeyError:
# add error message in request
# tag : notes
# errors displayed with topper.js
messages.error(request, "MONTHS field is required.", extra_tags="statistics")
return HttpResponseRedirect(reverse("dashboard",args=()))
|
python
|
from tool.runners.python import SubmissionPy
class BebertSubmission(SubmissionPy):
def run(self, s):
positions = [line.split(", ") for line in s.splitlines()]
positions = [(int(x), int(y)) for x, y in positions]
# print(positions)
obj_less = 10000 # if len(positions) != 6 else 32
min_x = min(positions, key=lambda p: p[0])[0]
max_x = max(positions, key=lambda p: p[0])[0]
min_y = min(positions, key=lambda p: p[1])[1]
max_y = max(positions, key=lambda p: p[1])[1]
# print(min_x, max_x, min_y, max_y)
grid = [[]] * (max_y - min_y + 1)
for y in range(max_y - min_y + 1):
grid[y] = [0] * (max_x - min_x + 1)
for p, (x, y) in enumerate(positions):
for dx in range(max_x - x + 1):
for dy in range(max_y - y + 1):
j = x + dx - min_x
i = y + dy - min_y
grid[i][j] += dx + dy
for dy in range(1, y - min_y + 1):
j = x + dx - min_x
i = y - dy - min_y
grid[i][j] += dx + dy
for dx in range(1, x - min_x + 1):
for dy in range(max_y - y + 1):
j = x - dx - min_x
i = y + dy - min_y
grid[i][j] += dx + dy
for dy in range(1, y - min_y + 1):
j = x - dx - min_x
i = y - dy - min_y
grid[i][j] += dx + dy
count = 0
for line in grid:
for d in line:
if d < obj_less:
count += 1
return count
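# Hedged note (an addition for illustration, not part of the original
# submission): this appears to solve the "size of the region whose total
# Manhattan distance to all coordinates is below a threshold" puzzle
# (Advent of Code 2018, day 6, part 2). The four nested loops accumulate
# |dx| + |dy| from every coordinate into each grid cell, quadrant by quadrant,
# and the final pass counts the cells whose accumulated distance is under
# obj_less.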
|
python
|
from setuptools import setup, find_packages
setup(
name='ysedu',
version='1.0',
author='Yuji Suehiro',
packages=find_packages(),
url='https://github.com/YujiSue/education',
description='Sample codes used for education.'
)
|
python
|
import re
text = input().lower()
word = input().lower()
pattern = rf"\b{word}\b"
valid_matches = len(re.findall(pattern, text))
print(valid_matches)
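# Hedged example (illustrative only): for the inputs
#   text = "One two three one two ONE."
#   word = "one"
# the lowered text is searched with the pattern r"\bone\b", which matches
# three whole words, so the script prints 3.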
|
python
|
import torch
def center_of_mass(x, pytorch_grid=True):
"""
Center of mass layer
Arguments
---------
x : network output
pytorch_grid : use PyTorch convention for grid (-1,1)
Return
------
C : center of mass for each channel
"""
n_batch, chs, dim1, dim2, dim3 = x.shape
eps = 1e-8
if pytorch_grid:
arange1 = torch.linspace(-1,1,dim1).float().view(1,1,-1).repeat(n_batch, chs, 1)
arange2 = torch.linspace(-1,1,dim2).float().view(1,1,-1).repeat(n_batch, chs, 1)
arange3 = torch.linspace(-1,1,dim3).float().view(1,1,-1).repeat(n_batch, chs, 1)
else:
arange1 = torch.arange(dim1).float().view(1,1,-1).repeat(n_batch, chs, 1)
arange2 = torch.arange(dim2).float().view(1,1,-1).repeat(n_batch, chs, 1)
arange3 = torch.arange(dim3).float().view(1,1,-1).repeat(n_batch, chs, 1)
if x.is_cuda:
arange1, arange2, arange3 = arange1.cuda(), arange2.cuda(), arange3.cuda()
m1 = x.sum((3,4)) #mass along the dimN, shape [n_batch, chs, dimN]
M1 = m1.sum(-1, True) + eps #total mass along dimN
m2 = x.sum((2,4))
M2 = m2.sum(-1, True) + eps
m3 = x.sum((2,3))
M3 = m3.sum(-1, True) + eps
c1 = (arange1*m1).sum(-1,True)/M1 #center of mass along dimN, shape [n_batch, chs, 1]
c2 = (arange2*m2).sum(-1,True)/M2
c3 = (arange3*m3).sum(-1,True)/M3
C = torch.cat([c3,c2,c1],-1) #center of mass, shape [n_batch, chs, 3]
return C.transpose(-2,-1)
def close_form_affine(moving_kp, target_kp):
"""
Obtain the affine matrix that aligns moving keypoints to target keypoints.
The affine matrix is computed via a closed-form least-squares solution.
Arguments
---------
moving_kp : keypoints from the moving image [n_batch, 3, n_keypoints]
target_kp : keypoints from the fixed/target image [n_batch, 3, n_keypoints]
Return
------
out : affine matrix [n_batch, 3, 4]
"""
Y_cm = moving_kp
Y_tg = target_kp
# Initialize
one = torch.ones(Y_cm.shape[0], 1, Y_cm.shape[2]).float() #Add a row of ones
one = one.cuda() if Y_cm.is_cuda else one
_Y_cm = torch.cat([Y_cm, one],1)
out = torch.bmm(_Y_cm, torch.transpose(_Y_cm,-2,-1))
out = torch.inverse(out)
out = torch.bmm(torch.transpose(_Y_cm,-2,-1), out)
out = torch.bmm(Y_tg, out)
return out
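# Minimal usage sketch (an addition for illustration, not part of the original
# module). The tensor shapes follow the docstrings above; the keypoints are
# random, so the recovered affine is only a least-squares fit.
if __name__ == "__main__":
    x = torch.zeros(2, 4, 8, 8, 8)           # (n_batch, chs, dim1, dim2, dim3)
    x[:, :, 2, 3, 4] = 1.0                   # a single "hot" voxel per channel
    kp = center_of_mass(x)                   # -> (2, 3, 4), one 3D point per channel
    moving_kp = torch.rand(2, 3, 6)          # 6 hypothetical keypoints per batch
    target_kp = torch.rand(2, 3, 6)
    affine = close_form_affine(moving_kp, target_kp)  # -> (2, 3, 4)
    print(kp.shape, affine.shape)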
|
python
|
# ---------------------------------------------------------------------
# Dispose Request
# ---------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
from typing import Union
# NOC modules
from .clearreq import ClearRequest
from .eventreq import EventRequest
from .raisereq import RaiseRequest
from .ensuregroupreq import EnsureGroupRequest
DisposeRequest = Union[ClearRequest, EventRequest, RaiseRequest, EnsureGroupRequest]
|
python
|
#!/usr/bin/env python3
# This script will run using the default Python 3 environment
# where LibreOffice's scripts are installed to (at least in Ubuntu).
# This converter is very similar to unoconv but adds an option to remove
# line numbers; it is also simpler because it is more tailored to the use case.
# https://github.com/dagwieers/unoconv
from __future__ import absolute_import, print_function
import argparse
import os
import sys
import logging
import subprocess
import atexit
from time import sleep
from contextlib import contextmanager
from typing import Optional, Sequence
# pylint: disable=import-error
import uno # type: ignore
from com.sun.star.beans import PropertyValue # type: ignore
from com.sun.star.connection import NoConnectException # type: ignore
from com.sun.star.document import RedlineDisplayType # type: ignore
# pylint: enable=import-error
LOGGER = logging.getLogger(__name__)
FILTER_NAME_BY_EXT = {
'doc': 'MS Word 97',
'docx': 'Office Open XML Text',
'dotx': 'Office Open XML Text',
'rtf': 'Rich Text Format',
'pdf': 'writer_web_pdf_Export'
}
VALID_OUTPUT_FORMATS = sorted(FILTER_NAME_BY_EXT.keys())
def parse_args(argv: Optional[Sequence[str]] = None):
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command')
convert_parser = subparsers.add_parser('convert')
convert_parser.add_argument(
'-f', '--format', type=str, required=True,
choices=VALID_OUTPUT_FORMATS,
help='Output format (ext)'
)
convert_parser.add_argument(
'-p', '--port', type=int, default=2002,
help='Port to the uno listener'
)
convert_parser.add_argument(
'--output-file', type=str,
help='Output file (if specified, only one input file should be used)'
)
convert_parser.add_argument(
'input_file', type=str, nargs='+',
help='Input files (does not support pdf)'
)
convert_parser.add_argument(
'--remove-line-no', action='store_true', default=False,
help='remove line number'
)
convert_parser.add_argument(
'--remove-header-footer', action='store_true', default=False,
help='remove header and footer (including page number)'
)
convert_parser.add_argument(
'--remove-redline', action='store_true', default=False,
help='remove redlines (track changes, by accepting all changes)'
)
convert_parser.add_argument(
'--keep-listener-running', action='store_true', default=False,
help='keep listener running in the background'
)
convert_parser.add_argument(
'-n', '--no-launch', action='store_true', default=False,
help='fail if no listener is found (default: launch one)'
)
start_listener_parser = subparsers.add_parser('start-listener')
start_listener_parser.add_argument(
'-p', '--port', type=int, default=2002,
help='Port to the uno listener'
)
parser.add_argument(
'--debug', action='store_true', default=False,
help='enable debug output'
)
args = parser.parse_args(argv)
if args.debug:
logging.getLogger().setLevel('DEBUG')
LOGGER.debug('args: %s', args)
return args
def get_start_listener_command(port: int) -> Sequence[str]:
return [
'soffice',
'--headless',
'--invisible',
'--nocrashreport',
'--nodefault',
'--nofirststartwizard',
'--nologo',
'--norestore',
'--accept=socket,host=localhost,port={port};urp;StarOffice.ServiceManager'.format(
port=port
)
]
def get_resolver():
local_context = uno.getComponentContext()
resolver = local_context.ServiceManager.createInstanceWithContext(
"com.sun.star.bridge.UnoUrlResolver", local_context
)
return resolver
def connect(resolver, port: int):
return resolver.resolve(
"uno:socket,host=localhost,port={port};urp;StarOffice.ComponentContext".format(
port=port
)
)
def connect_with_timeout(resolver, port: int, timeout: float):
delay = 0.5
elapsed = 0.0
while True:
try:
connect_result = connect(resolver, port)
LOGGER.debug('connected to port %s', port)
return connect_result
except NoConnectException as e:
if elapsed >= timeout:
LOGGER.debug(
'connection failed, timeout exceeded (%.1f >= %s)',
elapsed, timeout
)
raise e
LOGGER.debug('connection failed, try again in %.1f (%.1f)', delay, elapsed)
sleep(delay)
elapsed += delay
def start_listener(port: int) -> subprocess.Popen:
LOGGER.debug('starting listener on port %d', port)
return subprocess.Popen(
get_start_listener_command(port)
)
def stop_listener(listener_process: subprocess.Popen):
LOGGER.debug('stopping listener process with pid: %s', listener_process.pid)
return listener_process.terminate()
@contextmanager
def managed_connection(resolver, port: int, no_launch: bool, keep_listener_running: bool):
timeout = 10
try:
yield connect_with_timeout(resolver, port, timeout)
except NoConnectException as e:
if no_launch:
raise e
LOGGER.debug('failed to connect, try to start listener')
listener_process = start_listener(port)
try:
yield connect_with_timeout(resolver, port, timeout)
finally:
if not keep_listener_running:
stop_listener(listener_process)
@contextmanager
def managed_desktop(connection, keep_listener_running: bool):
LOGGER.debug('starting desktop session')
desktop = connection.ServiceManager.createInstanceWithContext(
"com.sun.star.frame.Desktop", connection
)
try:
yield desktop
finally:
try:
if not keep_listener_running:
LOGGER.debug('terminate desktop session')
desktop.terminate()
except Exception as e: # pylint: disable=broad-except
LOGGER.warning('caught exception while terminating desktop: %s', e)
def create_property_value(name, value):
property_value = PropertyValue()
property_value.Name = name
property_value.Value = value
return property_value
def dict_to_property_values(d):
return tuple((
create_property_value(key, value)
for key, value in d.items()
))
def property_set_to_dict(property_set):
return {
prop.Name: property_set.getPropertyValue(prop.Name)
for prop in property_set.getPropertySetInfo().getProperties()
}
def disable_document_header_footer(document):
styleFamilies = document.getStyleFamilies()
if not styleFamilies.hasByName('PageStyles'):
return
pageStyles = styleFamilies.getByName('PageStyles')
for styleName in pageStyles.getElementNames():
pageStyle = pageStyles.getByName(styleName)
pageStyle.setPropertyValue('HeaderIsOn', False)
pageStyle.setPropertyValue('FooterIsOn', False)
def convert_document_file(
desktop,
input_file: str,
output_file: str,
output_ext: str,
remove_line_no: bool = False,
remove_redline: bool = False,
remove_header_footer: bool = False
):
output_filter_name = FILTER_NAME_BY_EXT[output_ext]
input_file_url = uno.systemPathToFileUrl(os.path.realpath(input_file))
document = desktop.loadComponentFromURL(
input_file_url,
"_blank", 0,
dict_to_property_values({'Hidden': True, 'ReadOnly': True})
)
if not document:
raise RuntimeError('failed to load document: %s' % input_file_url)
try:
if remove_line_no:
document.getLineNumberingProperties().IsOn = False
if remove_header_footer:
disable_document_header_footer(document)
if remove_redline:
document.setPropertyValue('RedlineDisplayType', RedlineDisplayType.NONE)
output_url = "file://" + os.path.abspath(output_file)
LOGGER.debug("output_url: %s", output_url)
document.storeToURL(
output_url,
dict_to_property_values({'FilterName': output_filter_name})
)
finally:
# close, parameter: DeliverOwnership
# "true: delegates the ownership of ths closing object to any one
# which throw the CloseVetoException.
# This new owner has to close the closing object again
# if his still running processes will be finished."
document.close(True)
def convert(desktop, args: argparse.Namespace):
if args.output_file and len(args.input_file) > 1:
raise RuntimeError(
''.join([
'only one input file should be specified together with --output-file.'
' (input files: %s)'
]) % args.input_file
)
for input_filename in args.input_file:
LOGGER.info(
'processing: %s (%s)',
input_filename,
'{:,d}'.format(os.path.getsize(input_filename))
)
name, input_ext = os.path.splitext(input_filename)
if input_ext.startswith('.'):
input_ext = input_ext[1:]
if not args.output_file and input_ext == args.format:
raise RuntimeError(
''.join([
'input and output format should not be the same',
' (unless --output-file was specified): %s -> %s'
]) % (
input_ext, args.format
)
)
if args.output_file:
output_filename = args.output_file
else:
output_filename = name + '.' + args.format
convert_document_file(
desktop,
input_filename,
output_filename,
args.format,
remove_line_no=args.remove_line_no,
remove_header_footer=args.remove_header_footer,
remove_redline=args.remove_redline
)
def run(args: argparse.Namespace):
if args.command == 'convert':
resolver = get_resolver()
with managed_connection(
resolver, args.port,
no_launch=args.no_launch,
keep_listener_running=args.keep_listener_running) as connection:
with managed_desktop(connection, args.keep_listener_running) as desktop:
convert(desktop, args)
elif args.command == 'start-listener':
p = start_listener(args.port)
atexit.register(
lambda: stop_listener(p)
)
p.wait()
else:
raise RuntimeError('invalid command: %s' % args.command)
class ExitCodes:
UNO_CONNECTION_ERROR = 9
def main(argv: Optional[Sequence] = None):
args = parse_args(argv)
try:
run(args)
except NoConnectException as e:
LOGGER.error('failed to connect to uno service: %s', e, exc_info=e)
sys.exit(ExitCodes.UNO_CONNECTION_ERROR)
except Exception as e:
LOGGER.error('failed to run: %s (%s)', e, type(e), exc_info=e)
raise
if __name__ == '__main__':
logging.basicConfig(level='INFO')
main()
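# Hedged usage sketch (the file name doc_converter.py is assumed, not part of
# the original script):
#   python3 doc_converter.py start-listener --port 2002
#   python3 doc_converter.py convert -f pdf --remove-line-no --remove-header-footer input.docx
# The convert command connects to the soffice listener on the given port
# (starting one itself unless --no-launch is passed) and, when --output-file is
# not given, writes input.pdf next to the input document.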
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 23 18:53:28 2019
@author: Rajas khokle
Code Title - Time Series Modelling
"""
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
import matplotlib.pyplot as plt
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.stattools import acf,pacf
#from statsmodels.graphics.tsaplots import plot_acf,plot_pacf
# Create Connection to the database
engine = create_engine('postgres://postgres:[email protected]:5432/Capstone')
# Get the Diabetes Database
diabetes_read="select * from diabetes where diabetes.tranbnfcode = '0601012V0BBAEAD' "
df = pd.read_sql(diabetes_read,engine)
data = df.groupby(['period']).sum()
data['period'] = data.index
plt.plot(data.quantity)
# Now let's start building a model
data['dt'] = pd.to_datetime(data.period,format = '%Y%m',errors = 'coerce')
QD = pd.Series(data.quantity)
QD.index = data.dt
# Let's check the stationarity
def Station(ds,rolling_window):
rol_mean = ds.rolling(window=rolling_window).mean()
rol_std = ds.rolling(window=rolling_window).std()
# plot the time series
plt.plot(ds,color ='blue',label='Original')
plt.plot(rol_mean,color ='red',label='Rolling_Mean')
plt.plot(rol_std,color ='black',label='Rolling_STD')
plt.legend(loc = 'best')
plt.title('Rolling Mean and Std')
plt.show()
## Run Dickey Fuller Test
adftest = adfuller(ds,autolag = 'AIC')
adfoutput = pd.Series(adftest[0:4],index = ['adf','p-value','lags_used',
'number_of_observations'])
for key,value in adftest[4].items():
adfoutput['critical_value(%s)'%key] = value
return(adfoutput)
''' The p-value from the Dickey-Fuller test should be less than 0.05 and the ADF
statistic should be less than the critical value to conclude stationarity '''
# Check Stationarity of the data
Station(QD,12)
# Since the series is not stationary as-is, we need to find the
# lag for which it becomes stationary.
# For future automation, we could apply square roots and other transforms and
# evaluate stationarity for different transforms and lags. For the initial
# model, only the log transform with first-order differencing is considered.
# find optimal lag
# A lag greater than 12 is not advisable due to seasonality
p_val=[]
adf=[]
crit_diff = []
QD_log = np.log(QD) # np.sqrt and other functions may be used
for i in range(12):
diff = QD_log - QD_log.shift(i+1)
diff.dropna(inplace = True)
adfoutput = Station(diff,12)
p_val.append(adfoutput['p-value'])
adf.append(adfoutput['adf'])
crit_diff.append(adfoutput['adf'] - adfoutput['critical_value(1%)'])
lag = np.argmin(p_val) # here we use the best lag for getting difference
QD_log_diff = QD_log - QD_log.shift(lag+1)
QD_log_diff.dropna(inplace = True)
adfoutput2=Station(QD_log_diff,3)
# Decompose the model
ss_decomposition = seasonal_decompose(QD_log)
fig = plt.figure()
fig.set_size_inches(12,10)
fig = ss_decomposition.plot()
plt.show()
# Get the trend, seasonal and residual data
trend = ss_decomposition.trend
seasonal = ss_decomposition.seasonal
residual = ss_decomposition.resid
residual.dropna(inplace = True)
plt.plot(residual)
Station(residual,1)
# Let's compute the ACF and PACF on the log-differenced data
lag_acf = acf(QD_log_diff,nlags = 20)
lag_pacf = pacf(QD_log_diff,nlags=12,method = 'ols')
# plot the ACF and calculate the 95% confidence interval
CI = 1.96/np.sqrt(len(QD_log_diff))
plt.plot(lag_acf)
plt.axhline(y=0,linestyle='--',color = 'blue')
plt.axhline(y = -CI,linestyle='--',color = 'blue')
plt.axhline(y = CI,linestyle='--',color = 'blue')
plt.title('Autocorrelation Function')
# Plot PACF
plt.plot(lag_pacf)
plt.axhline(y=0,linestyle='--',color = 'blue')
plt.axhline(y = -CI,linestyle='--',color = 'blue')
plt.axhline(y = CI,linestyle='--',color = 'blue')
plt.title('Partial Autocorrelation Function')
# Find the first zero crossing and the first 95% confidence band crossing
pacf_zero_crossings = np.where(np.diff(np.sign(lag_pacf)))[0]
pacf_CI_crossings = np.where(np.diff(np.sign(lag_pacf-CI)))[0][0]
p = pacf_CI_crossings
acf_zero_crossing = np.where(np.diff(np.sign(lag_acf)))[0][0]
q = acf_zero_crossing
# Get the lag which is closest to the zero - It is the lag for which the data
# is most stationary.
P = np.argmin(abs(lag_pacf))
Q = np.argmin(abs(lag_acf))
# Construct ARIMA Model
I=1
order_tuple = (P,I,Q)
model = ARIMA(QD_log_diff,order_tuple,freq='MS')
results_AR = model.fit()
pred = results_AR.fittedvalues
sumabserror = np.sum(np.abs(pred-QD_log_diff))
plt.plot(pred)
plt.plot(QD_log_diff)
plt.title('SAE %2f'% sumabserror)
# Add the base log value back and exponentiate to recover the original scale
pred_log = pd.Series(QD_log[0],index = QD_log.index)
pred_log = pred_log.add(pred,fill_value=0)
pred_final = np.exp(pred_log)
plt.plot(pred_final)
plt.plot(QD)
### Forecasting
# Auto Arima
from pyramid.arima import auto_arima
stepwise_model = auto_arima(QD, start_p=1, start_q=1,
max_p=10, max_q=10, m=20,
start_P=0, seasonal=True,
d=1, D=1, trace=True,
error_action='ignore',
suppress_warnings=True,
stepwise=True)
stepwise_model.fit(QD)
future_forecast = stepwise_model.predict(n_periods=100)
future_forecast_ser = pd.Series(future_forecast,index = QD.index)
plt.plot(QD)
plt.plot(future_forecast_ser)
# FBprophet
diabetes_read="select * from diabetes where diabetes.tranbnfcode = '0601011L0AAACAC' "
df = pd.read_sql(diabetes_read,engine)
data = df.groupby(['period']).sum()
data['period'] = data.index
plt.plot(data.quantity)
data['dt'] = pd.to_datetime(data.period,format = '%Y%m',errors = 'coerce')
QD = pd.Series(data.quantity)
QD.index = data.dt
ds=QD.index
y = data.quantity
x = pd.DataFrame(y,ds)
x['ds'] = x.index
x.reset_index(inplace =True,drop =True)
x.columns = ['y','ds']
from fbprophet import Prophet
model = Prophet()
model.fit(x)
future = model.make_future_dataframe(periods=60,freq='M')
forecast = model.predict(future)
fig1 = model.plot(forecast)
|
python
|
if __name__ == '__main__':
import BrowserState
else:
from . import BrowserState
from PyQt5 import QtCore, QtWidgets
# The Sort dialog, with a grid of sorting buttons
class SortDialog(QtWidgets.QDialog):
# Constant giving the number of search keys
MaxSortKeyCount = 6
############# Signals #############
sort_request = QtCore.pyqtSignal(tuple,int) # tuple would be a field of form (type,text)
dialog_closed = QtCore.pyqtSignal()
############# Main Part #############
def __init__(self,sort_order):
super().__init__()
self._sort_order = sort_order
self._sort_order.sort_order_changed.connect(self._slot_sort_order_changed)
self._init_layout()
self._update_layout()
self.show()
def _init_layout(self):
# list of lists (row,col), where each row must have MaxSortKeyCount elements
# (which can be visible or hidden)
self._sort_buttons = []
# single list (of QLabels)
self._sort_labels = []
# initialize an empty grid layout for the sort buttons and labels
self._sort_layout = QtWidgets.QGridLayout()
self._close_button = QtWidgets.QPushButton('Close')
self._main_layout = QtWidgets.QVBoxLayout()
self._main_layout.addLayout(self._sort_layout)
self._main_layout.addWidget(self._close_button)
self.setLayout(self._main_layout)
# connect
self._close_button.clicked.connect(self.close)
def _update_layout(self):
# TODO maybe divide this into two functions, depending on whether the list of available fields
# has changed?
# make enough rows to cover all fields
if len(self._sort_order.get_available_fields()) > len(self._sort_buttons):
for i in range(len(self._sort_order.get_available_fields()) - len(self._sort_buttons)):
self._add_button_row()
# make the right number of rows visible
self._make_visible(len(self._sort_order.get_available_fields()),len(self._sort_order.get_available_fields()))
# update labels and status
self._update_labels()
self._update_button_state()
# adds a single row of buttons
def _add_button_row(self):
new_row_number = len(self._sort_buttons)
new_label = QtWidgets.QLabel('Row %d' % new_row_number)
new_buttons = [ QtWidgets.QPushButton(str(i+1)) for i in range(self.MaxSortKeyCount) ]
self._sort_labels.append(new_label)
self._sort_buttons.append(new_buttons)
self._sort_layout.addWidget(new_label,new_row_number,0)
def bind_slot(row,col):
res = lambda: self._slot_sort_button_clicked(row,col)
return res
for pos,bt in enumerate(new_buttons):
bt.setCheckable(True)
self._sort_layout.addWidget(bt,new_row_number,pos+1)
# make closure
# print('Lambda bound:',new_row_number,pos)
# def slot_func(pos2=pos):
# print('slot_func: pos2 is %d (id %s)' % (pos2,id(pos2)))
# self._slot_sort_button_clicked(new_row_number,pos2)
# bt.clicked.connect(slot_func)
bt.clicked.connect(bind_slot(new_row_number,pos))
# makes the upper left number of buttons visible, hides the rest
def _make_visible(self,vis_row_count,vis_col_count):
for row in range(len(self._sort_labels)):
# self._sort_labels[row].setEnabled(row < vis_row_count)
if row < vis_row_count:
self._sort_labels[row].show()
else:
self._sort_labels[row].hide()
for col in range(self.MaxSortKeyCount):
if row < vis_row_count and col < vis_col_count:
self._sort_buttons[row][col].show()
else:
self._sort_buttons[row][col].hide()
# self._sort_buttons[row][col].setEnabled(row < vis_row_count and col < vis_col_count)
# TODO
def _update_labels(self):
for pos,field in enumerate(self._sort_order.get_available_fields()):
field_type,field_text = field
if field_type == BrowserState.Fields.FieldType.Result:
formatted_field_text = '<i>' + field_text + '</i>'
else:
formatted_field_text = field_text
self._sort_labels[pos].setText(formatted_field_text)
def _update_button_state(self):
order = self._sort_order.get_order()
# order is a list of fields
for row,field in enumerate(self._sort_order.get_available_fields()):
try:
sel_col = order.index(field)
except ValueError:
sel_col = -1
for col in range(self.MaxSortKeyCount):
self._sort_buttons[row][col].setChecked(col == sel_col)
############# Slots and events #############
def _slot_sort_order_to_be_changed(self):
pass
def _slot_sort_order_changed(self,new_order):
self._update_layout()
def _slot_sort_button_clicked(self,row,col):
field = self._sort_order.get_available_fields()[row]
print('SORT BUTTON CLICKED: %d / %d, field is %s' % (row,col,field))
self.sort_request.emit(field,col)
def closeEvent(self,e):
print('Close event!')
self.dialog_closed.emit()
super().closeEvent(e)
if __name__ == '__main__':
class TestApp(QtWidgets.QApplication):
def __init__(self):
super().__init__([])
# make sort order object
self._sort_order = BrowserState.SortOrder()
# major functions: set_available_fields, sort_request
# placeholder for sort dialog if present
self._sort_dialog = None
# make window
self._main_win = QtWidgets.QWidget()
self._field_edit = QtWidgets.QTextEdit()
self._dialog_button = QtWidgets.QPushButton('Dialog')
self._dialog_button.setCheckable(True)
self._set_fields_button = QtWidgets.QPushButton('Set Fields')
self._close_button = QtWidgets.QPushButton('Close')
self._layout = QtWidgets.QVBoxLayout()
self._layout.addWidget(self._field_edit)
self._layout.addWidget(self._dialog_button)
self._layout.addWidget(self._set_fields_button)
self._layout.addWidget(self._close_button)
self._main_win.setLayout(self._layout)
self._main_win.show()
# connections
self._close_button.clicked.connect(self.quit,QtCore.Qt.QueuedConnection)
self._dialog_button.clicked.connect(self._slot_dialog_button_clicked)
self._set_fields_button.clicked.connect(self._slot_set_fields_button_clicked)
def _slot_dialog_button_clicked(self):
if self._sort_dialog is None:
self._sort_dialog = SortDialog(self._sort_order)
self._sort_dialog.dialog_closed.connect(self._slot_dialog_closed)
self._sort_dialog.sort_request.connect(self._sort_order.sort_request)
self._sort_dialog.show()
self._dialog_button.setChecked(True)
else:
self._sort_dialog.deleteLater()
self._sort_dialog = None
self._dialog_button.setChecked(False)
def _slot_set_fields_button_clicked(self):
# if self._sort_dialog is not None:
field_texts = self._field_edit.toPlainText().split('\n')
fields = [ (1,f) for f in field_texts ]
self._sort_order.set_available_fields(fields)
def _slot_dialog_closed(self):
self._sort_dialog.deleteLater()
self._sort_dialog = None
self._dialog_button.setChecked(False)
if __name__ == '__main__':
app = TestApp()
QtCore.pyqtRemoveInputHook()
app.exec_()
|
python
|
# This script generates the scoring and schema files
# Creates the schema, and holds the init and run functions needed to
# operationalize the Iris Classification sample
# Import data collection library. Only supported for docker mode.
# Functionality will be ignored when package isn't found
try:
from azureml.datacollector import ModelDataCollector
except ImportError:
print("Data collection is currently only supported in docker mode. May be disabled for local mode.")
# Mocking out model data collector functionality
class ModelDataCollector(object):
def nop(*args, **kw): pass
def __getattr__(self, _): return self.nop
def __init__(self, *args, **kw): return None
pass
import os
# Prepare the web service definition by authoring
# init() and run() functions. Test the functions
# before deploying the web service.
def init():
global inputs_dc, prediction_dc
from sklearn.externals import joblib
# load the model from file into a global object
global model
model = joblib.load('model.pkl')
inputs_dc = ModelDataCollector("model.pkl", identifier="inputs")
prediction_dc = ModelDataCollector("model.pkl", identifier="prediction")
def run(input_df):
import json
# append 25 random features, just like the training script does.
import numpy as np
n = 25
random_state = np.random.RandomState(0)
n_samples, n_features = input_df.shape
input_df = np.c_[input_df, random_state.randn(n_samples, n)]
inputs_dc.collect(input_df)
# make prediction using the model
pred = model.predict(input_df)
prediction_dc.collect(pred)
# return all predictions
# return json.dumps(pred.tolist())
# return just the first prediction
return json.dumps(str(pred[0]))
def main():
from azureml.api.schema.dataTypes import DataTypes
from azureml.api.schema.sampleDefinition import SampleDefinition
from azureml.api.realtime.services import generate_schema
import pandas
df = pandas.DataFrame(data=[[3.0, 3.6, 1.3, 0.25]], columns=['sepal length', 'sepal width','petal length','petal width'])
# Turn on data collection debug mode to view output in stdout
os.environ["AML_MODEL_DC_DEBUG"] = 'true'
# Test the output of the functions
init()
input1 = pandas.DataFrame([[3.0, 3.6, 1.3, 0.25]])
print("Result: " + run(input1))
inputs = {"input_df": SampleDefinition(DataTypes.PANDAS, df)}
# Generate the schema
generate_schema(run_func=run, inputs=inputs, filepath='./outputs/service_schema.json')
print("Schema generated")
if __name__ == "__main__":
main()
|
python
|
# Calculates the median, or middle value, of a list of numbers.
# For an odd-length list the median is the middle value, so the median of
# 1,2,3,4,5 is 3; for an even-length list it is the average of the two middle
# values, so the median of 1,2,3,4 is (2+3)/2 = 2.5.
def median(numbers):
med = 0.0
numbers = sorted(numbers)
if len(numbers)%2 == 0:
index1 = int(len(numbers)/2)
index2 = index1 - 1
med = (numbers[index1]+numbers[index2])/2.0
else:
index = int((len(numbers)/2.0)-0.5)
med = numbers[index]
return med
median([3,1,4,5,6,2])
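# Hedged usage examples (added for illustration): sorted([3,1,4,5,6,2]) is
# [1,2,3,4,5,6], so the two middle values 3 and 4 average to 3.5; for an
# odd-length list the middle value is returned directly.
print(median([3, 1, 4, 5, 6, 2]))  # 3.5
print(median([7, 1, 3]))           # 3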
|
python
|
from .gyaodl import *
__copyright__ = '(c) 2021 xpadev-net https://xpadev.net'
__version__ = '0.0.1'
__license__ = 'MIT'
__author__ = 'xpadev-net'
__author_email__ = '[email protected]'
__url__ = 'https://github.com/xpadev-net/gyaodl'
__all__ = ['GyaoDL']
|
python
|
import torch
from torch.nn import Module
from functools import partial
import warnings
from .kernel_samples import kernel_tensorized, kernel_online, kernel_multiscale
from .sinkhorn_samples import sinkhorn_tensorized
from .sinkhorn_samples import sinkhorn_online
from .sinkhorn_samples import sinkhorn_multiscale
from .kernel_samples import kernel_tensorized as hausdorff_tensorized
from .kernel_samples import kernel_online as hausdorff_online
from .kernel_samples import kernel_multiscale as hausdorff_multiscale
routines = {
"sinkhorn": {
"tensorized": sinkhorn_tensorized,
"online": sinkhorn_online,
"multiscale": sinkhorn_multiscale,
},
"hausdorff": {
"tensorized": hausdorff_tensorized,
"online": hausdorff_online,
"multiscale": hausdorff_multiscale,
},
"energy": {
"tensorized": partial(kernel_tensorized, name="energy"),
"online": partial(kernel_online, name="energy"),
"multiscale": partial(kernel_multiscale, name="energy"),
},
"gaussian": {
"tensorized": partial(kernel_tensorized, name="gaussian"),
"online": partial(kernel_online, name="gaussian"),
"multiscale": partial(kernel_multiscale, name="gaussian"),
},
"laplacian": {
"tensorized": partial(kernel_tensorized, name="laplacian"),
"online": partial(kernel_online, name="laplacian"),
"multiscale": partial(kernel_multiscale, name="laplacian"),
},
}
class SamplesLoss(Module):
"""Creates a criterion that computes distances between sampled measures on a vector space.
Warning:
If **loss** is ``"sinkhorn"`` and **reach** is **None** (balanced Optimal Transport),
the resulting routine will expect measures whose total masses are equal with each other.
Parameters:
loss (string, default = ``"sinkhorn"``): The loss function to compute.
The supported values are:
- ``"sinkhorn"``: (Un-biased) Sinkhorn divergence, which interpolates
between Wasserstein (blur=0) and kernel (blur= :math:`+\infty` ) distances.
- ``"hausdorff"``: Weighted Hausdorff distance, which interpolates
between the ICP loss (blur=0) and a kernel distance (blur= :math:`+\infty` ).
- ``"energy"``: Energy Distance MMD, computed using the kernel
:math:`k(x,y) = -\|x-y\|_2`.
- ``"gaussian"``: Gaussian MMD, computed using the kernel
:math:`k(x,y) = \exp \\big( -\|x-y\|_2^2 \,/\, 2\sigma^2)`
of standard deviation :math:`\sigma` = **blur**.
- ``"laplacian"``: Laplacian MMD, computed using the kernel
:math:`k(x,y) = \exp \\big( -\|x-y\|_2 \,/\, \sigma)`
of standard deviation :math:`\sigma` = **blur**.
p (int, default=2): If **loss** is ``"sinkhorn"`` or ``"hausdorff"``,
specifies the ground cost function between points.
The supported values are:
- **p** = 1: :math:`~~C(x,y) ~=~ \|x-y\|_2`.
- **p** = 2: :math:`~~C(x,y) ~=~ \\tfrac{1}{2}\|x-y\|_2^2`.
blur (float, default=.05): The finest level of detail that
should be handled by the loss function - in
order to prevent overfitting on the samples' locations.
- If **loss** is ``"gaussian"`` or ``"laplacian"``,
it is the standard deviation :math:`\sigma` of the convolution kernel.
- If **loss** is ``"sinkhorn"`` or ``"hausdorff"``,
it is the typical scale :math:`\sigma` associated
to the temperature :math:`\\varepsilon = \sigma^p`.
The default value of .05 is sensible for input
measures that lie in the unit square/cube.
Note that the *Energy Distance* is scale-equivariant, and won't
be affected by this parameter.
reach (float, default=None= :math:`+\infty` ): If **loss** is ``"sinkhorn"``
or ``"hausdorff"``,
specifies the typical scale :math:`\\tau` associated
to the constraint strength :math:`\\rho = \\tau^p`.
diameter (float, default=None): A rough indication of the maximum
distance between points, which is used to tune the :math:`\\varepsilon`-scaling
descent and provide a default heuristic for clustering **multiscale** schemes.
If **None**, a conservative estimate will be computed on-the-fly.
scaling (float, default=.5): If **loss** is ``"sinkhorn"``,
specifies the ratio between successive values
of :math:`\sigma=\\varepsilon^{1/p}` in the
:math:`\\varepsilon`-scaling descent.
This parameter allows you to specify the trade-off between
speed (**scaling** < .4) and accuracy (**scaling** > .9).
truncate (float, default=None= :math:`+\infty`): If **backend**
is ``"multiscale"``, specifies the effective support of
a Gaussian/Laplacian kernel as a multiple of its standard deviation.
If **truncate** is not **None**, kernel truncation
steps will assume that
:math:`\\exp(-x/\sigma)` or
:math:`\\exp(-x^2/2\sigma^2)` are zero when
:math:`\|x\| \,>\, \\text{truncate}\cdot \sigma`.
cost (function or string, default=None): if **loss** is ``"sinkhorn"``
or ``"hausdorff"``, specifies the cost function that should
be used instead of :math:`\\tfrac{1}{p}\|x-y\|^p`:
- If **backend** is ``"tensorized"``, **cost** should be a
python function that takes as input a
(B,N,D) torch Tensor **x**, a (B,M,D) torch Tensor **y**
and returns a batched Cost matrix as a (B,N,M) Tensor.
- Otherwise, if **backend** is ``"online"`` or ``"multiscale"``,
**cost** should be a `KeOps formula <http://www.kernel-operations.io/api/math-operations.html>`_,
given as a string, with variables ``X`` and ``Y``.
The default values are ``"Norm2(X-Y)"`` (for **p** = 1) and
``"(SqDist(X,Y) / IntCst(2))"`` (for **p** = 2).
cluster_scale (float, default=None): If **backend** is ``"multiscale"``,
specifies the coarse scale at which cluster centroids will be computed.
If **None**, a conservative estimate will be computed from
**diameter** and the ambient space's dimension,
making sure that memory overflows won't take place.
debias (bool, default=True): If **loss** is ``"sinkhorn"``,
specifies if we should compute the **unbiased**
Sinkhorn divergence instead of the classic,
entropy-regularized "SoftAssign" loss.
potentials (bool, default=False): When this parameter is set to True,
the :mod:`SamplesLoss` layer returns a pair of optimal dual potentials
:math:`F` and :math:`G`, sampled on the input measures,
instead of a differentiable scalar value.
These dual vectors :math:`(F(x_i))` and :math:`(G(y_j))`
are encoded as Torch tensors, with the same shape
as the input weights :math:`(\\alpha_i)` and :math:`(\\beta_j)`.
verbose (bool, default=False): If **backend** is ``"multiscale"``,
specifies whether information on the clustering and
:math:`\\varepsilon`-scaling descent should be displayed
in the standard output.
backend (string, default = ``"auto"``): The implementation that
will be used in the background; this choice has a major impact
on performance. The supported values are:
- ``"auto"``: Choose automatically, using a simple
heuristic based on the inputs' shapes.
- ``"tensorized"``: Relies on a full cost/kernel matrix, computed
once and for all and stored on the device memory.
This method is fast, but has a quadratic
memory footprint and does not scale beyond ~5,000 samples per measure.
- ``"online"``: Computes cost/kernel values on-the-fly, leveraging
online map-reduce CUDA routines provided by
the `pykeops <https://www.kernel-operations.io>`_ library.
- ``"multiscale"``: Fast implementation that scales to millions
of samples in dimension 1-2-3, relying on the block-sparse
reductions provided by the `pykeops <https://www.kernel-operations.io>`_ library.
"""
def __init__(
self,
loss="sinkhorn",
p=2,
blur=0.05,
reach=None,
diameter=None,
scaling=0.5,
truncate=5,
cost=None,
kernel=None,
cluster_scale=None,
debias=True,
potentials=False,
verbose=False,
backend="auto",
a_init=0,
SinkhornMaxIter=None,
):
super(SamplesLoss, self).__init__()
self.loss = loss
self.backend = backend
self.p = p
self.blur = blur
self.reach = reach
self.truncate = truncate
self.diameter = diameter
self.scaling = scaling
self.cost = cost
self.kernel = kernel
self.cluster_scale = cluster_scale
self.debias = debias
self.potentials = potentials
self.verbose = verbose
self.a_init = a_init
self.SinkhornMaxIter = SinkhornMaxIter
def forward(self, *args):
"""Computes the loss between sampled measures.
Documentation and examples: Soon!
Until then, please check the tutorials :-)"""
l_x, α, x, l_y, β, y = self.process_args(*args)
B, N, M, D, l_x, α, l_y, β = self.check_shapes(l_x, α, x, l_y, β, y)
backend = (
self.backend
) # Choose the backend -----------------------------------------
if l_x is not None or l_y is not None:
if backend in ["auto", "multiscale"]:
backend = "multiscale"
else:
raise ValueError(
'Explicit cluster labels are only supported with the "auto" and "multiscale" backends.'
)
elif backend == "auto":
if M * N <= 5000 ** 2:
backend = (
"tensorized" # Fast backend, with a quadratic memory footprint
)
else:
if (
D <= 3
and self.loss == "sinkhorn"
and M * N > 10000 ** 2
and self.p == 2
):
backend = "multiscale" # Super scalable algorithm in low dimension
else:
backend = "online" # Play it safe, without kernel truncation
# Check compatibility between the batchsize and the backend --------------------------
if backend in ["multiscale"]: # multiscale routines work on single measures
if B == 1:
α, x, β, y = α.squeeze(0), x.squeeze(0), β.squeeze(0), y.squeeze(0)
elif B > 1:
warnings.warn(
"The 'multiscale' backend do not support batchsize > 1. "
+ "Using 'tensorized' instead: beware of memory overflows!"
)
backend = "tensorized"
if B == 0 and backend in [
"tensorized",
"online",
]: # tensorized and online routines work on batched tensors
α, x, β, y = α.unsqueeze(0), x.unsqueeze(0), β.unsqueeze(0), y.unsqueeze(0)
# Run --------------------------------------------------------------------------------
values = routines[self.loss][backend](
α,
x,
β,
y,
p=self.p,
blur=self.blur,
reach=self.reach,
diameter=self.diameter,
scaling=self.scaling,
truncate=self.truncate,
cost=self.cost,
kernel=self.kernel,
cluster_scale=self.cluster_scale,
debias=self.debias,
potentials=self.potentials,
labels_x=l_x,
labels_y=l_y,
verbose=self.verbose,
            a_init=self.a_init,
            SinkhornMaxIter=self.SinkhornMaxIter,
)
# Make sure that the output has the correct shape ------------------------------------
if (
self.potentials
): # Return some dual potentials (= test functions) sampled on the input measures
F, G = values
return F.view_as(α), G.view_as(β)
else: # Return a scalar cost value
if backend in ["multiscale"]: # KeOps backends return a single scalar value
if B == 0:
return values # The user expects a scalar value
else:
return values.view(
-1
) # The user expects a "batch list" of distances
else: # "tensorized" backend returns a "batch vector" of values
if B == 0:
return values[0] # The user expects a scalar value
else:
return values # The user expects a "batch vector" of distances
def process_args(self, *args):
if len(args) == 6:
return args
if len(args) == 4:
α, x, β, y = args
return None, α, x, None, β, y
elif len(args) == 2:
x, y = args
α = self.generate_weights(x)
β = self.generate_weights(y)
return None, α, x, None, β, y
else:
raise ValueError(
"A SamplesLoss accepts two (x, y), four (α, x, β, y) or six (l_x, α, x, l_y, β, y) arguments."
)
def generate_weights(self, x):
        if x.dim() == 2:
N = x.shape[0]
return torch.ones(N).type_as(x) / N
elif x.dim() == 3:
B, N, _ = x.shape
return torch.ones(B, N).type_as(x) / N
else:
raise ValueError(
"Input samples 'x' and 'y' should be encoded as (N,D) or (B,N,D) (batch) tensors."
)
def check_shapes(self, l_x, α, x, l_y, β, y):
if α.dim() != β.dim():
raise ValueError(
"Input weights 'α' and 'β' should have the same number of dimensions."
)
if x.dim() != y.dim():
raise ValueError(
"Input samples 'x' and 'y' should have the same number of dimensions."
)
if x.shape[-1] != y.shape[-1]:
raise ValueError(
"Input samples 'x' and 'y' should have the same last dimension."
)
if (
x.dim() == 2
): # No batch --------------------------------------------------------------------
B = 0 # Batchsize
N, D = x.shape # Number of "i" samples, dimension of the feature space
M, _ = y.shape # Number of "j" samples, dimension of the feature space
if α.dim() not in [1, 2]:
raise ValueError(
"Without batches, input weights 'α' and 'β' should be encoded as (N,) or (N,1) tensors."
)
elif α.dim() == 2:
if α.shape[1] > 1:
raise ValueError(
"Without batches, input weights 'α' should be encoded as (N,) or (N,1) tensors."
)
if β.shape[1] > 1:
raise ValueError(
"Without batches, input weights 'β' should be encoded as (M,) or (M,1) tensors."
)
α, β = α.view(-1), β.view(-1)
if l_x is not None:
if l_x.dim() not in [1, 2]:
raise ValueError(
"Without batches, the vector of labels 'l_x' should be encoded as an (N,) or (N,1) tensor."
)
elif l_x.dim() == 2:
if l_x.shape[1] > 1:
raise ValueError(
"Without batches, the vector of labels 'l_x' should be encoded as (N,) or (N,1) tensors."
)
l_x = l_x.view(-1)
if len(l_x) != N:
raise ValueError(
"The vector of labels 'l_x' should have the same length as the point cloud 'x'."
)
if l_y is not None:
if l_y.dim() not in [1, 2]:
raise ValueError(
"Without batches, the vector of labels 'l_y' should be encoded as an (M,) or (M,1) tensor."
)
elif l_y.dim() == 2:
if l_y.shape[1] > 1:
raise ValueError(
"Without batches, the vector of labels 'l_y' should be encoded as (M,) or (M,1) tensors."
)
l_y = l_y.view(-1)
if len(l_y) != M:
raise ValueError(
"The vector of labels 'l_y' should have the same length as the point cloud 'y'."
)
N2, M2 = α.shape[0], β.shape[0]
elif (
x.dim() == 3
): # batch computation ---------------------------------------------------------
(
B,
N,
D,
) = x.shape
# Batchsize, number of "i" samples, dimension of the feature space
(
B2,
M,
_,
) = y.shape
# Batchsize, number of "j" samples, dimension of the feature space
if B != B2:
raise ValueError("Samples 'x' and 'y' should have the same batchsize.")
if α.dim() not in [2, 3]:
raise ValueError(
"With batches, input weights 'α' and 'β' should be encoded as (B,N) or (B,N,1) tensors."
)
elif α.dim() == 3:
if α.shape[2] > 1:
raise ValueError(
"With batches, input weights 'α' should be encoded as (B,N) or (B,N,1) tensors."
)
if β.shape[2] > 1:
raise ValueError(
"With batches, input weights 'β' should be encoded as (B,M) or (B,M,1) tensors."
)
α, β = α.squeeze(-1), β.squeeze(-1)
if l_x is not None:
raise NotImplementedError(
'The "multiscale" backend has not been implemented with batches.'
)
if l_y is not None:
raise NotImplementedError(
'The "multiscale" backend has not been implemented with batches.'
)
B2, N2 = α.shape
B3, M2 = β.shape
if B != B2:
raise ValueError(
"Samples 'x' and weights 'α' should have the same batchsize."
)
if B != B3:
raise ValueError(
"Samples 'y' and weights 'β' should have the same batchsize."
)
else:
raise ValueError(
"Input samples 'x' and 'y' should be encoded as (N,D) or (B,N,D) (batch) tensors."
)
if N != N2:
raise ValueError(
"Weights 'α' and samples 'x' should have compatible shapes."
)
if M != M2:
raise ValueError(
"Weights 'β' and samples 'y' should have compatible shapes."
)
return B, N, M, D, l_x, α, l_y, β
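# Hedged usage sketch (added for illustration, not part of the original module):
# assuming the module-level `routines` table and the torch import defined earlier
# in this file are available, SamplesLoss can be applied to two point clouds as
# below. Shapes and hyper-parameters are placeholders.
if __name__ == "__main__":
    import torch
    x = torch.randn(500, 3, requires_grad=True)  # (N, D) samples; uniform weights are generated automatically
    y = torch.randn(800, 3)                      # (M, D) samples
    loss_fn = SamplesLoss(loss="sinkhorn", p=2, blur=0.05, backend="auto")
    value = loss_fn(x, y)  # N * M <= 5000**2, so the "tensorized" backend is selected
    value.backward()       # the scalar divergence is differentiable w.r.t. x
    print(float(value))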
|
python
|
import pytest
from cookietemple.list.list import TemplateLister
"""
This test class is for testing the list subcommand:
Syntax: cookietemple list
"""
def test_non_empty_output(capfd):
"""
Verifies that the list command does indeed have content
:param capfd: pytest fixture -> capfd: Capture, as text, output to file descriptors 1 and 2.
"""
# Capture stdout
lister = TemplateLister()
lister.list_available_templates()
out, err = capfd.readouterr()
assert not err
assert out
@pytest.mark.skip(reason="Still need to determine how to verify the rich output of a Table")
def test_header(capfd):
"""
Verifies that the list command does have the following header
    Name Handle Version (Short Description and Available Libs are rendered differently)
:param capfd: pytest fixture -> capfd: Capture, as text, output to file descriptors 1 and 2.
"""
# Capture stdout
lister = TemplateLister()
lister.list_available_templates()
out, err = capfd.readouterr()
    # Skip the cookietemple autogenerated banner lines at the top of the output
header = set(out.split("\n")[4].split())
assert "Name" in header and "Handle" in header
|
python
|
import scrapy
class ImdbImageSpiderSpider(scrapy.Spider):
name = 'imdb_image_spider'
    # allowed_domains expects bare domain names, not full URLs
    allowed_domains = ['imdb.com']
    # Read the title URL from url.txt; strip the trailing newline before appending
    with open("url.txt", "r") as file_handle:
        temp = file_handle.readline().strip()
    temp += "/mediaindex?ref_=tt_ov_mi_sm"
    start_urls = [temp]
def parse(self, response):
image_src = response.xpath("//*[@class='MediaViewerImagestyles__PortraitContainer-sc-1qk433p-2 iUyzNI']").extract_first()
yield {"image_src" : image_src
}
|
python
|
'''
Gamma transformation
'''
import numpy as np
import cv2
from matplotlib import pyplot as plt
# Define the gamma transformation function
def gamma_trans(img, gamma):
    # Normalize to [0, 1], apply the gamma power, then scale back to [0, 255]
gamma_list = [np.power(x / 255.0, gamma) * 255.0 for x in range(256)]
    # Convert the list to an np.array with dtype uint8
gamma_table = np.round(np.array(gamma_list)).astype(np.uint8)
    # Use OpenCV's look-up-table function to remap the image's grey levels
return cv2.LUT(img, gamma_table)
im = cv2.imread('me.jpg', 0)
cv2.imshow('org', im)
# A gamma of 0.5 stretches the dark regions and compresses the bright regions
im_gama05 = gamma_trans(im, 0.5)
cv2.imshow('gama0.5', im_gama05)
# A gamma of 2 stretches the bright regions and compresses the dark regions
im_gama2 = gamma_trans(im, 2)
cv2.imshow('gama2', im_gama2)
cv2.waitKey()
cv2.destroyAllWindows()
|
python
|
def findSubstring( S, L):
""" The idea is very simple, use brute-force with hashing.
First we compute the hash for each word given in L and add them up.
Then we traverse from S[0] to S[len(S) - total_length], for each index, i
f the first substring in L, then we compute the total hash for that partition,
if hashes match, then we have a match.
"""
if S is None or L is None or len(L) == 0:
return []
# the length of each word
len_of_word = len(L[0])
# the length of entire substring
total_length = len_of_word * len(L)
# use a set to reduce lookup time
word_set = set(L)
# total hash for the given list of words
target_hash = 0
for item in L:
target_hash += hash(item)
ret = []
    for start in range(len(S) - total_length + 1):
if S[start:start+len_of_word] not in word_set:
continue
end = start + total_length - 1
test_hash = 0
        for walker in range(start, end + 1, len_of_word):
substring = S[walker:walker + len_of_word]
# early termination if any of the substring not in set
if substring not in word_set:
break
test_hash += hash(substring)
if test_hash == target_hash:
ret.append(start)
return ret
print(findSubstring('shsamsampamasdasdpamsam', ['sam', 'pam']))
|
python
|
from dagger.input.from_node_output import FromNodeOutput
from dagger.input.protocol import Input
from dagger.serializer import DefaultSerializer
from tests.input.custom_serializer import CustomSerializer
def test__conforms_to_protocol():
assert isinstance(FromNodeOutput("node", "output"), Input)
def test__exposes_node_and_output_name():
node_name = "another-node"
output_name = "another-nodes-output"
input_ = FromNodeOutput(node=node_name, output=output_name)
assert input_.node == node_name
assert input_.output == output_name
def test__with_default_serializer():
input_ = FromNodeOutput("node", "output")
assert input_.serializer == DefaultSerializer
def test__with_custom_serializer():
serializer = CustomSerializer()
input_ = FromNodeOutput("node", "output", serializer=serializer)
assert input_.serializer == serializer
def test__representation():
serializer = CustomSerializer()
input_ = FromNodeOutput("my-node", "my-output", serializer=serializer)
assert (
repr(input_)
== f"FromNodeOutput(node=my-node, output=my-output, serializer={repr(serializer)})"
)
|
python
|
"""This is a Python demo for the `Sphinx tutorial <http://quick-sphinx-tutorial.readthedocs.org/en/latest/>`_.
This demo has an implementation of a Python script called ``giza`` which
calculates the square of a given number.
"""
import argparse
def calc_square(number, verbosity):
"""
Calculate the square of a given number.
:param number: An integer number.
:param verbosity: An integer value for output verbosity.
:return: The square of number.
"""
answer = number**2
    if verbosity >= 2:
        print("the square of {} equals {}".format(number, answer))
    elif verbosity >= 1:
        print("{}^2 == {}".format(number, answer))
    else:
        print(answer)
return answer
def main():
"""
A small wrapper that is used for running as a CLI Script.
Examples:
::
$ giza 2
> 4
$ giza -v 3
> 3^2 == 9
$ giza -vv 4
> the square of 4 equals 16
"""
parser = argparse.ArgumentParser()
parser.add_argument("number", type=int,
help="display a square of a given number")
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="increase output verbosity")
args = parser.parse_args()
calc_square(args.number, args.verbosity)
if __name__ == '__main__':
main()
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
LANGUAGE = "english"
SENTENCES_COUNT = 10
if __name__ == "__main__":
url = "https://en.wikipedia.org/wiki/Automatic_summarization"
html='''
<p>Well, I do not know what type of features you are giving to your neural network. However, in general, I would go with a single neural network. It seems that you have no limitation in resources for training your network and the only problem is resources while you apply your network. </p>
<p>The thing is that probably the two problems have things in common (e.g. both types of plates are rectangular). This means that if you use two networks, each has to solve the same sub-problem (the common part) again. If you use only one network the common part of the problem takes fewer cells/weights to be solved and the remaining weights/cells can be employed for better recognition.</p>
<p>In the end, if I was in your place I would try both of them. I think that is the only way to be really sure what is the best solution. When speaking theoretically it is possible that we do not include some factors.</p>
'''
#parser = HtmlParser.from_url(url, Tokenizer(LANGUAGE))
#parser=HtmlParser.from_string(html, tokenizer=Tokenizer(LANGUAGE), url=None )
# or for plain text files
from post_rec.Utility.TextPreprocessing import InformationAbstrator
text_extractor=InformationAbstrator(100)
text_extractor.initParagraphFilter(text_extractor.lexrankSummary)
plain_text=" ".join( text_extractor.clipText(html) )
parser = PlaintextParser.from_string(plain_text, Tokenizer(LANGUAGE))
stemmer = Stemmer(LANGUAGE)
summarizer = Summarizer(stemmer)
summarizer.stop_words = get_stop_words(LANGUAGE)
for sentence in summarizer(parser.document, SENTENCES_COUNT):
print(sentence)
|
python
|
from django.db import models
from searchEngine.models import InternetResult
from search.models import SearchParameters
class TwitterUser(models.Model):
id = models.CharField(max_length=128, primary_key=True)
username = models.CharField(max_length=60)
link = models.URLField()
avatar = models.TextField(default="")
@property
def get_node_id(self):
return 'twitterUser' + str(self.id)
class Hashtag(models.Model):
id = models.CharField(max_length=128, primary_key=True)
link = models.URLField(null=True)
@property
def get_node_id(self):
return 'hashtag' + str(self.id)
class Tweet(models.Model):
id = models.CharField(max_length=128, primary_key=True)
content = models.TextField()
date = models.DateField()
time = models.TimeField()
username = models.CharField(max_length=60)
userlink = models.URLField()
link = models.URLField()
likes = models.IntegerField()
replies = models.IntegerField()
retweets = models.IntegerField()
hashtags = models.ManyToManyField(Hashtag)
user = models.ForeignKey(TwitterUser, on_delete=models.CASCADE, null=True)
searches = models.ManyToManyField(SearchParameters, related_name='tweets')
internet_articles = models.ManyToManyField(InternetResult, related_name='tweets')
@property
def get_node_id(self):
return 'tweet' + str(self.id)
|
python
|
'''
hist_it.py - GPUE: Split Operator based GPU solver for Nonlinear
Schrodinger Equation, Copyright (C) 2011-2015, Lee J. O'Riordan
<[email protected]>, Tadhg Morgan, Neil Crowley. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
|
python
|
from unittest import TestCase
from ..mock_ldap_helpers import DEFAULT_DC
from ..mock_ldap_helpers import group
from ..mock_ldap_helpers import group_dn
from ..mock_ldap_helpers import mock_ldap_directory
from ..mock_ldap_helpers import person
from ..mock_ldap_helpers import person_dn
class TestPerson(TestCase):
def test_attrs(self):
name = "bigfoot"
person_attrs = person(name)
self.assertEqual(person_attrs[0],
"cn=%s,ou=people,%s" % (name, DEFAULT_DC))
self.assertEqual(person_attrs[1]['uid'], [name])
self.assertEqual(person_attrs[1]['cn'], [name])
self.assertEqual(person_attrs[1]['sn'], [name])
self.assertEqual(person_attrs[1]['mail'], ["%s@test" % name])
def test_password(self):
name = "yeti"
person_attrs = person(name)
self.assertEqual(person_attrs[1]['userPassword'], ['password'])
person_attrs = person(name, password='snowstorm')
self.assertEqual(person_attrs[1]['userPassword'], ['snowstorm'])
def test_email(self):
name = "sasquatch"
dc = "dc=test,dc=example,dc=com"
person_attrs = person(name, dc=dc)
self.assertEqual(person_attrs[0],
"cn=%s,ou=people,%s" % (name, dc))
self.assertEqual(person_attrs[1]['mail'],
["%[email protected]" % name])
class TestGroup(TestCase):
def test_attrs(self):
member = person('bigfoot')
name = "north-american-great-apes"
group_attrs = group(name, member)
self.assertEqual(group_attrs[1]['cn'], [name])
class TestMockLdapDirectory(TestCase):
def test_child_group_no_user(self):
"""Child groups shouldn't require a user for mockldap setup."""
directory = mock_ldap_directory(
extra_users=[
('somebody', 'the_parent'),
],
group_lineage=[
('the_parent', 'child_1'),
('the_parent', 'child_2'),
('child_1', 'child_1_1'),
('child_2', 'child_2_1'),
('child_2', 'child_2_2'),
],
ldap_dc="",
)
self.assertEqual(len(directory), 12)
self.assertIn(group_dn('child_1', ""), directory)
self.assertEqual(directory[group_dn('child_1', '')]['memberOf'],
[group_dn('child_1_1', '')])
self.assertEqual(directory[group_dn('child_1', "")]['member'],
[group_dn('the_parent', '')])
self.assertIn(group_dn('child_2', ""), directory)
self.assertEqual(
directory[group_dn('child_2', '')]['memberOf'],
[group_dn('child_2_1', ''), group_dn('child_2_2', '')])
self.assertEqual(directory[group_dn('child_2', "")]['member'],
[group_dn('the_parent', '')])
self.assertIn(group_dn('child_1_1', ""), directory)
self.assertEqual(directory[group_dn('child_1_1', "")]['memberOf'], [])
self.assertEqual(directory[group_dn('child_1_1', "")]['member'],
[group_dn('child_1', '')])
self.assertIn(group_dn('child_2_1', ""), directory)
self.assertEqual(directory[group_dn('child_2_1', "")]['memberOf'], [])
self.assertEqual(directory[group_dn('child_2_1', "")]['member'],
[group_dn('child_2', '')])
self.assertIn(group_dn('child_2_2', ""), directory)
self.assertEqual(directory[group_dn('child_2_2', "")]['memberOf'], [])
self.assertEqual(directory[group_dn('child_2_2', "")]['member'],
[group_dn('child_2', '')])
def test_no_args(self):
directory = mock_ldap_directory()
self.assertEqual(len(directory), 5)
self.assertSetEqual(
set(directory.keys()),
{
'ou=Access,dc=example,dc=com',
'dc=example,dc=com',
'ou=Service-Accounts,dc=example,dc=com',
'cn=auth,ou=people,dc=example,dc=com',
'ou=People,dc=example,dc=com'
})
def test_bind_user(self):
directory = mock_ldap_directory(ldap_dc="")
dn = person_dn('auth', "")
self.assertIn(dn, directory)
self.assertEqual(directory[dn]['userPassword'], ['password'])
def test_bind_password(self):
directory = mock_ldap_directory(
bind_user='ned', default_password='mancy', ldap_dc="")
dn = person_dn('ned', "")
self.assertEqual(directory[dn]['userPassword'], ['password'])
directory = mock_ldap_directory(
bind_user='ned', bind_password='flanders',
default_password='mancy', ldap_dc="")
self.assertEqual(directory[dn]['userPassword'], ['flanders'])
def test_default_password(self):
dn = person_dn('somebody', "")
directory = mock_ldap_directory(
ldap_dc="",
extra_users=[('somebody', 'agroup')])
self.assertEqual(directory[dn]['userPassword'], ['password'])
directory = mock_ldap_directory(
ldap_dc="",
extra_users=[('somebody', 'agroup')],
default_password='swordfish')
self.assertEqual(directory[dn]['userPassword'], ['swordfish'])
|
python
|
#
# DeepRacer Guru
#
# Version 3.0 onwards
#
# Copyright (c) 2021 dmh23
#
import tkinter as tk
import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.axes import Axes
from matplotlib.ticker import PercentFormatter
from src.action_space.action import Action
from src.analyze.graph.graph_analyzer import GraphAnalyzer
from src.analyze.core.controls import EpisodeCheckButtonControl, MoreFiltersControl, ActionGroupControl
from src.analyze.core.episode_selector import EpisodeSelector
from src.episode.episode import Episode
class AnalyzeEpisodeActionDistribution(GraphAnalyzer):
def __init__(self, guru_parent_redraw, matplotlib_canvas: FigureCanvasTkAgg,
control_frame: tk.Frame, episode_selector: EpisodeSelector):
super().__init__(guru_parent_redraw, matplotlib_canvas, control_frame)
self._episodes_control = EpisodeCheckButtonControl(guru_parent_redraw, control_frame)
self._more_filters_control = MoreFiltersControl(guru_parent_redraw, control_frame, True)
self._group_control = ActionGroupControl(guru_parent_redraw, control_frame)
self._episode_selector = episode_selector
def build_control_frame(self, control_frame: tk.Frame):
self._episodes_control.add_to_control_frame()
self._more_filters_control.add_to_control_frame()
self._group_control.add_to_control_frame()
self._episode_selector.add_to_control_frame(control_frame, self.guru_parent_redraw)
def add_plots(self):
if not self.all_episodes:
return
episode = self._episode_selector.get_selected_episode()
if not episode:
return
action_mapping = self._get_action_mapping()
action_names = [name for name in action_mapping.keys()]
this_episode_data = self._map_frequencies(np.array(episode.action_frequency), action_mapping)
show_filtered = self._episodes_control.show_filtered()
show_all = self._episodes_control.show_all()
x_ticks = np.arange(len(action_names))
grid_spec = self.graph_figure.add_gridspec(1, 1, left=0.08, right=0.98, bottom=0.11, top=0.92)
axes: Axes = self.graph_figure.add_subplot(grid_spec[0])
axes.bar(x_ticks, this_episode_data, 0.9, label='This Episode')
if show_filtered and show_all:
filtered_episodes_data = self._get_mapped_data_for_episodes(self.filtered_episodes, action_mapping)
all_episodes_data = self._get_mapped_data_for_episodes(self.all_episodes, action_mapping)
axes.bar(x_ticks - 0.1, filtered_episodes_data, 0.2, label='Filtered')
axes.bar(x_ticks + 0.1, all_episodes_data, 0.2, label='All')
elif show_filtered:
filtered_episodes_data = self._get_mapped_data_for_episodes(self.filtered_episodes, action_mapping)
axes.bar(x_ticks - 0, filtered_episodes_data, 0.3, label='Filtered')
elif show_all:
all_episodes_data = self._get_mapped_data_for_episodes(self.all_episodes, action_mapping)
axes.bar(x_ticks - 0, all_episodes_data, 0.3, label='All')
axes.set_xticks(x_ticks)
axes.set_xticklabels(action_names)
axes.yaxis.set_major_formatter(PercentFormatter())
axes.set_title("Action Distribution for Episode #" + str(episode.id))
if len(action_names) >= 5:
axes.set_ybound(0, 50)
else:
axes.set_ybound(0, 100)
if axes.has_data():
axes.legend(frameon=True, framealpha=0.8, shadow=True)
def _get_mapped_data_for_episodes(self, episodes, action_mapping: dict):
data = np.array(episodes[0].action_frequency)
episode: Episode
for episode in episodes[1:]:
data = np.add(data, episode.action_frequency)
return self._map_frequencies(data, action_mapping)
def _map_frequencies(self, frequencies: np.ndarray, action_mapping: dict):
mapped_frequencies = []
for mapping in action_mapping.values():
freq = 0
for i in mapping:
freq += frequencies[i]
mapped_frequencies.append(freq)
return self._get_counts_as_percentage(np.array(mapped_frequencies))
@staticmethod
def _get_counts_as_percentage(counts: np.ndarray):
total_count = sum(counts)
return counts * 100 / total_count
def _get_action_mapping(self):
show_all_actions = not self._more_filters_control.filter_actions()
group_by_steering = self._group_control.group_by_steering()
group_by_speed = self._group_control.group_by_speed()
assert not (group_by_steering and group_by_speed)
mapping = {}
action: Action
for action in self.action_space.get_all_actions():
if show_all_actions or self.action_space_filter.should_show_action(action.get_index()):
if group_by_speed:
action_name = action.get_speed_group_name()
elif group_by_steering:
action_name = action.get_steering_group_name()
else:
action_name = action.get_readable_for_x_axis()
if action_name in mapping:
mapping[action_name].append(action.get_index())
else:
mapping[action_name] = [action.get_index()]
return mapping
|
python
|
#
# Copyright (c) 2013, Digium, Inc.
# Copyright (c) 2018, AVOXI, Inc.
#
"""ARI client library
"""
import aripy3.client
import swaggerpy3.http_client
import urllib.parse
from .client import ARIClient
async def connect(base_url, username, password):
"""Helper method for easily connecting to ARI.
:param base_url: Base URL for Asterisk HTTP server (http://localhost:8088/)
:param username: ARI username
:param password: ARI password.
:return:
"""
split = urllib.parse.urlsplit(base_url)
http_client = swaggerpy3.http_client.AsyncHttpClient()
http_client.set_basic_auth(split.hostname, username, password)
ari = ARIClient()
await ari.connect(base_url, http_client)
return ari
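# Hedged usage sketch (added for illustration, not part of the original module):
# assuming an Asterisk HTTP server is reachable at the placeholder URL below,
# connect() is awaited from an asyncio entry point; URL and credentials are placeholders.
if __name__ == "__main__":
    import asyncio
    async def _demo():
        ari = await connect("http://localhost:8088/", "asterisk", "secret")
        print(ari)
    asyncio.run(_demo())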
|
python
|
import argparse
import os
import shutil
import glob
import codecs
from pathlib import Path
binDl = []
binDbl= []
def genMEM(args,cp,dp):
if(args.bin[0] == '/' or args.bin[1] == ':'):
fb=Path(args.bin)
else:
fb=Path(cp,args.bin)
#read from application bin
try:
with open(fb, "rb") as f:
while True:
binD = f.read(1)
if not binD:
break
binDl.append(binD)
except IOError:
print('Application binary file '+str(fb)+' was not found!')
print('Aborting simulation...')
quit()
for i in binDl:
binDv = ord(i)
binDb = '{0:02x}'.format(binDv)
binDbl.append(binDb)
ft=len(binDbl) % 4
if(ft != 0):
for x in range(4-ft):
binDbl.append('00')
words=''
wordsl=[]
r=0
while (r < len(binDbl)):
for n in range(r, r+4):
words = binDbl[n]+words
wordsl.append(words)
words=''
r=r+4
#calculate address
adr=int(4096/4)
adrx=hex(adr).split('x')[-1]
wordsl.insert(0,'@'+str(adrx))
dt=Path(dp,'MEM_x16.TXT')
fm=open(dt,'w')
for l in range(0,len(wordsl)):
fm.write(wordsl[l]+'\n')
fm.close()
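# Hedged illustration (added, not part of the original flow): the packing loop in
# genMEM builds each 32-bit word little-endian, so the lowest-addressed byte ends
# up in the least-significant hex pair. A standalone check on placeholder bytes:
def _pack_words_example(byte_hex):
    # byte_hex: list of two-character hex strings whose length is a multiple of 4
    return ["".join(reversed(byte_hex[i:i + 4])) for i in range(0, len(byte_hex), 4)]
assert _pack_words_example(['de', 'ad', 'be', 'ef']) == ['efbeadde']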
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-b',
'--bin',
default=None,
help='Location to Application bin(Relative Path)')
args = parser.parse_args()
#check efinity environment
try:
os.environ['EFINITY_HOME']
except:
print('neither EFINITY_HOME nor EFXIPM_HOME is set. Stop.')
quit()
#check bin file is valid for simulation
if(args.bin != None and args.bin[-4:] != ".bin"):
print('Application file must be in .bin format!')
print('Eg. apb3Demo.bin')
print('Aborting simulation...')
quit()
#check directory
cp=os.getcwd()
dest_sim=Path(cp, 'SimSOC')
tb_sim=Path(dest_sim, 'tb_soc.v')
#move file to new folder
if os.path.exists(dest_sim):
shutil.rmtree(dest_sim)
os.mkdir(dest_sim)
for d in glob.glob(r'*.bin'):
shutil.copy(d, dest_sim)
for d in glob.glob(r'*.v*'):
shutil.copy(d, dest_sim)
for d in glob.glob(r'*.TXT'):
shutil.copy(d, dest_sim)
for d in glob.glob(r'*.do'):
shutil.copy(d, dest_sim)
for d in glob.glob(r'*.vh'):
shutil.copy(d, dest_sim)
#additional rtl files
path_source=os.path.join("..","source","**","*.v*")
for filename in glob.iglob(path_source, recursive=True):
shutil.copy(filename, dest_sim)
path_model=os.path.join("model","*.v")
for filename in glob.iglob(path_model , recursive=True):
shutil.copy(filename, dest_sim)
path_model_third_party=os.path.join("model_third_party","*.v*")
for filename in glob.iglob(path_model_third_party , recursive=True):
shutil.copy(filename, dest_sim)
path_base=os.path.join("..","*.v*")
for filename in glob.iglob(path_base , recursive=True):
shutil.copy(filename, dest_sim)
path_cam_dma_fifo=os.path.join("..","ip","cam_dma_fifo","Testbench","cam_dma_fifo.v")
shutil.copy(path_cam_dma_fifo, dest_sim)
path_cam_pixel_remap_fifo=os.path.join("..","ip","cam_pixel_remap_fifo","Testbench","cam_pixel_remap_fifo.v")
shutil.copy(path_cam_pixel_remap_fifo, dest_sim)
path_display_dma_fifo=os.path.join("..","ip","display_dma_fifo","Testbench","display_dma_fifo.v")
shutil.copy(path_display_dma_fifo, dest_sim)
path_hw_accel_dma_in_fifo=os.path.join("..","ip","hw_accel_dma_in_fifo","Testbench","hw_accel_dma_in_fifo.v")
shutil.copy(path_hw_accel_dma_in_fifo, dest_sim)
path_hw_accel_dma_out_fifo=os.path.join("..","ip","hw_accel_dma_out_fifo","Testbench","hw_accel_dma_out_fifo.v")
shutil.copy(path_hw_accel_dma_out_fifo, dest_sim)
path_dma=os.path.join("..","ip","dma","Testbench","dma.v")
shutil.copy(path_dma, dest_sim)
path_SapphireSoc=os.path.join("..","ip","SapphireSoc","Testbench","SapphireSoc.v")
shutil.copy(path_SapphireSoc, dest_sim)
path_csi2_rx_cam=os.path.join("..","ip","csi2_rx_cam","Testbench","csi2_rx_cam_define.vh")
shutil.copy(path_csi2_rx_cam, dest_sim)
path_csi2_rx_cam_msim=os.path.join("..","ip","csi2_rx_cam","Testbench","modelsim","csi2_rx_cam.sv")
shutil.copy(path_csi2_rx_cam_msim, dest_sim)
path_hbram_msim=os.path.join("..","ip","hbram","Testbench","modelsim","hbram.sv")
shutil.copy(path_hbram_msim, dest_sim)
path_hbram=os.path.join("..","ip","hbram","hbram_define.vh")
shutil.copy(path_hbram, dest_sim)
path_efx_dsi_tx_msim=os.path.join("..","ip","dsi_tx_display","Testbench","modelsim","dsi_tx_display.sv")
shutil.copy(path_efx_dsi_tx_msim, dest_sim)
path_efx_dsi_tx=os.path.join("..","ip","dsi_tx_display","Testbench","dsi_tx_display_define.vh")
shutil.copy(path_efx_dsi_tx, dest_sim)
test=os.listdir(dest_sim)
cur=os.getcwd()
for item in test:
if item.endswith("_tmpl.v"):
item=os.path.join(cur,"SimSOC",item)
os.remove(item)
if item.endswith("_softTap.v"):
item=os.path.join(cur,"SimSOC",item)
os.remove(item)
if item.endswith("hbram_top.encrypted.v"):
item=os.path.join(cur,"SimSOC",item)
os.remove(item)
    # Generate MEM_x16.TXT if an application bin was passed in
    if args.bin is not None:
        genMEM(args, cp, dest_sim)
    # Skip the original test sequences when a custom bin is supplied
    if args.bin is not None:
tmpf = Path(dest_sim, 'tb_soc.v'+'.tmp')
with codecs.open(tb_sim, 'r', encoding='utf-8') as fi, \
codecs.open(tmpf, 'w', encoding='utf-8') as fo:
for line in fi:
#new_line = do_processing(line) # do your line processing here
new_line = line.replace('//`define SKIP_TEST', '`define SKIP_TEST')
fo.write(new_line)
os.remove(tb_sim) # remove original
os.rename(tmpf, tb_sim) # rename temp to original name
#run simulation
os.chdir(dest_sim)
if(os.name == 'nt'):
os.system('vsim.exe -do sim.do')
else:
os.system('vsim -do sim.do')
if __name__ == '__main__':
main()
|
python
|
# -*- coding: utf-8 -*-
import os
import setuptools
PACKAGE = 'dockerblade'
PATH = os.path.join(os.path.dirname(__file__),
'src/{}/version.py'.format(PACKAGE))
with open(PATH, 'r') as fh:
exec(fh.read())
setuptools.setup(version=__version__)
|
python
|