content: string (lengths 0–894k)
type: string (2 classes)
corruptionValues = { "Glimpse_of_Clarity_1": 15, "Crit_DMG_1": 10, "Crit_DMG_2": 15, "Crit_DMG_3": 20, "Flash_of_Insight_1": 20, "Lash_of_the_Void_1": 25, "Percent_Crit_1": 10, "Percent_Crit_2": 15, "Percent_Crit_3": 20, "Percent_Haste_1": 10, "Percent_Haste_2": 15, "Percent_Haste_3": 20, "Percent_Vers_1": 10, "Percent_Vers_2": 15, "Percent_Vers_3": 20, "Percent_Mast_1": 10, "Percent_Mast_2": 15, "Percent_Mast_3": 20, "Crit_Proc_1": 15, "Crit_Proc_2": 20, "Crit_Proc_3": 35, "Haste_Proc_1": 15, "Haste_Proc_2": 20, "Haste_Proc_3": 35, "Versatility_Proc_1": 15, "Versatility_Proc_2": 20, "Versatility_Proc_3": 35, "Mastery_Proc_1": 15, "Mastery_Proc_2": 20, "Mastery_Proc_3": 35, "Echoing_Void_1": 25, "Echoing_Void_2": 35, "Echoing_Void_3": 60, "Infinite_Star_1": 20, "Infinite_Star_2": 50, "Infinite_Star_3": 75, "Ineffable_Truth_1": 12, "Ineffable_Truth_2": 30, "Twilight_Devastation_1": 25, "Twilight_Devastation_2": 50, "Twilight_Devastation_3": 75, "Twisted_Appendage_1": 15, "Twisted_Appendage_2": 35, "Twisted_Appendage_3": 66, "Void_Ritual_1": 15, "Void_Ritual_2": 35, "Void_Ritual_3": 66, "Gushing_Wound_1": 15 }
python
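As a quick illustration of how the mapping above might be consumed (the function name and the chosen effects are mine, not part of the original data), here is a minimal Python sketch that totals corruption for a set of equipped effects:

# Hypothetical helper: sum the corruption contributed by a list of equipped
# effects, using the corruptionValues mapping defined above.
def total_corruption(effects, values=corruptionValues):
    unknown = [e for e in effects if e not in values]
    if unknown:
        raise KeyError(f"Unknown corruption effects: {unknown}")
    return sum(values[e] for e in effects)

# Example: two effects picked from the table above -> 75 + 30 = 105
print(total_corruption(["Twilight_Devastation_3", "Ineffable_Truth_2"]))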
from enum import Enum, auto

from fastapi import Request
from fastapi.responses import JSONResponse


class ErrCode(Enum):
    NO_ERROR = 0
    EMAIL_DUPLICATED = auto()
    NO_ITEM = auto()


ErrDict = {
    ErrCode.NO_ERROR: "정상",  # "OK"
    ErrCode.EMAIL_DUPLICATED: "동일한 이메일이 존재합니다.",  # "An identical email already exists."
    ErrCode.NO_ITEM: "해당 항목이 존재하지 않습니다.",  # "The requested item does not exist."
}


class ResError(Exception):
    status_code = 0
    err_code = ErrCode.NO_ERROR

    def __init__(self, status_code: int, err_code: ErrCode):
        self.status_code = status_code
        self.err_code = err_code


def init_app(app):
    @app.exception_handler(ResError)
    async def exception_handler(request: Request, err: ResError):
        content = {
            "err_code": err.err_code.name,
            "detail": ErrDict[err.err_code],
        }
        return JSONResponse(
            status_code=err.status_code,
            content=content,
        )
python
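A short usage sketch for the error-handling module above, assuming it is importable alongside the app; the route path and the duplicate-email condition are illustrative only:

from fastapi import FastAPI

app = FastAPI()
init_app(app)  # registers the ResError handler defined above


@app.post("/users")  # hypothetical endpoint for illustration
async def create_user(email: str):
    email_already_exists = True  # stand-in for a real database lookup
    if email_already_exists:
        # The handler converts this into a JSON response with err_code/detail.
        raise ResError(status_code=409, err_code=ErrCode.EMAIL_DUPLICATED)
    return {"email": email}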
import numpy as np from sklearn.model_selection import TimeSeriesSplit from sklearn.utils import indexable from sklearn.utils.validation import _num_samples import backtrader as bt import backtrader.indicators as btind import datetime as dt import pandas as pd import pandas_datareader as web from pandas import Series, DataFrame import random from copy import deepcopy class TimeSeriesSplitImproved(TimeSeriesSplit): """Time Series cross-validator Provides train/test indices to split time series data samples that are observed at fixed time intervals, in train/test sets. In each split, test indices must be higher than before, and thus shuffling in cross validator is inappropriate. This cross-validation object is a variation of :class:`KFold`. In the kth split, it returns first k folds as train set and the (k+1)th fold as test set. Note that unlike standard cross-validation methods, successive training sets are supersets of those that come before them. Read more in the :ref:`User Guide `. Parameters ---------- n_splits : int, default=3 Number of splits. Must be at least 1. Examples -------- >>> from sklearn.model_selection import TimeSeriesSplit >>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]]) >>> y = np.array([1, 2, 3, 4]) >>> tscv = TimeSeriesSplit(n_splits=3) >>> print(tscv) # doctest: +NORMALIZE_WHITESPACE TimeSeriesSplit(n_splits=3) >>> for train_index, test_index in tscv.split(X): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [0] TEST: [1] TRAIN: [0 1] TEST: [2] TRAIN: [0 1 2] TEST: [3] >>> for train_index, test_index in tscv.split(X, fixed_length=True): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [0] TEST: [1] TRAIN: [1] TEST: [2] TRAIN: [2] TEST: [3] >>> for train_index, test_index in tscv.split(X, fixed_length=True, ... train_splits=2): ... print("TRAIN:", train_index, "TEST:", test_index) ... X_train, X_test = X[train_index], X[test_index] ... y_train, y_test = y[train_index], y[test_index] TRAIN: [0 1] TEST: [2] TRAIN: [1 2] TEST: [3] Notes ----- When ``fixed_length`` is ``False``, the training set has size ``i * train_splits * n_samples // (n_splits + 1) + n_samples % (n_splits + 1)`` in the ``i``th split, with a test set of size ``n_samples//(n_splits + 1) * test_splits``, where ``n_samples`` is the number of samples. If fixed_length is True, replace ``i`` in the above formulation with 1, and ignore ``n_samples % (n_splits + 1)`` except for the first training set. The number of test sets is ``n_splits + 2 - train_splits - test_splits``. """ def split(self, X, y=None, groups=None, fixed_length=False, train_splits=1, test_splits=1): """Generate indices to split data into training and test set. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Always ignored, exists for compatibility. groups : array-like, with shape (n_samples,), optional Always ignored, exists for compatibility. fixed_length : bool, hether training sets should always have common length train_splits : positive int, for the minimum number of splits to include in training sets test_splits : positive int, for the number of splits to include in the test set Returns ------- train : ndarray The training set indices for that split. 
test : ndarray The testing set indices for that split. """ X, y, groups = indexable(X, y, groups) n_samples = _num_samples(X) n_splits = self.n_splits n_folds = n_splits + 1 train_splits, test_splits = int(train_splits), int(test_splits) if n_folds > n_samples: raise ValueError( ("Cannot have number of folds ={0} greater" " than the number of samples: {1}.").format(n_folds, n_samples)) if (n_folds - train_splits - test_splits) < 0 and (test_splits > 0): raise ValueError( ("Both train_splits and test_splits must be positive" " integers.")) indices = np.arange(n_samples) split_size = (n_samples // n_folds) test_size = split_size * test_splits train_size = split_size * train_splits test_starts = range(train_size + n_samples % n_folds, n_samples - (test_size - split_size), split_size) if fixed_length: for i, test_start in zip(range(len(test_starts)), test_starts): rem = 0 if i == 0: rem = n_samples % n_folds yield (indices[(test_start - train_size - rem):test_start], indices[test_start:test_start + test_size]) else: for test_start in test_starts: yield (indices[:test_start], indices[test_start:test_start + test_size]) class SMAC(bt.Strategy): """A simple moving average crossover strategy; crossing of a fast and slow moving average generates buy/sell signals""" params = {"fast": 20, "slow": 50, # The windows for both fast and slow moving averages "optim": False, "optim_fs": (20, 50)} # Used for optimization; equivalent of fast and slow, but a tuple # The first number in the tuple is the fast MA's window, the # second the slow MA's window def __init__(self): """Initialize the strategy""" self.fastma = dict() self.slowma = dict() self.regime = dict() if self.params.optim: # Use a tuple during optimization self.params.fast, self.params.slow = self.params.optim_fs # fast and slow replaced by tuple's contents if self.params.fast > self.params.slow: raise ValueError( "A SMAC strategy cannot have the fast moving average's window be " + \ "greater than the slow moving average window.") for d in self.getdatanames(): # The moving averages self.fastma[d] = btind.SimpleMovingAverage(self.getdatabyname(d), # The symbol for the moving average period=self.params.fast, # Fast moving average plotname="FastMA: " + d) self.slowma[d] = btind.SimpleMovingAverage(self.getdatabyname(d), # The symbol for the moving average period=self.params.slow, # Slow moving average plotname="SlowMA: " + d) # Get the regime self.regime[d] = self.fastma[d] - self.slowma[d] # Positive when bullish def next(self): """Define what will be done in a single step, including creating and closing trades""" for d in self.getdatanames(): # Looping through all symbols pos = self.getpositionbyname(d).size or 0 if pos == 0: # Are we out of the market? 
# Consider the possibility of entrance # Notice the indexing; [0] always mens the present bar, and [-1] the bar immediately preceding # Thus, the condition below translates to: "If today the regime is bullish (greater than # 0) and yesterday the regime was not bullish" if self.regime[d][0] > 0 and self.regime[d][-1] <= 0: # A buy signal self.buy(data=self.getdatabyname(d)) else: # We have an open position if self.regime[d][0] <= 0 and self.regime[d][-1] > 0: # A sell signal self.sell(data=self.getdatabyname(d)) class PropSizer(bt.Sizer): """A position sizer that will buy as many stocks as necessary for a certain proportion of the portfolio to be committed to the position, while allowing stocks to be bought in batches (say, 100)""" params = {"prop": 0.1, "batch": 100} def _getsizing(self, comminfo, cash, data, isbuy): """Returns the proper sizing""" if isbuy: # Buying target = self.broker.getvalue() * self.params.prop # Ideal total value of the position price = data.close[0] shares_ideal = target / price # How many shares are needed to get target batches = int(shares_ideal / self.params.batch) # How many batches is this trade? shares = batches * self.params.batch # The actual number of shares bought if shares * price > cash: return 0 # Not enough money for this trade else: return shares else: # Selling return self.broker.getposition(data).size # Clear the position class AcctValue(bt.Observer): alias = ('Value',) lines = ('value',) plotinfo = {"plot": True, "subplot": True} def next(self): self.lines.value[0] = self._owner.broker.getvalue() # Get today's account value (cash + stocks) class AcctStats(bt.Analyzer): """A simple analyzer that gets the gain in the value of the account; should be self-explanatory""" def __init__(self): self.start_val = self.strategy.broker.get_value() self.end_val = None def stop(self): self.end_val = self.strategy.broker.get_value() def get_analysis(self): return {"start": self.start_val, "end": self.end_val, "growth": self.end_val - self.start_val, "return": self.end_val / self.start_val} start = dt.datetime(2018, 1, 1) end = dt.datetime(2020, 10, 31) # Different stocks from past posts because of different data source (no plot for NTDOY) symbols = ["BTC-USD", "ETH-USD", "BNB-USD"] datafeeds = {s: web.DataReader(s, "yahoo", start, end) for s in symbols} for df in datafeeds.values(): df["OpenInterest"] = 0 # PandasData reader expects an OpenInterest column; # not provided by Google and we don't use it so set to 0 cerebro = bt.Cerebro(stdstats=False) plot_symbols = ["BTC-USD", "ETH-USD", "BNB-USD"] is_first = True # plot_symbols = [] for s, df in datafeeds.items(): data = bt.feeds.PandasData(dataname=df, name=s) if s in plot_symbols: if is_first: data_main_plot = data is_first = False else: data.plotinfo.plotmaster = data_main_plot else: data.plotinfo.plot = False cerebro.adddata(data) # Give the data to cerebro cerebro.broker.setcash(1000000) cerebro.broker.setcommission(0.02) cerebro.addstrategy(SMAC) cerebro.addobserver(AcctValue) cerebro.addobservermulti(bt.observers.BuySell) # Plots up/down arrows cerebro.addsizer(PropSizer) cerebro.addanalyzer(AcctStats) cerebro.run()
python
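Separately from the backtest wiring above, a small sketch of how TimeSeriesSplitImproved's fixed-length walk-forward splits behave; the array size and split counts are arbitrary choices for illustration:

import numpy as np

X = np.arange(100).reshape(-1, 1)  # stand-in feature matrix with 100 samples
tscv = TimeSeriesSplitImproved(n_splits=9)

# Rolling windows: train on two folds, test on the following fold.
for train_idx, test_idx in tscv.split(X, fixed_length=True,
                                      train_splits=2, test_splits=1):
    print("train", train_idx[0], "-", train_idx[-1],
          "| test", test_idx[0], "-", test_idx[-1])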
import argparse parser = argparse.ArgumentParser(prog='build_snp_map_for_neale_lab_gwas.py', description=''' Build the SNP map table: phased genotype variant <=> Neale's lab GWAS ''') parser.add_argument('--genotype-pattern', help=''' In the form: prefix{chr}suffix. Will load 1..22 chromosomes (no X). ''') parser.add_argument('--genotype-sample', help=''' The corresponding sample file ''') parser.add_argument('--output', help=''' File name of output (if not exists, it will be created) ''') parser.add_argument('--gwas', help=''' Neale's lab GWAS (one GWAS as example, they all share the same variant set) ''') args = parser.parse_args() import logging, time, sys # configing util logging.basicConfig( level = logging.INFO, stream = sys.stderr, format = '%(asctime)s %(message)s', datefmt = '%Y-%m-%d %I:%M:%S %p' ) import bgen_reader import pandas as pd import helper logging.info('Loading GWAS') gwas = pd.read_csv(args.gwas, header=0, sep= '\t', compression='gzip') map_table = pd.DataFrame() for i in range(1, 23): i = str(i) logging.info(f'Processing chr{i}: Loading BGEN') bgen = bgen_reader.read_bgen( args.genotype_pattern.format(chr=i), samples_filepath = args.genotype_sample ) logging.info(f'Processing chr{i}: Loading variant table') variant = bgen["variants"].compute() variant['chrom'] = i logging.info(f'Processing chr{i}: Building variant ID candidates') variant['allele_1st'] = variant['allele_ids'].apply(lambda x: x.split(',')[0]) variant['allele_2nd'] = variant['allele_ids'].apply(lambda x: x.split(',')[1]) variant['varid1'] = variant[['chrom', 'pos', 'allele_1st', 'allele_2nd']].apply(lambda x: helper.make_id(x), axis=1) variant['varid2'] = variant[['chrom', 'pos', 'allele_2nd', 'allele_1st']].apply(lambda x: helper.make_id(x), axis=1) logging.info(f'Processing chr{i}: Running checker') variant_check = helper.join_with_varid( variant['varid1'], variant['varid2'], gwas['variant'] ) variant = pd.merge(variant, variant_check, left_on=['varid1', 'varid2'], right_on=['id1', 'id2'], how='left') map_table = pd.concat([map_table, variant[['chrom', 'pos', 'allele_ids', 'id', 'rsid', 'assigned_id', 'assigned_sign']]]) # save logging.info('Saving the results') map_table.to_csv(args.output, compression='gzip', sep='\t', index = None)
python
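The script above relies on a local helper module (make_id, join_with_varid) that is not shown here. Purely as a hypothetical sketch of the kind of identifier make_id might build from a (chrom, pos, allele, allele) row — the exact format is my assumption, not the project's:

# Hypothetical stand-in for helper.make_id -- the real implementation is not
# included in this file, so the ID format below is only an assumption.
def make_id(row):
    chrom, pos, a1, a2 = row
    return f"{chrom}:{pos}:{a1}:{a2}"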
def hello_world():
    return "hi"
python
import filecmp import os import subprocess import unittest from clockwork import gvcf from cluster_vcf_records import vcf_record modules_dir = os.path.dirname(os.path.abspath(gvcf.__file__)) data_dir = os.path.join(modules_dir, "tests", "data", "gvcf") def lines_from_vcf_ignore_file_date(vcf): with open(vcf) as f: return [x for x in f if not x.startswith("##fileDate=")] class TestGvcf(unittest.TestCase): def test_move_info_fields_to_format(self): record = vcf_record.VcfRecord( "ref\t1\t.\tC\tG\t.\t.\tfoo=bar;spam=eggs\tcleese\tchapman" ) gvcf._move_info_fields_to_format(record) assert record.INFO == {} assert record.FORMAT == {"foo": "bar", "spam": "eggs", "cleese": "chapman"} def test_gvcf_from_minos_vcf_and_samtools_gvcf(self): ref_fasta = os.path.join( data_dir, "gvcf_from_minos_vcf_and_samtools_gvcf.ref.fa" ) minos_vcf = os.path.join( data_dir, "gvcf_from_minos_vcf_and_samtools_gvcf.minos.vcf" ) samtools_vcf = os.path.join( data_dir, "gvcf_from_minos_vcf_and_samtools_gvcf.samtools.vcf" ) tmp_out = "tmp.gvcf_from_minos_vcf_and_samtools_gvcf.out.vcf" subprocess.check_output(f"rm -f {tmp_out}", shell=True) gvcf.gvcf_from_minos_vcf_and_samtools_gvcf( ref_fasta, minos_vcf, samtools_vcf, tmp_out ) expect_lines = lines_from_vcf_ignore_file_date( os.path.join(data_dir, "gvcf_from_minos_vcf_and_samtools_gvcf.out.vcf") ) got_lines = lines_from_vcf_ignore_file_date(tmp_out) self.assertEqual(expect_lines, got_lines) os.unlink(tmp_out) def test_samtools_vcf_record_to_frs(self): record = vcf_record.VcfRecord( "ref\t1\t.\tC\tG\t.\t.\tCALLER=samtools\tDP4\t1,2,14,13" ) self.assertEqual(gvcf._samtools_vcf_record_to_frs(record, 0), 0.1) self.assertEqual(gvcf._samtools_vcf_record_to_frs(record, 1), 0.9) def test_vcf_record_pass_index(self): record = vcf_record.VcfRecord( "ref\t1\t.\tC\tG\t.\t.\tCALLER=samtools\tGT:DP:DP4\t1/1:20:1,2,14,13" ) self.assertEqual(1, gvcf._vcf_record_pass_index(record, min_frs=0.9, min_dp=5)) self.assertEqual( None, gvcf._vcf_record_pass_index(record, min_frs=0.9, min_dp=21) ) self.assertEqual( None, gvcf._vcf_record_pass_index(record, min_frs=0.99, min_dp=5) ) record = vcf_record.VcfRecord( "ref\t1\t.\tC\tG\t.\tPASS\tCALLER=minos\tGT:DP:FRS\t1/1:20:0.95" ) self.assertEqual(1, gvcf._vcf_record_pass_index(record)) self.assertEqual( 1, gvcf._vcf_record_pass_index(record, require_minos_pass=False) ) self.assertEqual(None, gvcf._vcf_record_pass_index(record, min_frs=0.96)) self.assertEqual(None, gvcf._vcf_record_pass_index(record, min_dp=21)) record = vcf_record.VcfRecord( "ref\t1\t.\tC\tG\t.\tFAIL\tCALLER=minos\tGT:DP:FRS\t1/1:20:0.95" ) self.assertEqual(None, gvcf._vcf_record_pass_index(record)) self.assertEqual( 1, gvcf._vcf_record_pass_index(record, require_minos_pass=False) ) self.assertEqual( None, gvcf._vcf_record_pass_index(record, require_minos_pass=False, min_frs=0.96), ) self.assertEqual( None, gvcf._vcf_record_pass_index(record, require_minos_pass=False, min_dp=21), ) def test_gvcf_to_fasta(self): vcf = os.path.join(data_dir, "gvcf_to_fasta.vcf") tmp_out = "tmp.gvcf_to_fasta.fa" subprocess.check_output(f"rm -f {tmp_out}", shell=True) gvcf.gvcf_to_fasta(vcf, tmp_out) expect_fasta = os.path.join(data_dir, "gvcf_to_fasta.fa") self.assertTrue(filecmp.cmp(tmp_out, expect_fasta, shallow=False)) os.unlink(tmp_out)
python
from torch.utils.data import dataloader from torchvision.models.inception import inception_v3 from inception_v4 import inceptionv4 import torch import torch.distributed as dist import argparse import torch.nn as nn import torch.backends.cudnn as cudnn import torch.optim import torch.utils.data import torch.utils.data.distributed from torchvision.models import resnet import torchvision.transforms as transforms import torchvision.datasets as datasets import torchvision.models as models import os from utils import * import time model_names = ['alexnet', 'inception_v3', 'resnet50', 'resnet152', 'vgg16', 'inception_v4'] parser = argparse.ArgumentParser( description="Pytorch imagenet distributed training") parser.add_argument('data', metavar='DIR', help='path to dataset') parser.add_argument('-a', '--arch', metavar='ARCH', default='alexnet', choices=model_names, help='model architecture: ' + ' | '.join(model_names) + ' (default: alexnet)') parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument('--epochs', default=1, type=int, metavar='N', help='number of total epochs to run') parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)') parser.add_argument('-b', '--batch-size', default=64, type=int, metavar='N', help='mini-batch size (default: 64), this is the total ' 'batch size of all GPUs on the current node when ' 'using Data Parallel or Distributed Data Parallel') parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate', dest='lr') parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum') parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)', dest='weight_decay') parser.add_argument('--world-size', default=1, type=int, help='number of nodes for distributed training') parser.add_argument('--rank', default=0, type=int, help='node rank for distributed training') parser.add_argument('--dist-url', default='tcp://localhost:7890', type=str, help='url used to set up distributed training') parser.add_argument('-p', '--print-freq', default=5, type=int, metavar='N', help='print frequency (default: 5)') parser.add_argument('--fast', action='store_true', help='if setted, run only 100 mini batches.') best_acc1 = 0 args = parser.parse_args() def join_process_group(): print('==> Join process group') if dist.is_available() and dist.is_nccl_available(): dist.init_process_group( backend='nccl', init_method=args.dist_url, world_size=args.world_size, rank=args.rank) print('==> Process[{}] is ready.'.format(args.rank)) else: raise RuntimeError( "Error: Pytorch distributed framework or NCCL is unavailable.") def main_worker(): global best_acc1 join_process_group() # create model if args.arch != 'inception_v4': if args.arch != 'inception_v3': model = models.__dict__[args.arch]() else: model = models.inception_v3(aux_logits=False) else: model = inceptionv4(num_classes=1000, pretrained=None) device = torch.device('cuda', 0) # Set reasonable CUDA_VISIBLE_DEVICES model = model.to(device) # ddp model = nn.parallel.DistributedDataParallel(model) criterion = nn.CrossEntropyLoss().to(device) optimizer = torch.optim.SGD(model.parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay) # model size total_params = sum([torch.numel(p) for p in model.parameters()]) print('==> Model({}): {:.2f} MB'.format( 
args.arch, total_params * 4 / (1024 * 1024))) cudnn.benchmark = True # data loading print('==> Create Data Loader') traindir = os.path.join(args.data, 'train') valdir = os.path.join(args.data, 'val') normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) input_size = 224 if args.arch != 'inception_v3' else 299 train_dataset = datasets.ImageFolder( traindir, transforms.Compose([ transforms.RandomResizedCrop(input_size), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ]) ) train_sampler = torch.utils.data.distributed.DistributedSampler( train_dataset) train_loader = torch.utils.data.DataLoader( dataset=train_dataset, batch_size=args.batch_size, num_workers=args.workers, pin_memory=True, sampler=train_sampler ) val_loader = torch.utils.data.DataLoader( datasets.ImageFolder( valdir, transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(input_size), transforms.ToTensor(), normalize, ]) ), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True, ) # train & val iteration print('==> Train and Val') for epoch in range(args.start_epoch, args.epochs): train_sampler.set_epoch(epoch) adjust_learning_rate(optimizer=optimizer, epoch=epoch, args=args) if not args.fast: train(train_loader=train_loader, model=model, criterion=criterion, optimizer=optimizer, epoch=epoch, args=args) else: fast_test(train_loader=train_loader, model=model, criterion=criterion, optimizer=optimizer, args=args) def fast_test(train_loader, model, criterion, optimizer, args): speed_meter = SpeedMerter(is_master=(dist.get_rank() == 0)) model.train() start_time = time.time() for i, (images, target) in enumerate(train_loader): if i == 50: break images = images.cuda(0, non_blocking=True) target = target.cuda(0, non_blocking=True) output = model(images) loss = criterion(output, target) optimizer.zero_grad() loss.backward() optimizer.step() if (i+1) % 10 == 0: end_time = time.time() num_images = args.batch_size * 10 speed = num_images / (end_time - start_time) speed_meter.update(val=speed) print('[{}/50] {} imgs/s'.format(i+1, speed)) start_time = time.time() speed_meter.output() def train(train_loader, model, criterion, optimizer, epoch, args): batch_time = AverageMeter('Time', ':6.3f') data_time = AverageMeter('Data', ':6.3f') losses = AverageMeter('Loss', ':.4e') top1 = AverageMeter('Acc@1', ':6.2f') top5 = AverageMeter('Acc@5', ':6.2f') progress = ProgressMeter( len(train_loader), [batch_time, data_time, losses, top1, top5], prefix="Epoch: [{}]".format(epoch)) # switch to train mode model.train() end = time.time() for i, (images, target) in enumerate(train_loader): # measure data loading time data_time.update(time.time() - end) images = images.cuda(0, non_blocking=True) target = target.cuda(0, non_blocking=True) # compute output output = model(images) loss = criterion(output, target) # measure accuracy and record loss acc1, acc5 = accuracy(output, target, topk=(1, 5)) losses.update(loss.item(), images.size(0)) top1.update(acc1[0], images.size(0)) top5.update(acc5[0], images.size(0)) # compute gradient and do SGD step optimizer.zero_grad() loss.backward() optimizer.step() # measure elapsed time elapsed_time = time.time() - end batch_time.update(elapsed_time) end = time.time() def validate(val_loader, model, criterion, args): batch_time = AverageMeter('Time', ':6.3f') losses = AverageMeter('Loss', ':.4e') top1 = AverageMeter('Acc@1', ':6.2f') top5 = AverageMeter('Acc@5', ':6.2f') progress = ProgressMeter( len(val_loader), [batch_time, 
losses, top1, top5], prefix='Test: ') # switch to evaluate mode model.eval() with torch.no_grad(): end = time.time() for i, (images, target) in enumerate(val_loader): images = images.cuda(0, non_blocking=True) target = target.cuda(0, non_blocking=True) # compute output output = model(images) loss = criterion(output, target) # measure accuracy and record loss acc1, acc5 = accuracy(output, target, topk=(1, 5)) losses.update(loss.item(), images.size(0)) top1.update(acc1[0], images.size(0)) top5.update(acc5[0], images.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() # TODO: this should also be done with the ProgressMeter print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}' .format(top1=top1, top5=top5)) return top1.avg if __name__ == '__main__': torch.cuda.empty_cache() main_worker()
python
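The training script imports AverageMeter, ProgressMeter, and related helpers from a local utils module via `from utils import *`; that module is not shown. As a rough sketch (not the actual utils implementation), a meter of this shape would satisfy the calls made above:

class AverageMeter:
    """Tracks the current value and running average of a metric (sketch only)."""

    def __init__(self, name, fmt=':f'):
        self.name, self.fmt = name, fmt
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count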
# Generated by Django 2.1.3 on 2018-11-02 08:18 from django.db import migrations, models import django.db.models.deletion import mptt.fields class Migration(migrations.Migration): dependencies = [ ('pages', '0007_language_code'), ] operations = [ migrations.AlterField( model_name='media', name='extension', field=models.CharField(blank=True, editable=False, max_length=32), ), migrations.AlterField( model_name='page', name='parent', field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='pages.Page', verbose_name='parent'), ), migrations.AlterField( model_name='page', name='redirect_to', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='redirected_pages', to='pages.Page', verbose_name='redirect to'), ), migrations.AlterField( model_name='page', name='redirect_to_url', field=models.CharField(blank=True, max_length=200, null=True, verbose_name='redirect to url'), ), migrations.AlterField( model_name='pagealias', name='page', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='pages.Page', verbose_name='page'), ), ]
python
from enum import Enum, unique


@unique
class BrowserType(Enum):
    """Class to define browser type, e.g. Chrome, Firefox, etc."""

    CHROME = "Chrome"
    EDGE = "Edge"
    FIREFOX = "Firefox"
    INTERNET_EXPLORER = "Internet Explorer"
    OPERA = "Opera"
    SAFARI = "Safari"
python
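One way an enum like this is typically consumed is as a dispatch key for a driver factory. The snippet below assumes Selenium (which the original module does not mention) and is only an illustration of dispatching on BrowserType:

from selenium import webdriver  # assumption: Selenium is the automation backend

_DRIVER_FACTORIES = {
    BrowserType.CHROME: webdriver.Chrome,
    BrowserType.FIREFOX: webdriver.Firefox,
}


def make_driver(browser: BrowserType):
    try:
        return _DRIVER_FACTORIES[browser]()
    except KeyError:
        raise ValueError(f"No driver factory registered for {browser.value}")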
# -*- coding: utf-8 -*- """End to end test of running a job. :copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved. :license: http://www.apache.org/licenses/LICENSE-2.0.html """ from __future__ import absolute_import, division, print_function import pytest import os # TODO(e-carlin): Tests that need to be implemented # - agent never starts # - agent response is bad (ex no req_id) # - server_req is malformed # - agent starts but we never get an incoming 'read_for_work' message # - canceling of requests in the q and running requests # - using only the resources that are available # - agent sigterm -> sigkill progression # - send kill to uknown agent _REPORT = 'heightWeightReport' def test_runCancel(fc): from pykern import pkunit from pykern.pkdebug import pkdc, pkdp, pkdlog import time d = fc.sr_sim_data() d.models.simulation.name = 'srunit_long_run' d = fc.sr_post( 'runSimulation', dict( forceRun=False, models=d.models, report=_REPORT, simulationId=d.models.simulation.simulationId, simulationType=d.simulationType, ), ) for _ in range(10): assert d.state != 'error' if d.state == 'running': break time.sleep(d.nextRequestSeconds) d = fc.sr_post('runStatus', d.nextRequest) else: pkunit.pkfail('runStatus: failed to start running: {}', d) x = d.nextRequest d = fc.sr_post( 'runCancel', x, ) assert d.state == 'canceled' d = fc.sr_post( 'runStatus', x, ) assert d.state == 'canceled' def test_runSimulation(fc): from pykern import pkunit from pykern.pkdebug import pkdp, pkdlog from sirepo import job import time d = fc.sr_sim_data() d = fc.sr_post( 'runSimulation', dict( forceRun=False, models=d.models, report=_REPORT, simulationId=d.models.simulation.simulationId, simulationType=d.simulationType, ), ) for _ in range(10): pkdlog(d) assert d.state != 'error' if d.state == 'completed': break time.sleep(d.nextRequestSeconds) d = fc.sr_post('runStatus', d.nextRequest) else: pkunit.pkfail('runStatus: failed to complete: {}', d) # Just double-check it actually worked assert u'plots' in d def test_remove_srw_report_dir(fc): from pykern import pkio from pykern import pkunit import sirepo.srdb m = 'intensityReport' data = fc.sr_sim_data('NSLS-II ESM beamline') fc.sr_run_sim(data, m) g = pkio.sorted_glob(sirepo.srdb.root().join('user', fc.sr_uid, 'srw', '*', m)) pkunit.pkeq(1, len(g)) pkio.unchecked_remove(*g) fc.sr_run_sim(data, m)
python
n = int(input())
suma = 0
dif = []
for i in range(n):
    a, b = map(int, input().split())
    suma += b * (n - 1)
    dif.append(a - b)
dif = sorted(dif, reverse=True)
for j in range(n):
    suma += j * dif[j]
print(suma)
python
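A quick worked example of the snippet above, assuming the input is n followed by n pairs (a_i, b_i): for n = 3 with pairs (1, 5), (2, 2), (3, 1), the first loop accumulates (5 + 2 + 1) * (3 - 1) = 16; the differences a - b are [-4, 0, 2], sorted descending to [2, 0, -4]; the second loop then adds 0*2 + 1*0 + 2*(-4) = -8, so the program prints 8.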
"""Tools for converting model parameter from Caffe to Keras.""" import numpy as np import os import sys import shutil import h5py import collections import pickle def dump_weights(model_proto, model_weights, weight_output, shape_output=None, caffe_home='~/caffe'): """Helper function to dump caffe model weithts in keras tf format # Arguments model_proto: path to the caffe model .prototxt file model_weights: path to the caffe model .caffemodel file weight_output: path to HDF5 output file shape_output: path to pickle output file # Notes caffe requres to run the function in python 2.x """ def expand(path): return os.path.abspath(os.path.expanduser(path)) caffe_home = expand(caffe_home) model_proto = expand(model_proto) model_weights = expand(model_weights) #print(caffe_home + '\n' + model_proto + '\n' + model_weights + '\n' + weight_output + '\n' + shape_output ) # import caffe sys.path.insert(0, os.path.join(caffe_home, 'python')) import caffe # create model caffe.set_mode_cpu() net = caffe.Net(model_proto, model_weights, caffe.TEST) if os.path.exists(weight_output): os.remove(weight_output) f = h5py.File(weight_output, 'w') # process the layers layer_names = list(net._layer_names) weights_shape = {} for name in net.params: layer = net.layers[layer_names.index(name)] blobs = net.params[name] blobs_shape = [list(b.shape) for b in blobs] weights_shape[name] = blobs_shape print('%-25s %-20s %-3s %s' % (name, layer.type, len(blobs), blobs_shape)) params = collections.OrderedDict() if layer.type == 'Convolution': W = blobs[0].data W = W.transpose(2,3,1,0) params[name+'_W_1:0'] = W if len(blobs) > 1: b = blobs[1].data params[name+'_b_1:0'] = b elif layer.type == 'Normalize': gamma = blobs[0].data params[name+'_gamma_1:0'] = gamma elif layer.type == 'BatchNorm': size = blobs[0].shape[0] running_mean = blobs[0].data running_std = blobs[1].data gamma = np.empty(size) gamma.fill(blobs[2].data[0]) beta = np.zeros(size) params[name+'_gamma_1:0'] = gamma params[name+'_beta_1:0'] = beta params[name+'_running_mean_1:0'] = running_mean params[name+'_running_std_1:0'] = running_std elif layer.type == 'Scale': gamma = blobs[0].data beta = blobs[1].data params[name+'_gamma_1:0'] = gamma params[name+'_beta_1:0'] = beta elif layer.type == 'InnerProduct': W = blobs[0].data W = W.T b = blobs[1].data params[name+'_W_1:0'] = W params[name+'_b_1:0'] = b else: if len(blobs) > 0: print('UNRECOGNISED BLOBS') # create group and add parameters g = f.create_group(name) for weight_name, value in params.items(): param_dset = g.create_dataset(weight_name, value.shape, dtype=value.dtype) if not value.shape: # scalar param_dset[()] = value else: param_dset[:] = value g.attrs['weight_names'] = list(params.keys()) f.attrs['layer_names'] = layer_names f.flush() f.close() # output model shape if shape_output is not None: output_shape = {} for layer_name, blob in net.blobs.iteritems(): #print('%-40s %s' %(layer_name, str(blob.data.shape))) output_shape[layer_name] = blob.data.shape shape = {} shape['output_shape'] = output_shape shape['weights_shape'] = weights_shape shape_output = expand(shape_output) if os.path.exists(shape_output): os.remove(shape_output) with open(shape_output , 'wb') as f: pickle.dump(shape, f, protocol=pickle.HIGHEST_PROTOCOL) def add_missing_layers(model, input_file_name, output_file_name): """Helper function to add the missing keras layers in a HDF5 file # Arguments model: keras model input_file_name: path to input HDF5 file output_file_name: path to output HDF5 file """ shutil.copy(input_file_name, 
output_file_name) f = h5py.File(output_file_name, 'r+') # add missing layers layer_names_model = [layer.name for layer in model.layers] layer_names_new = [] for name in layer_names_model: if not name in f.keys(): print('add %s' % name) g = f.create_group(name) g.attrs['weight_names'] = [] layer_names_new.append(name) print('update layer_names') f.attrs['layer_names'] = [s.encode('ascii') for s in layer_names_new] f.flush() f.close() def compare_output_shape(model, shape_file): """Compares the output shape of the layers in caffe and keras model # Arguments model: keras model shape_file: path to pickle file dumped by 'dump_weights' """ with open(shape_file, 'rb') as f: shape = pickle.load(f) #print('%-30s %-20s %-20s' % ('', 'caffe shape', 'keras shape')) for layer in model.layers: if layer.name in shape['output_shape']: shape_caffe = list(shape['output_shape'][layer.name][1:]) # TODO: depends on layer type if len(shape_caffe) == 3: shape_caffe_mod = [shape_caffe[1], shape_caffe[2], shape_caffe[0]] else: shape_caffe_mod = list(shape_caffe) shape_keras = list(layer.output_shape[1:]) mismatch = 'mismatch' if (shape_caffe_mod != shape_keras) else '' print('%-30s %-20s %-20s %s' % (layer.name, shape_caffe, shape_keras, mismatch)) #print('%-30s \n%-20s \n%-20s' % (layer.name, shape_caffe, shape_keras)) def compare_weights_shape(model, shape_file): """Compares the parameter shape of the layers in caffe and keras model # Arguments model: keras model shape_file: path to pickle file dumped by 'dump_weights' """ with open(shape_file, 'rb') as f: shape = pickle.load(f) #print('%-30s %-20s %-20s' % ('', 'caffe shape', 'keras shape')) for layer in model.layers: if layer.name in shape['weights_shape']: shape_caffe = shape['weights_shape'][layer.name] # TODO: depends on layer type shape_caffe_mod = [ [s[2],s[3],s[1],s[0]] if len(s) == 4 else s for s in shape_caffe] shape_keras = [w.shape.as_list() for w in layer.weights] mismatch = 'mismatch' if not all([shape_caffe_mod[i] == shape_keras[i] for i in range(len(shape_keras))]) else '' print('%-30s %-40s %-40s %s' % (layer.name, shape_caffe, shape_keras, mismatch)) #print('%-30s \n%-40s \n%-40s' % (layer.name, shape_caffe, shape_keras)) if __name__ == '__main__': model_proto = './resnet152/ResNet-152-deploy.prototxt' model_weights = './resnet152/ResNet-152-model.caffemodel' weights_output = 'resnet152_weights.hdf5' shape_output = 'resnet152_shape.pkl' dump_weights(model_proto, model_weights, weights_output, shape_output=shape_output)
python
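The __main__ block above only dumps the Caffe weights; the remaining helpers are meant to be run afterwards against a Keras model. A hedged sketch of that second step, where build_resnet152() is a placeholder for however the Keras model is actually constructed in a given project:

# Sketch only: build_resnet152() is a hypothetical constructor for the Keras model.
model = build_resnet152()

# Sanity-check layer shapes against the dumped Caffe shapes.
compare_output_shape(model, 'resnet152_shape.pkl')
compare_weights_shape(model, 'resnet152_shape.pkl')

# Pad the HDF5 file with the Keras-only layers, then load weights by name.
add_missing_layers(model, 'resnet152_weights.hdf5', 'resnet152_weights_full.hdf5')
model.load_weights('resnet152_weights_full.hdf5', by_name=True)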
import logging
from typing import List

from homeassistant.helpers.entity import Entity
from gehomesdk.erd import ErdCode, ErdApplianceType

from .base import ApplianceApi
from ..entities import GeErdSensor, GeErdBinarySensor

_LOGGER = logging.getLogger(__name__)


class DryerApi(ApplianceApi):
    """API class for dryer objects"""
    APPLIANCE_TYPE = ErdApplianceType.DRYER

    def get_all_entities(self) -> List[Entity]:
        base_entities = super().get_all_entities()

        dryer_entities = [
            GeErdSensor(self, ErdCode.LAUNDRY_MACHINE_STATE),
            GeErdSensor(self, ErdCode.LAUNDRY_MACHINE_SUBCYCLE),
            GeErdBinarySensor(self, ErdCode.LAUNDRY_END_OF_CYCLE),
            GeErdSensor(self, ErdCode.LAUNDRY_TIME_REMAINING),
            GeErdSensor(self, ErdCode.LAUNDRY_CYCLE),
            GeErdSensor(self, ErdCode.LAUNDRY_DELAY_TIME_REMAINING),
            GeErdSensor(self, ErdCode.LAUNDRY_DOOR),
            GeErdSensor(self, ErdCode.LAUNDRY_DRYNESSNEW_LEVEL),
            GeErdSensor(self, ErdCode.LAUNDRY_TEMPERATURENEW_OPTION),
            GeErdBinarySensor(self, ErdCode.LAUNDRY_REMOTE_STATUS),
        ]
        entities = base_entities + dryer_entities
        return entities
python
import re

m = re.search(r'([a-zA-Z0-9])\1+', input().strip())
print(m.group(1) if m else -1)
python
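For example, given an input such as 12345678910111213141516, the first run of a repeated alphanumeric character is the 11 inside ...10 11 12..., so the script prints 1; if no character is immediately followed by itself, the search fails and -1 is printed.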
import logging
import sys, os
import datetime

import eons, esam
import pandas as pd


# Class name is what is used at cli, so we defy convention here in favor of ease-of-use.
class in_excel(esam.DataFunctor):
    def __init__(self, name=eons.INVALID_NAME()):
        super().__init__(name)
        self.requiredKWArgs.append("file")
        # self.data will be returned, so we shouldn't be asking for it.
        self.requiredKWArgs.remove("data")

    # Override of UserFunctor method.
    def PreCall(self, **kwargs):
        self.Clear()

    def UserFunction(self, **kwargs):
        xlsxFileName = kwargs.get("file")
        xlsx = pd.ExcelFile(xlsxFileName)
        for sheet in xlsx.sheet_names:
            dataFrame = pd.read_excel(xlsx, sheet)
            for i, r in enumerate(dataFrame.to_dict('records')):
                recordDatum = eons.SelfRegistering("Pandatum")
                recordDatum.FromDict(r)
                recordDatum.uniqueId = f"{xlsxFileName}/{sheet}/{i}"
                self.data.AddDatum(recordDatum)
        return self.data
python
from django.test import Client, TestCase from django.urls import reverse from django.contrib.auth import get_user_model from posts.forms import PostForm from posts.models import Post User = get_user_model() class TaskCreateFormTests(TestCase): @classmethod def setUpClass(cls): super().setUpClass() # Создаем форму, если нужна проверка атрибутов cls.form = PostForm() cls.user = User.objects.create_user(username='auth') cls.authorized_client = Client() cls.authorized_client.force_login(cls.user) def test_create_post(self): """Валидная форма создает запись в Post.""" post_count = Post.objects.count() form_data = { 'text': 'Тестовый заголовок', 'pk': 1 } response = self.authorized_client.post( reverse('posts:post_create'), data=form_data, follow=True ) self.assertRedirects(response, reverse( 'posts:profile', kwargs={'username': 'auth'})) self.assertEqual(Post.objects.count(), post_count + 1) self.assertTrue( Post.objects.filter( text='Тестовый заголовок', pk=1 ).exists() ) def test_edit_post(self): form_data = { 'text': 'Тестовый заголовок', 'pk': 1 } response = self.authorized_client.post( reverse('posts:post_create'), data=form_data, follow=True ) form_data = { 'text': 'Тестовый отредактированный', 'pk': 1 } response = self.authorized_client.post( reverse('posts:post_edit', kwargs={'post_id': 1}), data=form_data, follow=True ) self.assertRedirects(response, reverse( 'posts:post_detail', kwargs={'post_id': 1})) self.assertTrue( Post.objects.filter( text='Тестовый отредактированный', pk=1 ).exists() )
python
from abc import ABC, abstractmethod
import asyncio
from typing import Callable


class AbstractConnectSignal(ABC):
    def __init__(self) -> None:
        self.targets = set()

    def connect(self, target: Callable):
        if target not in self.targets:
            self.targets.add(target)

    @abstractmethod
    async def emit(self, *args, **kwargs):
        # IDEA maybe as asyncio.task
        await self._emit_to_targets(*args, **kwargs)

    async def _emit_to_targets(self, *args, **kwargs):
        for target in self.targets:
            if asyncio.iscoroutinefunction(target):
                asyncio.create_task(target(*args, **kwargs))
            else:
                target(*args, **kwargs)
python
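A small usage sketch of the signal class above; the subclass name and the connected targets are illustrative:

class DataReady(AbstractConnectSignal):
    async def emit(self, payload):
        # Forward to all connected targets (sync called directly, async as tasks).
        await self._emit_to_targets(payload)


async def main():
    signal = DataReady()
    signal.connect(lambda payload: print("sync target got", payload))

    async def async_target(payload):
        print("async target got", payload)

    signal.connect(async_target)
    await signal.emit({"answer": 42})
    await asyncio.sleep(0)  # give the scheduled async target a chance to run


asyncio.run(main())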
# pip install pdfplumber
import pdfplumber

pdf = pdfplumber.open('./Relação')
paginas = len(pdf.pages)  # number of pages

text = ""
for i in range(paginas):
    page = pdf.pages[i]
    text += page.extract_text()

print(text)
python
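A slightly more defensive variant of the same extraction, assuming the path is valid: pdfplumber objects support the context-manager protocol, and extract_text() can return None for pages without a text layer, which the original loop would turn into a TypeError:

import pdfplumber

with pdfplumber.open('./Relação') as pdf:
    text = "\n".join((page.extract_text() or "") for page in pdf.pages)

print(text)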
import logging import json logger = logging.getLogger(__name__) def __virtual__(): ''' Only load if jenkins_common module exist. ''' if 'jenkins_common.call_groovy_script' not in __salt__: return ( False, 'The jenkins_smtp state module cannot be loaded: ' 'jenkins_common not found') return True def config(name, host, username, password, reply_to=None, port=25, ssl=False, charset="UTF-8"): """ Jenkins SMTP server config state method :param name: configuration name :param host: SMTP host :param username: SMTP username :param password: SMTP password :param reply_to: sent emails ReplyTo header (optional) :param port: SMTP port (optional, default 25) :param ssl: use SSL for SMTP (optional, default False) :param charset: SMTP charset (optional, default UTF-8) :returns: salt-specified state dict """ template = __salt__['jenkins_common.load_template']( 'salt://jenkins/files/groovy/smtp.template', __env__) return __salt__['jenkins_common.api_call'](name, template, ['CHANGED', 'EXISTS'], {'params': json.dumps({ 'username': username, 'password': password, 'host': host, 'useReplyTo': True if reply_to else False, 'replyTo': reply_to, 'port': port if port else 25, 'ssl': True if ssl else False, 'charset': charset if charset else 'UTF-8' }) }, 'SMTP config') def admin_email(name, email): """ Jenkins Admin user email config state method :param name: jenkins admin email :returns: salt-specified state dict """ template = __salt__['jenkins_common.load_template']( 'salt://jenkins/files/groovy/admin_email.template', __env__) return __salt__['jenkins_common.api_call'](name, template, ['CHANGED', 'EXISTS'], {'email': email}, 'Admin email config')
python
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserChangeForm, UserCreationForm

CustomUser = get_user_model()


# TODO: are we using this form now that we have django-allauth?
class CustomUserCreationForm(UserCreationForm):
    class Meta:
        model = CustomUser
        fields = (
            "email",
            "username",
        )


class CustomUserChangeForm(UserChangeForm):
    class Meta:
        model = CustomUser
        fields = (
            "email",
            "username",
        )
python
from fairseq.tasks import register_task from fairseq.tasks.translation import TranslationTask from fairseq import utils, search from glob import glob import os from morphodropout.binarize import SRC_SIDE, TGT_SIDE from morphodropout.dataset import build_combined_dataset from morphodropout.seq_gen import SequenceGenerator @register_task('morpho_translation') class MorphoTranslation(TranslationTask): def __init__(self, args, src_dict, tgt_dict): super().__init__(args, src_dict, tgt_dict) self.morpho_dropout_final = args.morpho_dropout self.morpho_dropout_initial = args.morpho_dropout_initial self.morpho_dropout_end_epoch = args.morpho_dropout_end_epoch @staticmethod def add_args(parser): TranslationTask.add_args(parser) parser.add_argument('--morpho-dropout', type=float, default=0.5) parser.add_argument('--morpho-dropout-initial', type=float, default=None) parser.add_argument('--morpho-dropout-end-epoch', type=int, default=None) def morpho_dropout_for(self, epoch: int) -> float: if self.morpho_dropout_initial is None: return self.morpho_dropout_final assert self.morpho_dropout_end_epoch is not None initial = self.morpho_dropout_initial final = self.morpho_dropout_final period = float(self.morpho_dropout_end_epoch) morpho_dropout = initial + (min(epoch, period) * (final - initial) / period) return morpho_dropout def load_dataset(self, split, epoch=1, combine=False, **kwargs): paths = utils.split_paths(self.args.data) assert len(paths) > 0 data_path = paths[(epoch - 1) % len(paths)] split_path = os.path.join(data_path, split) src_data_files = glob(split_path + "_{}_".format(SRC_SIDE) + "*") tgt_data_files = glob(split_path + "_{}_".format(TGT_SIDE) + "*") data_files = src_data_files + tgt_data_files if not data_files: raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path)) self.datasets[split] = build_combined_dataset( self.src_dict, src_data_files, self.morpho_dropout_for(epoch) if split == 'train' else 0.0, self.tgt_dict, tgt_data_files, self.args.seed, epoch, ) def build_generator(self, models, args): # copied from fairseq_task.py to choose our implementation # Choose search strategy. Defaults to Beam Search. 
sampling = getattr(args, "sampling", False) sampling_topk = getattr(args, "sampling_topk", -1) sampling_topp = getattr(args, "sampling_topp", -1.0) diverse_beam_groups = getattr(args, "diverse_beam_groups", -1) diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5) match_source_len = getattr(args, "match_source_len", False) diversity_rate = getattr(args, "diversity_rate", -1) if ( sum( int(cond) for cond in [ sampling, diverse_beam_groups > 0, match_source_len, diversity_rate > 0, ] ) > 1 ): raise ValueError("Provided Search parameters are mutually exclusive.") assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling" assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling" if sampling: search_strategy = search.Sampling( self.target_dictionary, sampling_topk, sampling_topp ) elif diverse_beam_groups > 0: search_strategy = search.DiverseBeamSearch( self.target_dictionary, diverse_beam_groups, diverse_beam_strength ) elif match_source_len: # this is useful for tagging applications where the output # length should match the input length, so we hardcode the # length constraints for simplicity search_strategy = search.LengthConstrainedBeamSearch( self.target_dictionary, min_len_a=1, min_len_b=0, max_len_a=1, max_len_b=0, ) elif diversity_rate > -1: search_strategy = search.DiverseSiblingsSearch( self.target_dictionary, diversity_rate ) else: search_strategy = search.BeamSearch(self.target_dictionary) return SequenceGenerator( models, self.target_dictionary, beam_size=getattr(args, "beam", 5), max_len_a=getattr(args, "max_len_a", 0), max_len_b=getattr(args, "max_len_b", 200), min_len=getattr(args, "min_len", 1), normalize_scores=(not getattr(args, "unnormalized", False)), len_penalty=getattr(args, "lenpen", 1), unk_penalty=getattr(args, "unkpen", 0), temperature=getattr(args, "temperature", 1.0), match_source_len=getattr(args, "match_source_len", False), no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0), search_strategy=search_strategy, )
python
name = "pip_test_package"
python
#!/usr/bin/env python import os, os.path, sys import socket if __name__ == "__main__": PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..',)) print "PROJECT_ROOT=", PROJECT_ROOT sys.path.append(PROJECT_ROOT) # Add virtualenv dirs to python path host = socket.gethostname() print "HOSTNAME=%s" % host if host=='irrigatorpro': if "test" in PROJECT_ROOT: VIRTUAL_ENV_ROOT = '/www/VirtualEnvs/test/' else: VIRTUAL_ENV_ROOT = '/www/VirtualEnvs/irrigator_pro/' else: VIRTUAL_ENV_ROOT = os.path.join( PROJECT_ROOT, 'VirtualEnvs', 'irrigator_pro') print "VIRTUAL_ENV_ROOT='%s'" % VIRTUAL_ENV_ROOT activate_this = os.path.join(VIRTUAL_ENV_ROOT, 'bin', 'activate_this.py') execfile(activate_this, dict(__file__=activate_this)) # Get settings os.environ.setdefault("DJANGO_SETTINGS_MODULE", "irrigator_pro.settings") import django django.setup() from farms.unified_field_data import generate_objects from farms.models import * from datetime import date, datetime from django.contrib.auth.models import User # Get the cumulative report in a given date range. user = User.objects.get(email='[email protected]') print "user: ", user # Get a crop season crop_season = CropSeason.objects.get(name='Corn 2015', description='mine') # need one with probes. field = Field.objects.get(name='North') print 'crop season: ', crop_season print 'field: ', field unified_records = generate_objects(crop_season, field, user, date.today()) for r in unified_records: print r.date print r.water_register print r.uga_records print r.manual_records
python
import glob import os import re import requests from Bio.SeqIO import SeqRecord from Bio import SeqIO from .utils import is_fasta class PrimerDesigner: """Class for designing primers from FASTA files. It will send a FASTA alignment to `primers4clades`_ in order to design degenerate primers. Input data needed is an alignment in FASTA format containing at least 4 sequences. It is recommended that the beginning of each FASTA sequence description contains the taxon name between square brackets. Parameters: folder (str): path of folder containing the FASTA file alignments taxon_for_codon_usage (str): optional taxon name that will be inserted in the description of FASTA sequences between square brackets so that can be used by primer4clades to infer the codon table to use tm (str): temperature min_amplength (str): minimum amplicon length max_amplength (str): maximum amplicon length gencode (str): genetic code. See below for all available genetic codes clustype (str): cluster distance metric: ``dna``, ``protein``. amptype (str): substitution model used to estimate phylogenetic information email (str): your email address so that primer4clades can send you email with detailed results Example: >>> # The values shown are the default. Change them if needed. >>> from primer_designer import PrimerDesigner >>> pd = PrimerDesigner() >>> pd.folder = "alignments" # folder containing the FASTA file alignments >>> pd.tm = "55" # annealing temperature >>> pd.min_amplength = "250" # minimum amplicon length >>> pd.max_amplength = "500" # maximum amplicon length >>> pd.gencode = "universal" # see below for all available genetic codes >>> pd.mode = "primers" >>> pd.clustype = "dna" >>> pd.amptype = "dna_GTRG" # substitution model used to estimate phylogenetic information >>> pd.email = "[email protected]" # primer4clades will send you an email with very detailed results >>> pd.design_primers() >>> >>> # You can input a taxon name to include in the description of every >>> # FASTA sequence so that primer4clades can infer the correct codon >>> # table to apply to the analysis. >>> pd.taxon_for_codon_usage = "Bombyx mori" >>> pd.design_primers() The best primer pairs will be printed to your screen. Detailed results will be saved as HTML files in your alignments folder. But it is recommended if you also get the results by email. primers4clades_ will send you one email for each alignment. 
The genetic code table (variable ``gencode``) can be any of the following: * ``universal`` for standard * ``2`` for vertebrate mitochondrial * ``3`` for yeast mitochondrial * ``4`` for mold and protozoa mitochondrial * ``5`` for invertebrate mitochondrial * ``6`` for ciliate * ``9`` for echinoderm and flatworm * ``10`` for euplotid nuclear * ``11`` for bacterial and plastid * ``12`` for alternative yeast nuclear * ``13`` for ascidian mitochondrial * ``14`` for flatworm mitochondrial * ``15`` for Blepharisma nuclear * ``16`` for Chlorophycean mitochondrial * ``21`` for Trematode mitochondrial * ``22`` for Scenedesmus obliquus mitochondrial * ``23`` for Thraustochytrium mitochondrial The evolutionary substitution model can be any of the following (variable ``amptype``): * ``protein_WAGG`` for protein WAG+G * ``protein_JTTG`` for protein JTT+G * ``protein_Blosum62G`` for protein Blosum62+G * ``protein_VTG`` for protein VT+G * ``protein_DayhoffG`` for protein Dayhoff+G * ``protein_MtREVG`` for protein MtREV+G * ``dna_HKYG`` for dna HKY+G * ``dna_GTRG`` for dna GTR+G * ``dna_K80G`` for dna K80+G * ``dna_TrNG`` for dna TrN+G * ``dna_JC69G`` for dna JC69+G .. _primers4clades: http://floresta.eead.csic.es/primers4clades/#0 """ def __init__(self, folder=None, taxon_for_codon_usage=None, tm="55", min_amplength="100", max_amplength="500", gencode="universal", mode="primers", clustype="dna", amptype="dna_GTR", email=None): self.folder = folder self.taxon_for_codon_usage = taxon_for_codon_usage self.tm = tm self.min_amplength = min_amplength self.max_amplength = max_amplength self.gencode = gencode self.mode = mode self.clustype = clustype self.amptype = amptype self.email = email self.report = "" def design_primers(self): alns = self.get_alignments() if alns: self.call_primer4clades_for_primers(alns) # Write primers to alignment file with open("primers_report.txt", "a") as handle: handle.write(self.report) print("\nDone.\nAll primers have been saved in the file \"primers_report.txt\"") return self.report else: msg = "\nError! the folder {0} is empty.\n".format(self.folder) raise AttributeError(msg) def call_primer4clades_for_primers(self, alns): for aln in alns: if is_fasta(aln): if self.taxon_for_codon_usage: aln = self.insert_taxon_in_new_fasta_file(aln) print("\nProcessing file \"{0}\"".format(aln)) r = self.request_primers(aln) self.process_response(aln, r.text) def get_alignments(self): if os.path.exists(self.folder): all_files = os.path.join(self.folder, "*") alns = glob.glob(all_files) else: msg = "\nError! the folder {0} does not exist.\n".format(self.folder) raise AttributeError(msg) return alns def insert_taxon_in_new_fasta_file(self, aln): """primer4clades infers the codon usage table from the taxon names in the sequences. These names need to be enclosed by square brackets and be present in the description of the FASTA sequence. The position is not important. I will insert the names in the description in a new FASTA file. Returns: Filename of modified FASTA file that includes the name of the taxon. 
""" new_seq_records = [] for seq_record in SeqIO.parse(aln, 'fasta'): new_seq_record_id = "[{0}] {1}".format(self.taxon_for_codon_usage, seq_record.id) new_seq_record = SeqRecord(seq_record.seq, id=new_seq_record_id) new_seq_records.append(new_seq_record) base_filename = os.path.splitext(aln) new_filename = '{0}_modified{1}'.format(base_filename[0], base_filename[1]) SeqIO.write(new_seq_records, new_filename, "fasta") return new_filename def process_response(self, aln, response_body): this_file = os.path.split(aln)[1] this_file = re.sub(".fas.*", "", this_file) msg = 'Writing detailed results as file "{0}.html"'.format(aln) print(msg) with open("{0}.html".format(aln), "w") as handle: handle.write(response_body) self.make_report_from_html_file(response_body, this_file) def make_report_from_html_file(self, response_body, this_file): """Processes the results from primer4clades (a html file). Makes a report based on the best possible primer pair (with highest quality and longest amplicon). """ amplicon_tuples = self.get_amplicon_data_as_tuples(response_body) best_amplicon = self.choose_best_amplicon(amplicon_tuples) if best_amplicon is not None: self.report += """\n\n\ #################################################### # Alignment {0} """.format(this_file) self.report += self.format_amplicon(best_amplicon) def get_amplicon_data_as_tuples(self, response_body): amplicons = re.findall("(## Amplicon.+) codon", response_body) primers_codehop = self.group_primers(re.findall("(\w+ codeh)_corr.+\n", response_body)) primers_relaxed = self.group_primers(re.findall("(\w+ relax)_corr.+\n", response_body)) primers_degen = self.group_primers(re.findall("(\w+ degen)_corr.+\n", response_body)) primer_pair_qualities = re.findall("# primer pair.+= ([0-9]+)%\n", response_body) expected_pcr_product_lengths = re.findall("# expected PCR .+= ([0-9]+)\n", response_body) forward_temperatures = re.findall("(# fwd: minTm.+)\n", response_body) reverse_temperatures = re.findall("(# rev: minTm.+)\n", response_body) amplicon_tuples = zip(amplicons, primers_codehop, primers_relaxed, primers_degen, primer_pair_qualities, expected_pcr_product_lengths, forward_temperatures, reverse_temperatures) return amplicon_tuples def format_amplicon(self, best_amplicon): best_amplicon_formatted = "" for idx, value in enumerate(best_amplicon): if idx == 0: best_amplicon_formatted += "{0}".format(value).replace("##", "# Best") elif idx in [2, 3]: best_amplicon_formatted += "\n\n{0}".format(value) elif idx == 4: best_amplicon_formatted += "\n\n# primer pair quality = {0}%".format( value) elif idx == 5: best_amplicon_formatted += "\n# expected PCR product length (nt) = {0}".format( value) else: best_amplicon_formatted += "\n{0}".format(value) return best_amplicon_formatted def group_primers(self, my_list): """Group elements in list by certain number 'n'""" new_list = [] n = 2 for i in range(0, len(my_list), n): grouped_primers = my_list[i:i + n] forward_primer = grouped_primers[0].split(" ") reverse_primer = grouped_primers[1].split(" ") formatted_primers = ">F_{0}\n{1}".format(forward_primer[1], forward_primer[0]) formatted_primers += "\n>R_{0}\n{1}".format(reverse_primer[1], reverse_primer[0]) new_list.append(formatted_primers) return new_list def choose_best_amplicon(self, amplicon_tuples): """Iterates over amplicon tuples and returns the one with highest quality and amplicon length. 
""" quality = 0 amplicon_length = 0 best_amplicon = None for amplicon in amplicon_tuples: if int(amplicon[4]) >= quality and int(amplicon[5]) >= amplicon_length: quality = int(amplicon[4]) amplicon_length = int(amplicon[5]) best_amplicon = amplicon return best_amplicon def request_primers(self, aln): url = "http://floresta.eead.csic.es/primers4clades/primers4clades.cgi" params = { 'tm': self.tm, 'min_amplength': self.min_amplength, 'max_amplength': self.max_amplength, 'mode': self.mode, 'gencode': self.gencode, 'clustype': self.clustype, 'email': self.email, } files = {'sequencefile': open(aln, 'rb')} r = requests.post(url, files=files, data=params) return r
python
#!/bin/env python3 import random import sys import os import time from collections import defaultdict from typing import Dict, Tuple, Union, Set import requests sys.path.append(os.path.dirname(os.path.abspath(__file__))) import expand_utilities as eu from expand_utilities import QGOrganizedKnowledgeGraph sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../") # ARAXQuery directory from ARAX_response import ARAXResponse sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../") # ARAX directory from biolink_helper import BiolinkHelper sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../../") # code directory from RTXConfiguration import RTXConfiguration sys.path.append(os.path.dirname(os.path.abspath(__file__))+"/../../UI/OpenAPI/python-flask-server/") from openapi_server.models.node import Node from openapi_server.models.edge import Edge from openapi_server.models.attribute import Attribute from openapi_server.models.query_graph import QueryGraph class KG2Querier: def __init__(self, response_object: ARAXResponse): self.response = response_object self.biolink_helper = BiolinkHelper() self.kg2_infores_curie = "infores:rtx-kg2" self.max_allowed_edges = 1000000 self.max_edges_per_input_curie = 1000 self.curie_batch_size = 100 def answer_one_hop_query(self, query_graph: QueryGraph) -> QGOrganizedKnowledgeGraph: """ This function answers a one-hop (single-edge) query using KG2c, via PloverDB. :param query_graph: A TRAPI query graph. :return: An (almost) TRAPI knowledge graph containing all of the nodes and edges returned as results for the query. (Organized by QG IDs.) """ log = self.response final_kg = QGOrganizedKnowledgeGraph() # Verify this is a valid one-hop query graph if len(query_graph.edges) != 1: log.error(f"answer_one_hop_query() was passed a query graph that is not one-hop: " f"{query_graph.to_dict()}", error_code="InvalidQuery") return final_kg if len(query_graph.nodes) != 2: log.error(f"answer_one_hop_query() was passed a query graph with more than two nodes: " f"{query_graph.to_dict()}", error_code="InvalidQuery") return final_kg # Get canonical versions of the input curies qnode_keys_with_curies = [qnode_key for qnode_key, qnode in query_graph.nodes.items() if qnode.ids] for qnode_key in qnode_keys_with_curies: qnode = query_graph.nodes[qnode_key] canonical_curies = eu.get_canonical_curies_list(qnode.ids, log) log.debug(f"Using {len(canonical_curies)} curies as canonical curies for qnode {qnode_key}") qnode.ids = canonical_curies qnode.categories = None # Important to clear this, otherwise results are limited (#889) # Send the query to plover in batches of input curies qedge_key = next(qedge_key for qedge_key in query_graph.edges) input_qnode_key = self._get_input_qnode_key(query_graph) input_curies = query_graph.nodes[input_qnode_key].ids input_curie_set = set(input_curies) curie_batches = [input_curies[i:i+self.curie_batch_size] for i in range(0, len(input_curies), self.curie_batch_size)] log.debug(f"Split {len(input_curies)} input curies into {len(curie_batches)} batches to send to Plover") log.info(f"Max edges allowed per input curie for this query is: {self.max_edges_per_input_curie}") batch_num = 1 for curie_batch in curie_batches: log.debug(f"Sending batch {batch_num} to Plover (has {len(curie_batch)} input curies)") query_graph.nodes[input_qnode_key].ids = curie_batch plover_answer, response_status = self._answer_query_using_plover(query_graph, log) if response_status == 200: batch_kg = 
self._load_plover_answer_into_object_model(plover_answer, log) final_kg = eu.merge_two_kgs(batch_kg, final_kg) # Prune down highly-connected input curies if we're over the max number of allowed edges if final_kg.edges_by_qg_id.get(qedge_key): if len(final_kg.edges_by_qg_id[qedge_key]) > self.max_allowed_edges: log.debug(f"Have exceeded max num allowed edges ({self.max_allowed_edges}); will attempt to " f"reduce the number of edges by pruning down highly connected nodes") final_kg = self._prune_highly_connected_nodes(final_kg, qedge_key, input_curie_set, input_qnode_key, self.max_edges_per_input_curie, log) # Error out if this pruning wasn't sufficient to bring down the edge count if len(final_kg.edges_by_qg_id[qedge_key]) > self.max_allowed_edges: log.error(f"Query for qedge {qedge_key} produced more than {self.max_allowed_edges} edges, " f"which is too much for the system to handle. You must somehow make your query " f"smaller (specify fewer input curies or use more specific predicates/categories).", error_code="QueryTooLarge") return final_kg else: log.error(f"Plover returned response of {response_status}. Answer was: {plover_answer}", error_code="RequestFailed") return final_kg batch_num += 1 return final_kg def answer_single_node_query(self, single_node_qg: QueryGraph) -> QGOrganizedKnowledgeGraph: log = self.response qnode_key = next(qnode_key for qnode_key in single_node_qg.nodes) qnode = single_node_qg.nodes[qnode_key] final_kg = QGOrganizedKnowledgeGraph() # Convert qnode curies as needed (either to synonyms or to canonical versions) if qnode.ids: qnode.ids = eu.get_canonical_curies_list(qnode.ids, log) qnode.categories = None # Important to clear this to avoid discrepancies in types for particular concepts # Send request to plover plover_answer, response_status = self._answer_query_using_plover(single_node_qg, log) if response_status == 200: final_kg = self._load_plover_answer_into_object_model(plover_answer, log) else: log.error(f"Plover returned response of {response_status}. 
Answer was: {plover_answer}", error_code="RequestFailed") return final_kg @staticmethod def _prune_highly_connected_nodes(kg: QGOrganizedKnowledgeGraph, qedge_key: str, input_curies: Set[str], input_qnode_key: str, max_edges_per_input_curie: int, log: ARAXResponse) -> QGOrganizedKnowledgeGraph: # First create a lookup of which edges belong to which input curies input_nodes_to_edges_dict = defaultdict(set) for edge_key, edge in kg.edges_by_qg_id[qedge_key].items(): if edge.subject in input_curies: input_nodes_to_edges_dict[edge.subject].add(edge_key) if edge.object in input_curies: input_nodes_to_edges_dict[edge.object].add(edge_key) # Then prune down highly-connected nodes (delete edges per input curie in excess of some set limit) for node_key, connected_edge_keys in input_nodes_to_edges_dict.items(): connected_edge_keys_list = list(connected_edge_keys) if len(connected_edge_keys_list) > max_edges_per_input_curie: random.shuffle(connected_edge_keys_list) # Make it random which edges we keep for this input curie edge_keys_to_remove = connected_edge_keys_list[max_edges_per_input_curie:] log.debug(f"Randomly removing {len(edge_keys_to_remove)} edges from answer for input curie {node_key}") for edge_key in edge_keys_to_remove: kg.edges_by_qg_id[qedge_key].pop(edge_key, None) # Document that not all answers for this input curie are included node = kg.nodes_by_qg_id[input_qnode_key].get(node_key) if node: if not node.attributes: node.attributes = [] if not any(attribute.attribute_type_id == "biolink:incomplete_result_set" for attribute in node.attributes): node.attributes.append(Attribute(attribute_type_id="biolink:incomplete_result_set", # TODO: request this as actual biolink item? value_type_id="metatype:Boolean", value=True, attribute_source="infores:rtx-kg2", description=f"This attribute indicates that not all " f"nodes/edges returned as answers for this input " f"curie were included in the final answer due to " f"size limitations. 
{max_edges_per_input_curie} " f"edges for this input curie were kept.")) # Then delete any nodes orphaned by removal of edges node_keys_used_by_edges = kg.get_all_node_keys_used_by_edges() for qnode_key, nodes in kg.nodes_by_qg_id.items(): orphan_node_keys = set(nodes).difference(node_keys_used_by_edges) if orphan_node_keys: log.debug(f"Removing {len(orphan_node_keys)} {qnode_key} nodes orphaned by the above step") for orphan_node_key in orphan_node_keys: del kg.nodes_by_qg_id[qnode_key][orphan_node_key] return kg @staticmethod def _answer_query_using_plover(qg: QueryGraph, log: ARAXResponse) -> Tuple[Dict[str, Dict[str, Union[set, dict]]], int]: rtxc = RTXConfiguration() rtxc.live = "Production" # First prep the query graph (requires some minor additions for Plover) dict_qg = qg.to_dict() dict_qg["include_metadata"] = True # Ask plover to return node/edge objects (not just IDs) dict_qg["respect_predicate_symmetry"] = True # Ignore direction for symmetric predicate, enforce for asymmetric # Allow subclass_of reasoning for qnodes with a small number of curies for qnode in dict_qg["nodes"].values(): if qnode.get("ids") and len(qnode["ids"]) < 5: if "allow_subclasses" not in qnode or qnode["allow_subclasses"] is None: qnode["allow_subclasses"] = True # Then send the actual query response = requests.post(f"{rtxc.plover_url}/query", json=dict_qg, timeout=60, headers={'accept': 'application/json'}) if response.status_code == 200: log.debug(f"Got response back from Plover") return response.json(), response.status_code else: log.warning(f"Plover returned a status code of {response.status_code}. Response was: {response.text}") return dict(), response.status_code def _load_plover_answer_into_object_model(self, plover_answer: Dict[str, Dict[str, Union[set, dict]]], log: ARAXResponse) -> QGOrganizedKnowledgeGraph: answer_kg = QGOrganizedKnowledgeGraph() # Load returned nodes into TRAPI object model for qnode_key, nodes in plover_answer["nodes"].items(): num_nodes = len(nodes) log.debug(f"Loading {num_nodes} {qnode_key} nodes into TRAPI object model") start = time.time() for node_key, node_tuple in nodes.items(): node = self._convert_kg2c_plover_node_to_trapi_node(node_tuple) answer_kg.add_node(node_key, node, qnode_key) log.debug(f"Loading {num_nodes} {qnode_key} nodes into TRAPI object model took " f"{round(time.time() - start, 2)} seconds") # Load returned edges into TRAPI object model for qedge_key, edges in plover_answer["edges"].items(): num_edges = len(edges) log.debug(f"Loading {num_edges} edges into TRAPI object model") start = time.time() for edge_key, edge_tuple in edges.items(): edge = self._convert_kg2c_plover_edge_to_trapi_edge(edge_tuple) answer_kg.add_edge(edge_key, edge, qedge_key) log.debug(f"Loading {num_edges} {qedge_key} edges into TRAPI object model took " f"{round(time.time() - start, 2)} seconds") return answer_kg @staticmethod def _convert_kg2c_plover_node_to_trapi_node(node_tuple: list) -> Node: node = Node(name=node_tuple[0], categories=eu.convert_to_list(node_tuple[1])) return node def _convert_kg2c_plover_edge_to_trapi_edge(self, edge_tuple: list) -> Edge: edge = Edge(subject=edge_tuple[0], object=edge_tuple[1], predicate=edge_tuple[2], attributes=[]) knowledge_sources = edge_tuple[3] # Indicate that this edge came from the KG2 KP edge.attributes.append(Attribute(attribute_type_id="biolink:aggregator_knowledge_source", value=self.kg2_infores_curie, value_type_id="biolink:InformationResource", attribute_source=self.kg2_infores_curie)) # Create knowledge source attributes for 
each of this edge's knowledge sources knowledge_source_attributes = [Attribute(attribute_type_id="biolink:knowledge_source", value=infores_curie, value_type_id="biolink:InformationResource", attribute_source=self.kg2_infores_curie) for infores_curie in knowledge_sources] edge.attributes += knowledge_source_attributes return edge @staticmethod def _get_input_qnode_key(one_hop_qg: QueryGraph) -> str: qedge = next(qedge for qedge in one_hop_qg.edges.values()) qnode_a_key = qedge.subject qnode_b_key = qedge.object qnode_a = one_hop_qg.nodes[qnode_a_key] qnode_b = one_hop_qg.nodes[qnode_b_key] if qnode_a.ids and qnode_b.ids: # Considering the qnode with fewer curies the 'input' is more efficient for querying Plover return qnode_a_key if len(qnode_a.ids) < len(qnode_b.ids) else qnode_b_key elif qnode_a.ids: return qnode_a_key else: return qnode_b_key
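# Standalone sketch (not part of the querier class) of the two list manipulations
# used above, shown on plain Python data: curie batching and the per-input-curie
# edge cap. The batch size and cap values here are illustrative only.
import random

input_curies = ["CURIE:{}".format(i) for i in range(7)]
batch_size = 3
batches = [input_curies[i:i + batch_size] for i in range(0, len(input_curies), batch_size)]
assert [len(b) for b in batches] == [3, 3, 1]

# Per-input-curie pruning: keep at most `cap` randomly chosen edges for one curie.
edges_for_curie = ["edge_{}".format(i) for i in range(10)]
cap = 4
random.shuffle(edges_for_curie)  # makes the kept subset random, as in _prune_highly_connected_nodes
kept, removed = edges_for_curie[:cap], edges_for_curie[cap:]
assert len(kept) == cap and len(removed) == len(edges_for_curie) - cap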
python
import pprint

import cyok

bit_file = 'foobar.bit'

# load DLL
cyok.load_library()

# check version
print('FrontPanel DLL built on: %s, %s' % cyok.get_version())

# connect to device
dev = cyok.PyFrontPanel()
print('Opening device connection.')
dev.open_by_serial()

print('Getting device information.')
dev_info = dev.get_device_info()
pprint.pprint(dev_info)

print('Program FPGA with bit file.')
dev.configure_fpga(bit_file)

if not dev.is_front_panel_enabled():
    raise ValueError('FrontPanel is not enabled on the device.')

print('Closing device.')
dev.close()

# free DLL
cyok.free_library()
python
import electrum
from aiohttp import web

from base import BaseDaemon


class BTCDaemon(BaseDaemon):
    name = "BTC"
    electrum = electrum
    DEFAULT_PORT = 5000


daemon = BTCDaemon()
app = web.Application()
daemon.configure_app(app)
daemon.start(app)
python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


def migrate_HeatSensor(apps, schema_editor):
    HeatSensor = apps.get_model('heatcontrol', 'HeatSensor')
    HeatControl = apps.get_model('heatcontrol', 'HeatControl')
    HeatControlProfile = apps.get_model('heatcontrol', 'HeatControlProfile')
    for hs in HeatSensor.objects.select_related('sensor', 'daytype').all():
        hc, created = HeatControl.objects.get_or_create(sensor=hs.sensor, defaults={'kp': 1, 'ki': 1, 'kd': 1})
        HeatControlProfile.objects.create(heatcontrol=hc, daytype=hs.daytype, start=hs.start, end=hs.end, target_temp=hs.target_temp)


def migrate_HeatSensorOverride(apps, schema_editor):
    HeatSensorOverride = apps.get_model('heatcontrol', 'HeatSensorOverride')
    HeatControl = apps.get_model('heatcontrol', 'HeatControl')
    HeatControlOverride = apps.get_model('heatcontrol', 'HeatControlOverride')
    for hso in HeatSensorOverride.objects.select_related('sensor').all():
        hc, created = HeatControl.objects.get_or_create(sensor=hso.sensor, defaults={'kp': 1, 'ki': 1, 'kd': 1})
        HeatControlOverride.objects.create(heatcontrol=hc, start=hso.start, end=hso.end, target_temp=hso.target_temp)


class Migration(migrations.Migration):

    dependencies = [
        ('heatcontrol', '0003_auto_20161204_0620'),
    ]

    operations = [
        migrations.RunPython(migrate_HeatSensor),
        migrations.RunPython(migrate_HeatSensorOverride),
    ]
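# Hedged aside, not part of the original migration: as written the two RunPython
# steps above cannot be reversed. If reversibility is wanted, RunPython accepts a
# reverse callable, and migrations.RunPython.noop is the built-in "do nothing"
# reverse. A sketch of the alternative operations list:
operations_reversible = [
    migrations.RunPython(migrate_HeatSensor, migrations.RunPython.noop),
    migrations.RunPython(migrate_HeatSensorOverride, migrations.RunPython.noop),
]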
python
from __future__ import print_function from loguru import logger import io3d import io3d.datasets import sed3 import numpy as np import matplotlib.pyplot as plt logger.enable("io3d") logger.disable("io3d") import matplotlib.pyplot as plt from pathlib import Path import bodynavigation import exsu import sys import os import tensorflow as tf import os from skimage.transform import resize from skimage.io import imsave import numpy as np from skimage.segmentation import mark_boundaries from tensorflow.keras.models import Model from tensorflow.keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose from tensorflow.keras.optimizers import Adam, SGD from tensorflow.keras.callbacks import ModelCheckpoint from tensorflow.keras import backend as K from tensorflow.keras.callbacks import History from skimage.exposure import rescale_intensity from skimage import io # from data import load_train_data, load_test_data from sklearn.utils import class_weight from typing import Optional from numbers import Number def window( data3d: np.ndarray, vmin: Optional[Number] = None, vmax: Optional[Number] = None, center: Optional[Number] = None, width: Optional[Number] = None, vmin_out: Optional[Number] = 0, vmax_out: Optional[Number] = 255, dtype=np.uint8): """ Rescale input ndarray and trim the outlayers. :param data3d: ndarray with numbers :param vmin: minimal input value. Skipped if center and width is given. :param vmax: maximal input value. Skipped if center and width is given. :param center: Window center :param width: Window width :param vmin_out: Output mapping minimal value :param vmax_out: Output mapping maximal value :param dtype: Output dtype :return: """ if width and center: vmin = center - (width / 2.) vmax = center + (width / 2.) # logger.debug(f"vmin={vmin}, vmax={vmax}") k = float(vmax_out - vmin_out) / (vmax - vmin) q = vmax_out - k * vmax # logger.debug(f"k={k}, q={q}") data3d_out = data3d * k + q data3d_out[data3d_out > vmax_out] = vmax_out data3d_out[data3d_out < vmin_out] = vmin_out return data3d_out.astype(dtype) import h5py import tensorflow as tf class generator: def __init__(self, label, organ_label, is_mask=False): self.label = label self.organ_label = organ_label self.is_mask=is_mask def __call__(self): fnimgs = Path(f'mask_{self.label}_{self.organ_label}') if self.is_mask else Path(f'img_{self.label}') for indx in range(len(fnimgs.glob("*.npy"))): fnimg = fnimgs / f"{indx:06d}.npy" img = np.load(fnimg) yield img # with h5py.File(self.file, 'r') as hf: # for im in hf["train_img"]: # imgs_train = np.load(f'imgs_train_{experiment_label}.npy') # yield im def load_train_data(experiment_label): imgs_train = np.load(f'imgs_train_{experiment_label}.npy') masks_train = np.load(f'masks_train_{experiment_label}.npy') return imgs_train, masks_train def load_test_data(experiment_label): imgs_test = np.load(f'imgs_test_{experiment_label}.npy') masks_test = np.load(f'masks_test_{experiment_label}.npy') return imgs_test, masks_test def get_dataset_loaders(label, organ_label): imgs = tf.data.Dataset.from_generator( generator(label, organ_label, is_mask=False), tf.uint8, tf.TensorShape([512, 512, 3])) masks = tf.data.Dataset.from_generator( generator(label, organ_label, is_mask=True), tf.uint8, tf.TensorShape([512, 512, 3])) return imgs, masks def create_train_data(label="train", datasets=None, dataset_label="", organ_label="rightkidney", skip_if_exists=True): # fnimgs = f'imgs_{label}_{dataset_label}.npy' # fnmasks =f'masks_{label}_{dataset_label}.npy' fnimgs = 
Path(f'img_{label}_{dataset_label}') fnmasks =Path(f'mask_{label}_{dataset_label}_{organ_label}') fnpattern = "{dataset}_{i:02d}_{k:05d}.npy" p_imgs = fnimgs p_masks =fnmasks # if p_imgs.exists() and p_imgs.is_dir() and p_masks.exists() and p_masks.is_dir() and skip_if_exists: # logger.info("Files exists. Skipping creation and loading instead.") # # imgs_train = np.load(fnimgs) # # masks_train = np.load(fnmasks) if True: # imgs_train = [] # masks_train = [] if not datasets: datasets = { "3Dircadb1": {"start": 1, "stop": 2}, # "sliver07": {"start":0, "stop":0} } indx = 0 for dataset in datasets: for i in range( datasets[dataset]["start"], datasets[dataset]["stop"] ): logger.debug(f"{dataset} {i}") fn0 = fnpattern.format(dataset=dataset, i=i, k=0) if not (fnmasks / fn0).exists(): # logger.info(f"File {fn0} exists. Skipping") # continue segm3dp = io3d.datasets.read_dataset(dataset, organ_label, i) if segm3dp is None: print(f" Organ label '{organ_label}' does not exist. Skipping.") continue for k in range(segm3dp.data3d.shape[0]): np.save(fnmasks / fnpattern.format(dataset=dataset, i=i, k=k) , segm3d[k]) if not (fnimgs / fn0).exists(): data3dp = io3d.datasets.read_dataset(dataset, "data3d", i) data3d = window(data3dp["data3d"], center=40, width=400, vmin_out=0, vmax_out=255, dtype=np.uint8) segm3d = segm3dp["data3d"] bn = bodynavigation.body_navigation.BodyNavigation(data3dp["data3d"], voxelsize_mm=data3dp["voxelsize_mm"]) feature_list = [ data3d, bn.dist_to_sagittal(), bn.dist_coronal(), bn.dist_to_diaphragm_axial(), bn.dist_to_surface(), ] # print(f"shapes: data3d={data3d.shape}, dst={dst.shape}") # for j in range(0, data3d.shape[0]): # imgs_train.append(np.stack([data3d[j, :, :], feature_list[0][j, :, :]], axis=2)) # masks_train.append(segm3d[j, :, :]) all_features = expand_dims_and_concat(feature_list, 3) for k in range(all_features.shape[0]): fnimgs.mkdir(parents=True, exist_ok=True) fnmasks.mkdir(parents=True, exist_ok=True) np.save(fnimgs / fnpattern.format(dataset=dataset, i=i, k=k), all_features[k]) indx += 1 logger.debug(f"i={i}, {all_features.shape}") # imgs_train = np.array(imgs_train, dtype=np.int16) # masks_train = np.array(masks_train, dtype=np.uint8) # np.save(fnimgs, imgs_train) # np.save(fnmasks, masks_train) # print(f'Saving to .npy files done. imgs.shape={imgs_train.shape}, masks.shape={masks_train.shape}') # return imgs_train, masks_train def dice_coef(y_true, y_pred): smooth = 1. y_true_f = K.flatten(y_true) y_pred_f = K.flatten(y_pred) intersection = K.sum(y_true_f * y_pred_f) return (2. 
* intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth) def dice_coef_loss(y_true, y_pred): return -dice_coef(y_true, y_pred) # The functions return our metric and loss # %% # one_weight = (1-num_of_ones)/(num_of_ones + num_of_zeros) # zero_weight = (1-num_of_zeros)/(num_of_ones + num_of_zeros) def weighted_binary_crossentropy(zero_weight, one_weight): def weighted_binary_crossentropy(y_true, y_pred): b_ce = K.binary_crossentropy(y_true, y_pred) # weighted calc weight_vector = y_true * one_weight + (1 - y_true) * zero_weight weighted_b_ce = weight_vector * b_ce return K.mean(weighted_b_ce) return weighted_binary_crossentropy def save_segmentations(imgs_test, imgs_mask_test, pred_dir='preds'): print(f"shapes={imgs_test.shape},{imgs_mask_test.shape}") if not os.path.exists(pred_dir): os.mkdir(pred_dir) for k in range(len(imgs_mask_test)): a = rescale_intensity(imgs_test[k][:, :], out_range=(-1, 1)) b = (imgs_mask_test[k][:, :] > 0.5).astype('uint8') io.imsave(os.path.join(pred_dir, f'{k:05}_pred.png'), mark_boundaries(a, b)) # nb_channels = 2 class UNetTrainer(): def __init__(self, nb_channels, img_rows, img_cols, experiment_label, organ_label): self.nb_channels = nb_channels self.img_rows = img_rows self.img_cols = img_cols self.experiment_label = experiment_label self.organ_label = organ_label pass def get_unet(self, weights=None): if weights is None: weights = [0.05956, 3.11400] # {0: 0.5956388648542532, 1: 3.1140000760253925} inputs = Input((self.img_rows, self.img_cols, self.nb_channels)) conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs) conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1) pool1 = MaxPooling2D(pool_size=(2, 2))(conv1) conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1) conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2) pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2) conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3) pool3 = MaxPooling2D(pool_size=(2, 2))(conv3) conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3) conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4) pool4 = MaxPooling2D(pool_size=(2, 2))(conv4) conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4) conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5) up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3) conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6) conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6) up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3) conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7) conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7) up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3) conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8) conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8) up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3) conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9) conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9) conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9) # conv10 = Conv2D(2, (1, 1), activation='softmax')(conv9) model = Model(inputs=[inputs], outputs=[conv10]) # 
model.compile(optimizer=Adam(lr=1e-3), loss=dice_coef_loss, metrics=[dice_coef]) # model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[dice_coef, "accuracy"]) model.compile(optimizer='adam', loss=weighted_binary_crossentropy(weights[0], weights[1]), metrics=[dice_coef, "accuracy"]) # model.compile(optimizer='adam', loss=weighted_binary_crossentropy(weights[0], weights[1]), metrics=[dice_coef, "accuracy"]) # categorical crossentropy (weighted) return model # The different layers in our neural network model (including convolutions, maxpooling and upsampling) # %% def preprocess(self, imgs, is_mask=False): new_shape = list(imgs.shape).copy() new_shape[1] = self.img_rows new_shape[2] = self.img_cols # imgs_p = np.ndarray((imgs.shape[0], img_rows, img_cols, imgs.shape[3]), dtype=np.uint8) imgs_p = np.ndarray(new_shape, dtype=np.uint8) for i in range(imgs.shape[0]): imgs_p[i] = resize(imgs[i], new_shape[1:], preserve_range=True) # imgs_p[i] = resize(imgs[i, 0 ], (img_cols, img_rows), preserve_range=True) # imgs_p = imgs_p[..., np.newaxis] if is_mask: imgs_p = (imgs_p > 0).astype('float32') else: imgs_p = imgs_p.astype('float32') return imgs_p # We adapt here our dataset samples dimension so that we can feed it to our network # %% # %% def train_and_predict(self, continue_training=False, epochs=50, step=1): # if True: print('-' * 30) print('Loading and preprocessing train data...') print('-' * 30) experiment_label = self.experiment_label # imgs_train, imgs_mask_train = load_train_data(self.experiment_label) imgs_train, imgs_mask_train = get_dataset_loaders("train", self.organ_label) imgs_train = imgs_train[::step] imgs_mask_train = imgs_mask_train[::step] logger.debug(f"imgs_train.shape={imgs_train.shape}") logger.debug(f"imgs_mask_train.shape={imgs_mask_train.shape}") imgs_train = self.preprocess(imgs_train) imgs_mask_train = self.preprocess(imgs_mask_train, is_mask=True) logger.debug(f"imgs_train.shape={imgs_train.shape}") logger.debug(f"imgs_mask_train.shape={imgs_mask_train.shape}") # TODO remove - using small part of dataset # imgs_train = imgs_train[50:65] # imgs_mask_train = imgs_mask_train[50:65] # imgs_train = imgs_train.astype('float32') # mean = np.mean(imgs_train) # mean for data centering # std = np.std(imgs_train) # std for data normalization # imgs_train -= mean # imgs_train /= std # Normalization of the train set # imgs_mask_train = (imgs_mask_train > 0).astype('float32') y_train = imgs_mask_train # Calculate the weights for each class so that we can balance the data cl_weights = class_weight.compute_class_weight( 'balanced', np.unique(y_train.flatten()), y_train.flatten() ) print(f"weights={cl_weights}") cl_weights_dct = dict(enumerate(cl_weights)) print('-' * 30) print('Creating and compiling model...') print('-' * 30) model = self.get_unet(cl_weights) if continue_training: model.load_weights(f'weights_{experiment_label}.h5') model_checkpoint = ModelCheckpoint(f'weights_{experiment_label}.h5', monitor='val_loss', save_best_only=True) # Saving the weights and the loss of the best predictions we obtained print('-' * 30) print('Fitting model...') print('-' * 30) log_dir = f'logs\\{experiment_label}\\' # Path(log_dir).mkdir(parents=True, exist_ok=True) model.fit_generator() history = model.fit( imgs_train, imgs_mask_train, batch_size=10, epochs=epochs, verbose=1, shuffle=True, validation_split=0.2, callbacks=[ model_checkpoint, tf.keras.callbacks.TensorBoard(log_dir=log_dir) ], # class_weight=weights_dct # tohle nefunguje pro 4d data ) # predict_test_data(mean=None, 
std=None) self.predict_test_data(history) return history def predict_test_data(self, history): print('-' * 30) print('Loading and preprocessing test data...') print('-' * 30) # imgs_test, imgs_maskt = load_test_data(self.experiment_label) imgs_test, imgs_maskt = get_dataset_loaders("test", self.organ_label) imgs_test = self.preprocess(imgs_test) imgs_maskt = self.preprocess(imgs_maskt, is_mask=True) y_train = imgs_maskt # Calculate the weights for each class so that we can balance the data cl_weights = class_weight.compute_class_weight( 'balanced', np.unique(y_train.flatten()), y_train.flatten() ) model = self.get_unet(cl_weights) # TODO remove this limit # imgs_test = imgs_test[50:65] # imgs_maskt = imgs_maskt[50:65] imgs_test = imgs_test.astype('float32') # imgs_test -= mean # imgs_test /= std # Normalization of the test set # TODO remove this part # going to test on train set # imgs_test = imgs_train # imgs_maskt = imgs_mask_train print('-' * 30) print('Loading saved weights...') print('-' * 30) model.load_weights(f'weights_{self.experiment_label}.h5') print('-' * 30) print('Predicting masks on test data...') print('-' * 30) imgs_mask_test = model.predict(imgs_test, verbose=1) np.save('imgs_mask_test.npy', imgs_mask_test) print('-' * 30) print('Saving predicted masks to files...') print('-' * 30) pred_dir = f"preds/{self.experiment_label}" Path(pred_dir).mkdir(parents=True, exist_ok=True) # Saving our predictions in the directory 'preds' logger.debug(f"imgs_test.shape={imgs_test.shape}") logger.debug(f"imgs_mask_test.shape={imgs_mask_test.shape}") # save_segmentations(imgs_test[:, :, :, 0, 0], imgs_mask_test[:, :, :, 0], pred_dir=pred_dir) save_segmentations(imgs_test[:, :, :, 0], imgs_mask_test[:, :, :, 0], pred_dir=pred_dir) plt.plot(history.history['dice_coef']) plt.plot(history.history['val_dice_coef']) plt.title('Model dice coeff') plt.ylabel('Dice coeff') plt.xlabel('Epoch') plt.legend(['Train', 'Test'], loc='upper left') plt.show() # plotting our dice coeff results in function of the number of epochs def load_batch(): pass def expand_dims_and_concat(larr:np.ndarray, axis:int): larr = list(map(lambda x: np.expand_dims(x,axis), larr)) arr = np.concatenate(larr, axis=axis) return arr
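# Worked numeric check (illustrative, not part of the training script) for the
# two small formulas above: the CT windowing in window() and the soft Dice score.
# window(center=40, width=400) maps HU values in [-160, 240] linearly onto
# [0, 255]: k = 255 / 400 = 0.6375, q = 255 - k * 240 = 102, so 40 HU -> ~127.
hu = np.array([[-500, -160, 40, 240, 1000]], dtype=np.int16)
windowed = window(hu, center=40, width=400)
assert list(windowed.ravel()) == [0, 0, 127, 255, 255]

# Soft Dice with smooth=1 on a toy pair of flattened masks (same formula as
# dice_coef(), just in plain numpy): (2*1 + 1) / (2 + 1 + 1) = 0.75.
y_true = np.array([1., 1., 0., 0.])
y_pred = np.array([1., 0., 0., 0.])
dice = (2. * (y_true * y_pred).sum() + 1.) / (y_true.sum() + y_pred.sum() + 1.)
assert abs(dice - 0.75) < 1e-9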
python
""" -*- test-case-name: PyHouse.Modules.Computer.Mqtt.test.test_computer -*- @name: PyHouse/src/Modules/Computer/Mqtt/mqtt_client.py @author: D. Brian Kimmel @contact: [email protected] @copyright: (c) 2015-2016 by D. Brian Kimmel @license: MIT License @note: Created on Jun 5, 2015 @Summary: Connect this computer node to the household Mqtt Broker. """ # Import system type stuff import copy import datetime from twisted.internet import defer # from twisted.internet.endpoints import SSL4ClientEndpoint # from twisted.internet.ssl import Certificate, optionsForClientTLS # Import PyMh files and modules. from Modules.Core.data_objects import NodeData, MqttInformation, MqttJson from Modules.Computer.Mqtt.mqtt_actions import Actions from Modules.Computer.Mqtt.mqtt_protocol import PyHouseMqttFactory from Modules.Computer.Mqtt.mqtt_xml import Xml as mqttXML from Modules.Utilities import json_tools, xml_tools from Modules.Computer import logging_pyh as Logger LOG = Logger.getLogger('PyHouse.Mqtt_Client ') PEM_FILE = '/etc/pyhouse/ca_certs/rootCA.pem' class Struct: def __init__(self, **args): self.__dict__.update(args) class Util(object): """ """ def connect_to_one_broker_TCP(self, p_pyhouse_obj, p_broker): l_clientID = 'PyH-' + p_pyhouse_obj.Computer.Name l_host = p_broker.BrokerAddress l_port = p_broker.BrokerPort l_username = None # p_broker.UserName l_password = None # p_broker.Password p_broker._ClientAPI = self LOG.info('Connecting via TCP...') if l_host is None or l_port is None: LOG.error('Bad Mqtt broker Address: {} or Port: {}'.format(l_host, l_port)) p_broker._ProtocolAPI = None else: l_factory = PyHouseMqttFactory(p_pyhouse_obj, l_clientID, p_broker, l_username, l_password) _l_connector = p_pyhouse_obj.Twisted.Reactor.connectTCP(l_host, l_port, l_factory) LOG.info('TCP Connected to broker: {}; Host:{}'.format(p_broker.Name, l_host)) pass @defer.inlineCallbacks def connect_to_one_broker_TLS(self, _p_pyhouse_obj, _p_broker): # l_host = p_broker.BrokerAddress # l_port = p_broker.BrokerPort # l_username = p_broker.UserName # l_password = p_broker.Password # l_clientID = 'PyH-' + p_pyhouse_obj.Computer.Name LOG.info('Connecting via TLS...') # l_factory = protocol.Factory.forProtocol(echoclient.EchoClient) # l_factory = PyHouseMqttFactory(p_pyhouse_obj, l_clientID, p_broker, l_username, l_password) # l_certData = PEM_FILE.getContent() # l_authority = Certificate.loadPEM(l_certData) # l_options = optionsForClientTLS(l_host.decode('utf-8'), l_authority) # l_endpoint = SSL4ClientEndpoint(p_pyhouse_obj.Twisted.Reactor, l_host, l_port, l_options) # l_client = yield l_endpoint.connect(l_factory) l_done = defer.Deferred() # l_client.connectionLost = lambda reason: l_done.callback(None) yield l_done def connect_to_all_brokers(self, p_pyhouse_obj): """ This will create a connection for each active broker in the config file. These connections will automatically reconnect if the connection is broken (broker reboots e.g.) 
""" l_count = 0 for l_broker in p_pyhouse_obj.Computer.Mqtt.Brokers.itervalues(): if not l_broker.Active: continue if l_broker.BrokerPort < 2000: self.connect_to_one_broker_TCP(p_pyhouse_obj, l_broker) else: self.connect_to_one_broker_TLS(p_pyhouse_obj, l_broker) l_count += 1 LOG.info('TCP Connected to {} Broker(s).'.format(l_count)) return l_count @staticmethod def _make_topic(p_pyhouse_obj, p_topic): l_topic = p_pyhouse_obj.Computer.Mqtt.Prefix + p_topic return l_topic @staticmethod def _make_message(p_pyhouse_obj, p_message = None): """ @param p_pyhouse_obj: is the entire PyHouse Data tree. @param message_json: is message that is already json encoded\ @param message_obj: is additional object that will be added into the meddage as Json. """ l_message = MqttJson() l_message.Sender = p_pyhouse_obj.Computer.Name l_message.DateTime = datetime.datetime.now() if p_message is None: pass elif isinstance(p_message, object): xml_tools.stuff_new_attrs(l_message, p_message) else: xml_tools.stuff_new_attrs(l_message, p_message) # print(PrettyFormatAny.form(l_message, 'Mqtt Client - Message')) l_json = json_tools.encode_json(l_message) return l_json class API(Util): """This interfaces to all of PyHouse. """ def __init__(self, p_pyhouse_obj): self.m_pyhouse_obj = p_pyhouse_obj p_pyhouse_obj.APIs.Computer.MqttAPI = self p_pyhouse_obj.Computer.Mqtt = MqttInformation() p_pyhouse_obj.Computer.Mqtt.Prefix = 'ReSeT' p_pyhouse_obj.Computer.Mqtt.Brokers = {} LOG.info("Initialized.") def LoadXml(self, p_pyhouse_obj): """ Load the Mqtt xml info. """ # LOG.info("Loading XML") l_mqtt = MqttInformation() l_mqtt.Prefix = p_pyhouse_obj.Computer.Name l_mqtt.Brokers = mqttXML.read_mqtt_xml(p_pyhouse_obj) p_pyhouse_obj.Computer.Mqtt.Brokers = l_mqtt.Brokers LOG.info("Loaded {} Brokers".format(len(l_mqtt.Brokers))) if p_pyhouse_obj.Computer.Mqtt.Brokers != {}: # LOG.info('Connecting to all MQTT Brokers.') l_count = self.connect_to_all_brokers(p_pyhouse_obj) LOG.info("Mqtt {} broker(s) Started.".format(l_count)) else: LOG.info('No Mqtt brokers are configured.') LOG.info("Loaded XML") return l_mqtt def Start(self): """ if self.m_pyhouse_obj.Computer.Mqtt.Brokers != {}: LOG.info('Connecting to all MQTT Brokers.') l_count = self.connect_to_all_brokers(self.m_pyhouse_obj) LOG.info("Mqtt {} broker(s) Started.".format(l_count)) else: LOG.info('No Mqtt brokers are configured.') """ pass def SaveXml(self, p_xml): l_xml = mqttXML().write_mqtt_xml(self.m_pyhouse_obj.Computer.Mqtt.Brokers) p_xml.append(l_xml) LOG.info("Saved Mqtt XML.") return p_xml def Stop(self): LOG.info("Stopped.") # ## The following are public commands that may be called from everywhere def MqttPublish(self, p_topic, p_message): """Send a topic, message to the broker for it to distribute to the subscription list # self.m_pyhouse_obj.APIs.Computer.MqttAPI.MqttPublish("schedule/execute", l_schedule) @param p_topic: is the partial topic, the prefix will be prepended. @param message_json : is the JSON message we want to send @param message_obj: is an additional object that we will convert to JSON and merge it into the message. 
""" l_topic = Util._make_topic(self.m_pyhouse_obj, p_topic) l_message = Util._make_message(self.m_pyhouse_obj, p_message) for l_broker in self.m_pyhouse_obj.Computer.Mqtt.Brokers.itervalues(): if not l_broker.Active: continue try: l_broker._ProtocolAPI.publish(l_topic, l_message) LOG.info('Mqtt publishing:\n\tBroker: {}\t\tTopic:{}\n'.format(l_broker.Name, l_topic)) except AttributeError as e_err: LOG.error("Mqtt Unpublished.\n\tERROR:{}\n\tTopic:{}\n\tMessage:{}\n".format(e_err, l_topic, l_message)) def MqttDispatch(self, p_topic, p_message): """Dispatch a received MQTT message according to the topic. TODO: This needs protection from poorly formed Mqtt messages. """ l_topic = p_topic.split('/')[2:] # Drop the pyhouse/housename/ as that is all we subscribed to. l_message = json_tools.decode_json_unicode(p_message) l_logmsg = Actions(self.m_pyhouse_obj).mqtt_dispatch(l_topic, l_message) LOG.info(l_logmsg) def doPyHouseLogin(self, p_client, p_pyhouse_obj): """Login to PyHouse via MQTT """ self.m_client = p_client l_name = p_pyhouse_obj.Computer.Name try: l_node = copy.deepcopy(p_pyhouse_obj.Computer.Nodes[l_name]) except (KeyError, TypeError): l_node = NodeData() l_node.NodeInterfaces = {} # self.MqttPublish('computer/startup', l_node) # ## END DBK
python
import torch.nn as nn from ..builder import VQA_MODELS, build_backbone, build_encoder, build_head @VQA_MODELS.register_module() class VISDIALPRINCIPLES(nn.Module): def __init__(self, vocabulary_len, word_embedding_size, encoder, backbone, head): super().__init__() self.embedding_model = nn.Embedding(vocabulary_len, word_embedding_size, padding_idx=0) self.encoder_model = build_encoder(encoder) self.backbone = build_backbone(backbone) self.head = build_head(head) # 包括 classification head, generation head def forward(self, data): img = data['img_feat'] ques = data['ques'] his = data['hist'] batch_size, rnd, max_his_length = his.size() cap = his[:, 0, :] ques_len = data['ques_len'] hist_len = data['hist_len'] cap_len = hist_len[:, 0] ques_embed = self.embedding_model(ques) cap_emb = self.embedding_model(cap.contiguous()) his = his.contiguous().view(-1, max_his_length) his_embed = self.embedding_model(his) q_output, c_output, his_feat = self.encoder_model(ques_embed, ques_len, cap_emb, cap_len, his_embed, hist_len) ques_location = ques_len.view(-1).cpu().numpy() - 1 ques_encoded = q_output[range(batch_size), ques_location, :] cap_location = cap_len.view(-1).cpu().numpy() - 1 cap_encoded = c_output[range(batch_size), cap_location, :] his_feat = his_feat.view(batch_size, rnd, -1) fuse_feat = self.backbone(ques_encoded, cap_encoded, his_feat, q_output, c_output, ques_len, cap_len, ques_embed, cap_emb, img, batch_size) scores = self.head(fuse_feat, data) return scores
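# Illustrative sketch (not part of the model file) of the indexing trick used in
# forward() above to pick each sequence's last valid encoder output: index with
# (batch index, length - 1). Sizes below are arbitrary.
import torch

batch_size, max_len, dim = 2, 5, 3
q_output = torch.arange(batch_size * max_len * dim, dtype=torch.float32).view(batch_size, max_len, dim)
ques_len = torch.tensor([3, 5])  # true (unpadded) lengths per sequence
ques_location = ques_len.view(-1).cpu().numpy() - 1
ques_encoded = q_output[range(batch_size), ques_location, :]
assert ques_encoded.shape == (batch_size, dim)
assert torch.equal(ques_encoded[0], q_output[0, 2])
assert torch.equal(ques_encoded[1], q_output[1, 4])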
python
import numpy as np from mchap import mset from mchap.assemble import inheritence def test_gamete_probabilities__hom(): genotypes = np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], np.int8) probabilities = np.array([1]) gametes_expect = np.array([[[0, 0, 0], [0, 0, 0]]], np.int8) probs_expect = np.array([1]) gametes_actual, probs_actual = inheritence.gamete_probabilities( genotypes, probabilities, ) assert mset.equal(gametes_expect, gametes_actual) np.testing.assert_array_equal(probs_expect, probs_actual) def test_gamete_probabilities__het(): genotypes = np.array([[[0, 0, 0], [0, 0, 0], [1, 1, 1], [1, 1, 1]]], np.int8) probabilities = np.array([1]) gametes_expect = np.array( [[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]], np.int8, ) probs_expect = np.array([1 / 6, 4 / 6, 1 / 6]) gametes_actual, probs_actual = inheritence.gamete_probabilities( genotypes, probabilities, ) assert mset.equal(gametes_expect, gametes_actual) np.testing.assert_array_equal(probs_expect, probs_actual) def test_gamete_probabilities__distribution(): genotypes = np.array( [ [[0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 1, 1]], [[0, 0, 0], [0, 0, 0], [1, 1, 1], [1, 1, 1]], [[0, 0, 0], [0, 0, 0], [0, 1, 1], [1, 1, 1]], ], np.int8, ) probabilities = np.array([0.6, 0.3, 0.1]) gametes_expect = np.array( [ [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]], [[0, 0, 0], [0, 1, 1]], [[0, 1, 1], [1, 1, 1]], ], dtype=np.int8, ) probs_expect = np.array( [ (0.6 * 3 / 6) + (0.3 * 1 / 6) + (0.1 * 1 / 6), (0.6 * 3 / 6) + (0.3 * 4 / 6) + (0.1 * 2 / 6), (0.6 * 0 / 6) + (0.3 * 1 / 6) + (0.1 * 0 / 6), (0.6 * 0 / 6) + (0.3 * 0 / 6) + (0.1 * 2 / 6), (0.6 * 0 / 6) + (0.3 * 0 / 6) + (0.1 * 1 / 6), ] ) gametes_actual, probs_actual = inheritence.gamete_probabilities( genotypes, probabilities, ) assert mset.equal(gametes_expect, gametes_actual) np.testing.assert_array_equal(probs_expect, probs_actual) def test_cross_probabilities__hom_x_het(): maternal_gametes = np.array([[[0, 0, 0], [0, 0, 0]]], np.int8) maternal_probs = np.array([1]) maternal_probs = np.array([1]) paternal_gametes = np.array( [[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [1, 1, 1]]], np.int8 ) paternal_probs = np.array([0.5, 0.5]) genotypes_expect = np.array( [ [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 1, 1]], ], dtype=np.int8, ) probs_expect = np.array([0.5, 0.5]) genotypes_actual, probs_actual = inheritence.cross_probabilities( maternal_gametes, maternal_probs, paternal_gametes, paternal_probs, ) assert mset.equal(genotypes_expect, genotypes_actual) np.testing.assert_array_equal(probs_expect, probs_actual)
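# Worked cross-check (not one of the test cases) of the 1/6, 4/6, 1/6 expectation
# in test_gamete_probabilities__het: a tetraploid passes on 2 of its 4 homologues,
# so a genotype carrying two copies of haplotype A and two of B has C(4, 2) = 6
# equally likely draws.
from collections import Counter
from itertools import combinations

homologues = ["A", "A", "B", "B"]
gametes = Counter("".join(sorted(homologues[i] for i in pair))
                  for pair in combinations(range(4), 2))
assert gametes == Counter({"AB": 4, "AA": 1, "BB": 1})  # -> probabilities 1/6, 4/6, 1/6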
python
# Copyright 2019 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from setuptools import setup

# Best practice: package name should be prefixed with `newrelic_extension_`
INSTRUMENTED_PACKAGE = "sampleproject"
PACKAGE_NAME = "newrelic_extension_{}".format(INSTRUMENTED_PACKAGE)

HOOKS = [
    # package_to_intercept = instrumentation_hook
    "sample = {}.example:instrument".format(PACKAGE_NAME)
]

setup(
    name=PACKAGE_NAME,
    version="0.1",
    packages=[PACKAGE_NAME],
    package_dir={PACKAGE_NAME: "src"},
    entry_points={"newrelic.hooks": HOOKS},
    license="Apache-2.0",
    classifiers=["License :: OSI Approved :: Apache Software License"],
    install_requires=[
        "newrelic",
        # Always require the package being instrumented
        INSTRUMENTED_PACKAGE,
    ],
)
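# Hedged sketch (an assumption, not from this repository) of the hook module the
# entry point above refers to -- src/example.py inside the
# newrelic_extension_sampleproject package. New Relic calls the hook with the
# intercepted module object; wrapping a hypothetical sampleproject.do_work in a
# function trace is just one plausible thing such a hook might do, and the exact
# helper used may differ in a real extension.
import newrelic.agent


def instrument(module):
    # Time spent in sampleproject.do_work would show up as a function trace segment.
    newrelic.agent.wrap_function_trace(module, "do_work")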
python
import altair as alt from data import get_nullity_matrix_data def nullity_matrix_chart( data, keep_original_col_order=True, show_tooltip=False, threshold=0.5, h=400 ): nm_data, n_rows = get_nullity_matrix_data(data) text_font_size = 10 base = alt.Chart(nm_data, height=h) chart = base.mark_rect(cursor="context-menu" if show_tooltip else "default").encode( x=alt.X( "column:N", sort=None if keep_original_col_order else "ascending", axis=alt.Axis( orient="top", labelAngle=-90, labelColor="#44475A", domain=False, tickColor="transparent", title=None, ), ), y=alt.Y( "row:Q", axis=alt.Axis( grid=False, domain=False, tickColor="transparent", labelColor="#44475A", title=None, values=[0, n_rows], ), scale=alt.Scale(nice=False, domain=[n_rows, 0]), ), color=alt.Color( "isnull:N", legend=None, scale=alt.Scale(domain=[True, False], range=["white", "#44475A"]), ), ) if show_tooltip: chart = chart.encode( tooltip=[ alt.Tooltip("row:Q", title="Row"), alt.Tooltip("isnull:N", title="Null value?"), alt.Tooltip("column:N", title="Column"), alt.Tooltip( "percentage_null_values_per_column:Q", format=".2~%", title="% of null values in this column", ), ] ) # Altair/Vega-Lite: # Default `labelFontSize` = 10 # Default `tickSize` = 5 # Default `labelPadding` = 2 # Default `translate` = 0.5 text = base.mark_text( baseline="middle", align="right", fontSize=text_font_size, angle=270 ).encode( x=alt.X("column:N"), y=alt.value(h + (text_font_size / 2) + 5 + 2 + 0.5), text=alt.Text("percentage_null_values_per_column:Q", format=".2~%"), color=alt.condition( f"datum.percentage_null_values_per_column > {threshold}", alt.value("#E84A5F"), alt.value("#44475A"), ), ) return ( alt.layer(chart, text) .configure_view(strokeWidth=0) .configure_scale(bandPaddingInner=0.1) )
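# Possible usage (illustrative; assumes the local `data.get_nullity_matrix_data`
# helper imported above accepts a pandas DataFrame):
import pandas as pd

df = pd.DataFrame({
    "a": [1, None, 3, None],
    "b": [None, 2, 3, 4],
    "c": [1, 2, 3, 4],
})
chart = nullity_matrix_chart(df, show_tooltip=True, threshold=0.25)
chart.save("nullity_matrix.html")  # or just `chart` in a notebook to render it inline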
python
#!encoding=utf-8
from textblob import TextBlob
import os, sys, re


def textblob_process(line):
    blob = TextBlob(line)
    return blob.tags


def process_tag_result(tag_res):
    nps = []
    i = 0
    while i < len(tag_res):
        while i < len(tag_res) and not tag_res[i][1].startswith('NN'):
            i += 1
        np = []
        while i < len(tag_res) and (tag_res[i][1] == 'NN' or tag_res[i][1] == 'NNS' or tag_res[i][1] == 'NNP'):
            np.append(tag_res[i][0])
            i += 1
        if len(np) == 1 and tag_res[i-2][1] == 'JJ':
            np.insert(0, tag_res[i-2][0])
        nps.append(" ".join(np))
        i += 1
    return nps


def is_valid_np(np):
    # BRANDS is expected to be defined elsewhere; it is not present in this file.
    if re.search(r'\d+', np):
        return False
    if not re.match(r'\w+', np):
        return False
    for brand in BRANDS:
        if np.find(brand) >= 0:
            return False
    if np.find('/') >= 0:
        return False
    for token in np.split(' '):
        if len(token) <= 2:
            return False
        if token[-1] == u'®' or token[-1] == u'™':
            return False
    return True


def extract(line):
    nps = list()
    tag_res = textblob_process(line)
    nps.extend(process_tag_result(tag_res))
    return nps


if __name__ == '__main__':
    s = "Lower cut design with a square shaped neckline"
    # The original called the non-existent extract_np(); extract() is the defined
    # entry point (the print() form keeps this valid on both Python 2 and 3).
    print(extract(s))
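# Tiny illustration (not in the original script) of what process_tag_result()
# keeps: runs of NN/NNS/NNP tokens, pulling in a preceding JJ when the run is a
# single noun. The tag list below is hand-written rather than produced by TextBlob.
tags = [("Lower", "JJ"), ("cut", "NN"), ("design", "NN"),
        ("with", "IN"), ("a", "DT"), ("square", "JJ"), ("neckline", "NN")]
assert process_tag_result(tags) == ["cut design", "square neckline"]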
python
from ..crypto import Nonce from . import constants from io import BytesIO from datetime import datetime import binascii import struct import base58 import json FIELDS = { 'i64le': [8, '<q'], 'i64be': [8, '>q'], 'u64le': [8, '<Q'], 'u64be': [8, '>Q'], 'i32le': [4, '<i'], 'i32be': [4, '>i'], 'u32le': [4, '<I'], 'u32be': [4, '>I'], 'u16le': [2, '<H'], 'u16be': [2, '>H'], 'u8le': [1, '<B'], 'u8be': [1, '>B'], 'bool': [1, '?'] } class EncoderInstance: ''' This class keep decoded data ''' def __init__(self, encoder, fields, data, tag, dynamic): self.fields = fields self.data = data self.tag = tag self.dynamic = dynamic self.encoder = encoder def __repr__(self): return str(self.data) def __str__(self): s = self.encoder.name if 'messages' in self.data: s += ' [ ' for m in self.data['messages']: s += str(m) + ' ' s += ']' return s def __getitem__(self, key): return self.data[key] def __setitem__(self, key, value): self.data[key] = value def __iter__(self): if 'messages' in self.data: return iter(self.data['messages']) else: return None def encoder(self): return self.encoder def encoder_name(self): return self.encoder.name def serialize(self, skipSize=False): bio = BytesIO() if type(self.fields) == list: fields = self.fields else: fields = [ self.fields ] for f in fields: if f['name'] == 'noname': fdata = self.data else: fdata = self.data[f['name']] if type(f['type']) != str: bio.write(fdata.serialize()) elif f['type'] == 'bytes': if f['length'] == 'dynamic': bio.write(struct.pack('>I', len(fdata))) bio.write(binascii.unhexlify(fdata)) elif f['type'] == 'nonce': bio.write(fdata.get()) elif f['type'] == 'time': ff = FIELDS['i64be'] bio.write(struct.pack(ff[1], int(fdata.timestamp()))) elif f['type'] == 'string': bio.write(struct.pack('>H', len (fdata))) bio.write(fdata.encode('ascii')) elif f['type'] == 'hash' and f['of'] == 'block': bio.write(base58.b58decode_check(fdata)[len(constants.PREFIXES['b'])::]) elif f['type'] == 'hash' and f['of'] == 'chain_id': bio.write(base58.b58decode_check(fdata)[len(constants.PREFIXES['Net'])::]) elif f['type'] == 'hash' and f['of'] == 'context': bio.write(base58.b58decode_check(fdata)[len(constants.PREFIXES['Co'])::]) elif f['type'] == 'hash' and f['of'] == 'operationlist': bio.write(base58.b58decode_check(fdata)[len(constants.PREFIXES['LLo'])::]) elif f['type'] == 'hash' and f['of'] == 'operation': bio.write(base58.b58decode_check(fdata)[len(constants.PREFIXES['o'])::]) elif f['type'] == 'list': bio.write(struct.pack('>H', len(fdata) - 1)) for lelem in fdata: if type(f['of']) == str: ff = FIELDS[f['of']] bio.write(struct.pack(ff[1], lelem)) else: bio.write(lelem.serialize()) elif f['type'] == 'tlist': bio.write(struct.pack('>H', len(fdata) - 1)) for lelem in fdata: elser = lelem.serialize() bio.write(struct.pack('>H', len(elser) + 2)) bio.write(struct.pack('>H', int(lelem.tag, 16))) bio.write(elser) else: bio.write(struct.pack(FIELDS[f['type']][1], fdata)) bio.seek(0) data = bio.read() if self.dynamic and not skipSize: osize = struct.pack('>I', len(data)) return osize + data else: return data class Encoder: def __init__(self, name, fields, tag = None, instance = None, dynamic=False): self.name = name self.fields = fields self.tag = tag self.dynamic = dynamic if instance: self.instance = instance else: self.instance = EncoderInstance def __repr__(self): return str(self) def __str__(self): return self.name def from_data(self, data): parsed = {} for f in self.fields: parsed[f['name']] = data[f['name']] return self.instance(self, self.fields, parsed, self.tag, 
self.dynamic) def parse(self, data, skipSize=False): parsed = {} if data.__class__ == bytes: bio = BytesIO(data) else: bio = data if self.dynamic and not skipSize: osize = struct.unpack('>I', bio.read(4))[0] data2 = bio.read(osize) bio = BytesIO(data2) elif self.dynamic and skipSize: osize = len(data) if type(self.fields) == list: fields = self.fields else: fields = [ self.fields ] ptell = bio.tell() for f in fields: if not ('name' in f): f['name'] = 'noname' if type(f['type']) != str: parsed[f['name']] = f['type'].parse(bio) elif f['type'] == 'bytes': if self.dynamic and len(fields) == 1: l = osize elif f['length'] == 'dynamic': l = struct.unpack('>I', bio.read(4))[0] else: l = f['length'] parsed[f['name']] = binascii.hexlify(bio.read(l)) elif f['type'] == 'nonce': parsed[f['name']] = Nonce.from_bin(bio.read(24)) elif f['type'] == 'time': ff = FIELDS['i64be'] parsed[f['name']] = datetime.fromtimestamp(struct.unpack(ff[1], bio.read(ff[0]))[0]) elif f['type'] == 'string': l = struct.unpack('>H', bio.read(2))[0] parsed[f['name']] = bio.read(l).decode('ascii') elif f['type'] == 'hash' and f['of'] == 'block': parsed[f['name']] = base58.b58encode_check(constants.PREFIXES['b'] + bio.read(32)) elif f['type'] == 'hash' and f['of'] == 'chain_id': parsed[f['name']] = base58.b58encode_check(constants.PREFIXES['Net'] + bio.read(4)) elif f['type'] == 'hash' and f['of'] == 'context': parsed[f['name']] = base58.b58encode_check(constants.PREFIXES['Co'] + bio.read(32)) elif f['type'] == 'hash' and f['of'] == 'operationlist': parsed[f['name']] = base58.b58encode_check(constants.PREFIXES['LLo'] + bio.read(32)) elif f['type'] == 'hash' and f['of'] == 'operation': parsed[f['name']] = base58.b58encode_check(constants.PREFIXES['o'] + bio.read(32)) elif f['type'] == 'list': l = struct.unpack('>H', bio.read(2))[0] ll = [] for i in range(l + 1): if type(f['of']) == str: ff = FIELDS[f['of']] ll.append(struct.unpack(ff[1], bio.read(ff[0]))[0]) else: ll.append(f['of'].parse(bio)) parsed[f['name']] = ll # Tagged list, a list where elements are tags of other types elif f['type'] == 'tlist': l = struct.unpack('>H', bio.read(2))[0] ll = [] for i in range(l + 1): # Read the type elsize = struct.unpack('>H', bio.read(2))[0] t = hex(struct.unpack('>H', bio.read(2))[0]) # Get the data if t in f['of']: ll.append (f['of'][t].parse(bio)) else: bio.read(elsize - 2) # skip data if message is not recognized parsed['messages'] = ll else: ff = FIELDS[f['type']] parsed[f['name']] = struct.unpack(ff[1], bio.read(ff[0]))[0] if type(self.fields) != list: parsed = parsed[self.fields['name']] #ptell_end = bio.tell() return self.instance(self, self.fields, parsed, self.tag, self.dynamic)
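# Round-trip sketch (not part of the module) using the Encoder machinery above
# with two fixed-width big-endian fields; the 'point' encoder and its field names
# are invented for illustration.
point = Encoder('point', [
    {'name': 'x', 'type': 'u16be'},
    {'name': 'y', 'type': 'u16be'},
])

raw = point.from_data({'x': 7, 'y': 513}).serialize()
assert raw == b'\x00\x07\x02\x01'

decoded = point.parse(raw)
assert decoded['x'] == 7 and decoded['y'] == 513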
python
#----------
# author: someone120
#----------
import pypinyin as py
import lxml
import sqlite3 as sql
from urllib import request as url
# end of imports


def run():
    print(get(1).decode('gbk'))


def get(num):
    """
    num is the page number
    """
    header = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; Redmi 5 Build/OPM1.171019.026; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/75.0.3770.143 Mobile Safari/537.36'
    }
    resp = url.Request('http://www.hydcd.com/cy/chengyu/cy%s.htm' % (str(num).zfill(5)), headers=header)
    resp = url.urlopen(resp)
    return resp.read()


if __name__ == '__main__':
    run()
python
# pylint: disable=not-callable # pylint: disable=no-member import torch import torch.nn as nn from torch.nn import functional as F class RecurrentDynamics(nn.Module): def __init__( self, hidden_size, state_size, action_size, node_size, embedding_size, act_fn="relu", min_std=0.01, ): super().__init__() self.act_fn = getattr(torch, act_fn) self.min_std = min_std self.fc_embed_state_action = nn.Linear(state_size + action_size, hidden_size) self.rnn = nn.GRUCell(hidden_size, hidden_size) self.fc_embed_prior = nn.Linear(hidden_size, node_size) self.fc_prior = nn.Linear(node_size, 2 * state_size) self.fc_embed_posterior = nn.Linear(hidden_size + embedding_size, node_size) self.fc_posterior = nn.Linear(node_size, 2 * state_size) def forward(self, prev_hidden, prev_state, actions, encoder_output=None, non_terms=None): """ prev_hidden (batch, hidden_size) prev_state (batch, hidden_size) actions (seq_len, batch, hidden_size) encoder_output (seq_len, batch, hidden_size) non_terms (seq_len, batch, hidden_size) """ T = actions.size(0) + 1 hiddens = [torch.empty(0)] * T prior_states = [torch.empty(0)] * T prior_means = [torch.empty(0)] * T prior_stds = [torch.empty(0)] * T posterior_states = [torch.empty(0)] * T posterior_means = [torch.empty(0)] * T posterior_stds = [torch.empty(0)] * T hiddens[0] = prev_hidden prior_states[0] = prev_state posterior_states[0] = prev_state for t in range(T - 1): _state = prior_states[t] if encoder_output is None else posterior_states[t] _state = _state if non_terms is None else _state * non_terms[t] """ compute deterministic hidden state """ #print('cat in dynamic@', t, _state.shape, actions[t].shape) out = torch.cat([_state, actions[t]], dim=1) out = self.act_fn(self.fc_embed_state_action(out)) hiddens[t + 1] = self.rnn(out, hiddens[t]) """ compute latent state prior """ out = self.act_fn(self.fc_embed_prior(hiddens[t + 1])) prior_means[t + 1], _prior_std = torch.chunk(self.fc_prior(out), 2, dim=1) prior_stds[t + 1] = F.softplus(_prior_std) + self.min_std """ sample from state prior """ sample = prior_means[t + 1] + prior_stds[t + 1] * torch.randn_like( prior_means[t + 1] ) prior_states[t + 1] = sample if encoder_output is not None: """ encoder_output observations have different time index """ t_ = t - 1 """ calculate latent state posterior """ out = torch.cat([hiddens[t + 1], encoder_output[t_ + 1]], dim=1) out = self.act_fn(self.fc_embed_posterior(out)) posterior_means[t + 1], _posterior_std = torch.chunk( self.fc_posterior(out), 2, dim=1 ) posterior_stds[t + 1] = F.softplus(_posterior_std) + self.min_std """ sample from state posterior """ sample = posterior_means[t + 1] + posterior_stds[ t + 1 ] * torch.randn_like(posterior_means[t + 1]) posterior_states[t + 1] = sample hiddens = torch.stack(hiddens[1:], dim=0) prior_states = torch.stack(prior_states[1:], dim=0) prior_means = torch.stack(prior_means[1:], dim=0) prior_stds = torch.stack(prior_stds[1:], dim=0) if encoder_output is None: return { "hiddens": hiddens, "prior_means": prior_means, "prior_stds": prior_stds, "prior_states": prior_states, } else: posterior_means = torch.stack(posterior_means[1:], dim=0) posterior_stds = torch.stack(posterior_stds[1:], dim=0) posterior_states = torch.stack(posterior_states[1:], dim=0) return { "hiddens": hiddens, "prior_means": prior_means, "prior_stds": prior_stds, "prior_states": prior_states, "posterior_means": posterior_means, "posterior_stds": posterior_stds, "posterior_states": posterior_states, }
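# Shape-check sketch (illustrative only; the sizes are arbitrary): roll the prior
# model forward for 10 steps without observations and inspect the stacked outputs.
import torch

batch, seq_len = 4, 10
hidden_size, state_size, action_size, node_size, embedding_size = 64, 16, 3, 32, 128

dynamics = RecurrentDynamics(hidden_size, state_size, action_size, node_size, embedding_size)
prev_hidden = torch.zeros(batch, hidden_size)
prev_state = torch.zeros(batch, state_size)
actions = torch.randn(seq_len, batch, action_size)

with torch.no_grad():
    out = dynamics(prev_hidden, prev_state, actions)

assert out["hiddens"].shape == (seq_len, batch, hidden_size)
assert out["prior_states"].shape == (seq_len, batch, state_size)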
python
import vcr import zlib import json import six.moves.http_client as httplib from assertions import assert_is_json def _headers_are_case_insensitive(host, port): conn = httplib.HTTPConnection(host, port) conn.request("GET", "/cookies/set?k1=v1") r1 = conn.getresponse() cookie_data1 = r1.getheader("set-cookie") conn = httplib.HTTPConnection(host, port) conn.request("GET", "/cookies/set?k1=v1") r2 = conn.getresponse() cookie_data2 = r2.getheader("Set-Cookie") return cookie_data1 == cookie_data2 def test_case_insensitivity(tmpdir, httpbin): testfile = str(tmpdir.join("case_insensitivity.yml")) # check if headers are case insensitive outside of vcrpy host, port = httpbin.host, httpbin.port outside = _headers_are_case_insensitive(host, port) with vcr.use_cassette(testfile): # check if headers are case insensitive inside of vcrpy inside = _headers_are_case_insensitive(host, port) # check if headers are case insensitive after vcrpy deserializes headers inside2 = _headers_are_case_insensitive(host, port) # behavior should be the same both inside and outside assert outside == inside == inside2 def _multiple_header_value(httpbin): conn = httplib.HTTPConnection(httpbin.host, httpbin.port) conn.request("GET", "/response-headers?foo=bar&foo=baz") r = conn.getresponse() return r.getheader("foo") def test_multiple_headers(tmpdir, httpbin): testfile = str(tmpdir.join("multiple_headers.yaml")) outside = _multiple_header_value(httpbin) with vcr.use_cassette(testfile): inside = _multiple_header_value(httpbin) assert outside == inside def test_original_decoded_response_is_not_modified(tmpdir, httpbin): testfile = str(tmpdir.join("decoded_response.yml")) host, port = httpbin.host, httpbin.port conn = httplib.HTTPConnection(host, port) conn.request("GET", "/gzip") outside = conn.getresponse() with vcr.use_cassette(testfile, decode_compressed_response=True): conn = httplib.HTTPConnection(host, port) conn.request("GET", "/gzip") inside = conn.getresponse() # Assert that we do not modify the original response while appending # to the casssette. assert "gzip" == inside.headers["content-encoding"] # They should effectively be the same response. inside_headers = (h for h in inside.headers.items() if h[0].lower() != "date") outside_headers = (h for h in outside.getheaders() if h[0].lower() != "date") assert set(inside_headers) == set(outside_headers) inside = zlib.decompress(inside.read(), 16 + zlib.MAX_WBITS) outside = zlib.decompress(outside.read(), 16 + zlib.MAX_WBITS) assert inside == outside # Even though the above are raw bytes, the JSON data should have been # decoded and saved to the cassette. 
with vcr.use_cassette(testfile): conn = httplib.HTTPConnection(host, port) conn.request("GET", "/gzip") inside = conn.getresponse() assert "content-encoding" not in inside.headers assert_is_json(inside.read()) def _make_before_record_response(fields, replacement="[REDACTED]"): def before_record_response(response): string_body = response["body"]["string"].decode("utf8") body = json.loads(string_body) for field in fields: if field in body: body[field] = replacement response["body"]["string"] = json.dumps(body).encode() return response return before_record_response def test_original_response_is_not_modified_by_before_filter(tmpdir, httpbin): testfile = str(tmpdir.join("sensitive_data_scrubbed_response.yml")) host, port = httpbin.host, httpbin.port field_to_scrub = "url" replacement = "[YOU_CANT_HAVE_THE_MANGO]" conn = httplib.HTTPConnection(host, port) conn.request("GET", "/get") outside = conn.getresponse() callback = _make_before_record_response([field_to_scrub], replacement) with vcr.use_cassette(testfile, before_record_response=callback): conn = httplib.HTTPConnection(host, port) conn.request("GET", "/get") inside = conn.getresponse() # The scrubbed field should be the same, because no cassette existed. # Furthermore, the responses should be identical. inside_body = json.loads(inside.read().decode("utf-8")) outside_body = json.loads(outside.read().decode("utf-8")) assert not inside_body[field_to_scrub] == replacement assert inside_body[field_to_scrub] == outside_body[field_to_scrub] # Ensure that when a cassette exists, the scrubbed response is returned. with vcr.use_cassette(testfile, before_record_response=callback): conn = httplib.HTTPConnection(host, port) conn.request("GET", "/get") inside = conn.getresponse() inside_body = json.loads(inside.read().decode("utf-8")) assert inside_body[field_to_scrub] == replacement
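# Quick standalone illustration (not one of the test functions) of what the
# before_record_response callback built by _make_before_record_response() does to
# a recorded response body before it is written to the cassette:
scrub = _make_before_record_response(["url"], "[REDACTED]")
fake_response = {"body": {"string": json.dumps({"url": "http://example/get", "args": {}}).encode("utf8")}}
scrubbed = scrub(fake_response)
assert json.loads(scrubbed["body"]["string"])["url"] == "[REDACTED]"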
python
# uncompyle6 version 3.2.4 # Python bytecode 2.7 (62211) # Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)] # Embedded file name: lib.coginvasion.toon.NameTag from panda3d.core import TextNode from direct.fsm import ClassicFSM, State from lib.coginvasion.globals import CIGlobals class NameTag(TextNode): NameTagColors = {CIGlobals.Suit: {'fg': (0.2, 0.2, 0.2, 1.0), 'bg': (0.8, 0.8, 0.8, 0.5)}, CIGlobals.Toon: {'fg': (0.8, 0.4, 0.0, 1.0), 'bg': (0.8, 0.8, 0.8, 0.5)}, CIGlobals.CChar: {'fg': (0.2, 0.5, 0.0, 1.0), 'bg': (0.8, 0.8, 0.8, 0.5)}} NameTagBackgrounds = {'rollover': (1.0, 1.0, 1.0, 0.65), 'down': (0.3, 0.3, 0.3, 0.5), 'up': (0.8, 0.8, 0.8, 0.5)} LocalNameTagColor = (0.3, 0.3, 0.7, 1.0) def __init__(self, name, avatarType): self.avatarType = avatarType self.fsm = ClassicFSM.ClassicFSM('NameTag', [State.State('off', self.enterOff, self.exitOff), State.State('rollover', self.enterRollover, self.exitRollover), State.State('down', self.enterDown, self.exitDown), State.State('up', self.enterUp, self.exitUp)], 'off', 'off') self.fsm.enterInitialState() TextNode.__init__(self, 'nameTag-' + str(id(self))) self.setText(name) self.setTextColor(0.191406, 0.5625, 0.773438, 1.0) self.setWordwrap(8) self.setCardAsMargin(0.1, 0.1, 0.1, 0.1) self.setCardDecal(True) self.setAlign(self.ACenter) self.nodePath = hidden.attachNewNode(self) self.nodePath.setBillboardPointEye() self.clickable = 0 def getNodePath(self): return self.nodePath def setColorLocal(self): self.setTextColor(self.LocalNameTagColor) def setClickable(self, value): self.clickable = value def getClickable(self): return self.clickable def setPickerState(self, state): self.fsm.request(state) def enterOff(self): pass def exitOff(self): pass def enterRollover(self): self.setCardColor(self.NameTagBackgrounds['rollover']) def exitRollover(self): pass def enterDown(self): self.setCardColor(self.NameTagBackgrounds['down']) def makeDefaultFG(self): self.setTextColor(self.NameTagColors[self.avatarType]['fg']) def exitDown(self): pass def enterUp(self): self.setCardColor(self.NameTagBackgrounds['up']) def exitUp(self): pass def destroy(self): self.fsm.requestFinalState() del self.fsm del self.avatarType del self.clickable self.nodePath.removeNode() self.nodePath = None return
python
# Copyright 2016-2021 IBM Corp. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A :term:`LPAR` (Logical Partition) is a subset of the hardware resources of a :term:`CPC` in classic mode (or ensemble mode), virtualized as a separate computer. LPARs cannot be created or deleted by the user; they can only be listed. LPAR resources are contained in CPC resources. LPAR resources only exist in CPCs that are in classic mode (or ensemble mode). CPCs in DPM mode have :term:`Partition` resources, instead. """ from __future__ import absolute_import import time import copy from ._manager import BaseManager from ._resource import BaseResource from ._exceptions import StatusTimeout from ._logging import logged_api_call from ._utils import matches_filters, divide_filter_args, RC_LOGICAL_PARTITION __all__ = ['LparManager', 'Lpar'] class LparManager(BaseManager): """ Manager providing access to the :term:`LPARs <LPAR>` in a particular :term:`CPC`. Derived from :class:`~zhmcclient.BaseManager`; see there for common methods and attributes. Objects of this class are not directly created by the user; they are accessible via the following instance variable of a :class:`~zhmcclient.Cpc` object (in DPM mode): * :attr:`~zhmcclient.Cpc.lpars` """ def __init__(self, cpc): # This function should not go into the docs. # Parameters: # cpc (:class:`~zhmcclient.Cpc`): # CPC defining the scope for this manager. # Resource properties that are supported as filter query parameters. # If the support for a resource property changes within the set of HMC # versions that support this type of resource, this list must be set up # for the version of the HMC this session is connected to. query_props = [ 'name', ] super(LparManager, self).__init__( resource_class=Lpar, class_name=RC_LOGICAL_PARTITION, session=cpc.manager.session, parent=cpc, base_uri='/api/logical-partitions', oid_prop='object-id', uri_prop='object-uri', name_prop='name', query_props=query_props) @property def cpc(self): """ :class:`~zhmcclient.Cpc`: :term:`CPC` defining the scope for this manager. """ return self._parent @logged_api_call def list(self, full_properties=False, filter_args=None): """ List the LPARs in this CPC. Authorization requirements: * Object-access permission to this CPC. * Object-access permission to any LPAR to be included in the result. Parameters: full_properties (bool): Controls whether the full set of resource properties should be retrieved, vs. only the short set as returned by the list operation. filter_args (dict): Filter arguments that narrow the list of returned resources to those that match the specified filter arguments. For details, see :ref:`Filtering`. `None` causes no filtering to happen, i.e. all resources are returned. Returns: : A list of :class:`~zhmcclient.Lpar` objects. 
Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ resource_obj_list = [] resource_obj = self._try_optimized_lookup(filter_args) if resource_obj: resource_obj_list.append(resource_obj) # It already has full properties else: query_parms, client_filters = divide_filter_args( self._query_props, filter_args) resources_name = 'logical-partitions' uri = '{}/{}{}'.format(self.cpc.uri, resources_name, query_parms) result = self.session.get(uri) if result: props_list = result[resources_name] for props in props_list: resource_obj = self.resource_class( manager=self, uri=props[self._uri_prop], name=props.get(self._name_prop, None), properties=props) if matches_filters(resource_obj, client_filters): resource_obj_list.append(resource_obj) if full_properties: resource_obj.pull_full_properties() self._name_uri_cache.update_from(resource_obj_list) return resource_obj_list class Lpar(BaseResource): """ Representation of an :term:`LPAR`. Derived from :class:`~zhmcclient.BaseResource`; see there for common methods and attributes. Objects of this class are not directly created by the user; they are returned from creation or list functions on their manager object (in this case, :class:`~zhmcclient.LparManager`). """ def __init__(self, manager, uri, name=None, properties=None): # This function should not go into the docs. # manager (:class:`~zhmcclient.LparManager`): # Manager object for this resource object. # uri (string): # Canonical URI path of the resource. # name (string): # Name of the resource. # properties (dict): # Properties to be set for this resource object. May be `None` or # empty. assert isinstance(manager, LparManager), \ "Lpar init: Expected manager type %s, got %s" % \ (LparManager, type(manager)) super(Lpar, self).__init__(manager, uri, name, properties) @logged_api_call def update_properties(self, properties): """ Update writeable properties of this LPAR. This method serializes with other methods that access or change properties on the same Python object. Authorization requirements: * Object-access permission to this LPAR. * Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "Change Object Definition" task. * Since HMC 2.14.1: If the "next-activation-profile-name" property is to be updated, task permission for the "Change Object Options" task or the "Customize/Delete Activation Profiles" task. * Before HMC 2.15.0: For an LPAR whose activation-mode is "zaware", task permission for the "Firmware Details" task. * Since HMC 2.15.0: If any of the "ssc-*" or "zaware-*" properties is to be updated, task permission for the "Firmware Details" task. * Since HMC 2.15.0: If any of the numbers of allocated or reserved cores is to be updated, task permission for the "Logical Processor Add" task. Parameters: properties (dict): New values for the properties to be updated. Properties not to be updated are omitted. Allowable properties are the properties with qualifier (w) in section 'Data model' in section 'Logical Partition object' in the :term:`HMC API` book. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ # pylint: disable=protected-access self.manager.session.post(self.uri, body=properties) # Attempts to change the 'name' property will be rejected by the HMC, # so we don't need to update the name-to-URI cache. 
assert self.manager._name_prop not in properties self.update_properties_local(copy.deepcopy(properties)) @logged_api_call def activate(self, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False, activation_profile_name=None, force=False): """ Activate (start) this LPAR, using the HMC operation "Activate Logical Partition". This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "not-operating" (which indicates that the LPAR is active but no operating system is running), or "operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. * Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "Activate" task. Parameters: wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "not-operating" or "operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. activation_profile_name (:term:`string`): Name of the image :class:`ActivationProfile` to use for activation. `None` means that the activation profile specified in the `next-activation-profile-name` property of the LPAR is used. force (bool): Boolean controlling whether this operation is permitted when the LPAR is in the "operating" status. TBD: What will happen with the LPAR in that case (deactivated then activated? nothing?) Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. 
:exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. """ body = {} if activation_profile_name: body['activation-profile-name'] = activation_profile_name if force: body['force'] = force result = self.manager.session.post( self.uri + '/operations/activate', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["not-operating", "operating"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def deactivate(self, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False, force=False): """ De-activate (stop) this LPAR, using the HMC operation "Deactivate Logical Partition". This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "not-activated", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. * Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "Deactivate" task. Parameters: wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "non-activated" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. force (bool): Boolean controlling whether this operation is permitted when the LPAR is in the "operating" status. TBD: What will happen with the LPAR in that case (deactivated then activated? nothing?) Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. 
Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. """ body = {} if force: body['force'] = force result = self.manager.session.post( self.uri + '/operations/deactivate', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["not-activated"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def scsi_load(self, load_address, wwpn, lun, load_parameter=None, disk_partition_id=None, operating_system_specific_load_parameters=None, boot_record_logical_block_address=None, force=False, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False, secure_boot=False): # pylint: disable=invalid-name """ Load (boot) this LPAR from a designated SCSI device, using the HMC operation "SCSI Load". This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. * Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "SCSI Load" task. Parameters: load_address (:term:`string`): Device number of the boot device. wwpn (:term:`string`): Worldwide port name (WWPN) of the target SCSI device to be used for this operation, in hexadecimal. lun (:term:`string`): Hexadecimal logical unit number (LUN) to be used for the SCSI Load. load_parameter (:term:`string`): Optional load control string. If empty string or `None`, it is not passed to the HMC. disk_partition_id (:term:`integer`): Optional disk-partition-id (also called the boot program selector) to be used for the SCSI Load. If `None`, it is not passed to the HMC. operating_system_specific_load_parameters (:term:`string`): Optional operating system specific load parameters to be used for the SCSI Load. boot_record_logical_block_address (:term:`string`): Optional hexadecimal boot record logical block address to be used for the SCSI Load. force (bool): Boolean controlling whether this operation is permitted when the LPAR is in the "operating" status. wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. 
If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. secure_boot (bool): Bollean controlling whether the system checks the software signature of what is loaded against what the distributor signed it with. Requires z15 or later. Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. """ body = {} body['load-address'] = load_address body['world-wide-port-name'] = wwpn body['logical-unit-number'] = lun if load_parameter: body['load-parameter'] = load_parameter if disk_partition_id is not None: body['disk-partition-id'] = disk_partition_id if operating_system_specific_load_parameters: body['operating-system-specific-load-parameters'] = \ operating_system_specific_load_parameters if boot_record_logical_block_address: body['boot-record-logical-block-address'] = \ boot_record_logical_block_address if force: body['force'] = force if secure_boot: body['secure-boot'] = secure_boot result = self.manager.session.post( self.uri + '/operations/scsi-load', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["operating"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def scsi_dump(self, load_address, wwpn, lun, load_parameter=None, disk_partition_id=None, operating_system_specific_load_parameters=None, boot_record_logical_block_address=None, os_ipl_token=None, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False, force=False): # pylint: disable=invalid-name """ Load a standalone dump program from a designated SCSI device in this LPAR, using the HMC operation "SCSI Dump". This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. * Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "SCSI Dump" task. Parameters: load_address (:term:`string`): Device number of the boot device. 
wwpn (:term:`string`): Worldwide port name (WWPN) of the target SCSI device to be used for this operation, in hexadecimal. lun (:term:`string`): Hexadecimal logical unit number (LUN) to be used for the SCSI Load. load_parameter (:term:`string`): Optional load control string. If empty string or `None`, it is not passed to the HMC. disk_partition_id (:term:`integer`): Optional disk-partition-id (also called the boot program selector) to be used for the SCSI Load. If `None`, it is not passed to the HMC. operating_system_specific_load_parameters (:term:`string`): Optional operating system specific load parameters to be used for the SCSI Load. boot_record_logical_block_address (:term:`string`): Optional hexadecimal boot record logical block address to be used for the SCSI Load. os_ipl_token (:term:`string`): Optional hexadecimal value to be used for the SCSI dump. wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. force (bool): Boolean controlling whether this operation is permitted when the LPAR is in the "operating" status. Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. 
""" body = {} body['load-address'] = load_address body['world-wide-port-name'] = wwpn body['logical-unit-number'] = lun if load_parameter: body['load-parameter'] = load_parameter if disk_partition_id is not None: body['disk-partition-id'] = disk_partition_id if operating_system_specific_load_parameters: body['operating-system-specific-load-parameters'] = \ operating_system_specific_load_parameters if boot_record_logical_block_address: body['boot-record-logical-block-address'] = \ boot_record_logical_block_address if os_ipl_token is not None: body['os-ipl-token'] = os_ipl_token if force: body['force'] = force result = self.manager.session.post( self.uri + '/operations/scsi-dump', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["operating"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def load(self, load_address=None, load_parameter=None, clear_indicator=True, store_status_indicator=False, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False, force=False): """ Load (boot) this LPAR from a load address (boot device), using the HMC operation "Load Logical Partition". This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. * Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "Load" task. Parameters: load_address (:term:`string`): Device number of the boot device. Up to z13, this parameter is required. Starting with z14, this parameter is optional and defaults to the load address specified in the 'last-used-load-address' property of the Lpar. load_parameter (:term:`string`): Optional load control string. If empty string or `None`, it is not passed to the HMC. clear_indicator (bool): Optional boolean controlling whether the memory should be cleared before performing the load or not cleared. The default value is `True`. store_status_indicator (bool): Optional boolean controlling whether the status should be stored before performing the Load. The default value is `False`. wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. 
status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. force (bool): Boolean controlling whether this operation is permitted when the LPAR is in the "operating" status. TBD: What will happen with the LPAR in that case (deactivated then activated? nothing?) Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. """ body = {} if load_address: body['load-address'] = load_address if load_parameter: body['load-parameter'] = load_parameter if force: body['force'] = force if not clear_indicator: body['clear-indicator'] = clear_indicator if store_status_indicator: body['store-status-indicator'] = store_status_indicator result = self.manager.session.post( self.uri + '/operations/load', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["operating"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def stop(self, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False): """ Stop this LPAR, using the HMC operation "Stop Logical Partition". The stop operation stops the processors from processing instructions. This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "not-operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. * Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "Stop" task. Parameters: wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "not-operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. 
The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. """ body = None result = self.manager.session.post( self.uri + '/operations/stop', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["not-operating"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def reset_clear(self, force=False, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False): """ Initialize this LPAR by clearing its pending interruptions, resetting its channel subsystem, and resetting its processors, using the HMC operation "Reset Clear". This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. * Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "Reset Clear" task. Parameters: force (bool): Boolean controlling whether this operation is permitted when the LPAR is in the "operating" status. The default is `False`. wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. 
`None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. """ body = {} if force: body['force'] = force result = self.manager.session.post( self.uri + '/operations/reset-clear', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["operating"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def open_os_message_channel(self, include_refresh_messages=True): """ Open a JMS message channel to this LPAR's operating system, returning the string "topic" representing the message channel. Authorization requirements: * Object-access permission to this Partition. * Task permission to the "Operating System Messages" task at least in view-only mode. Parameters: include_refresh_messages (bool): Boolean controlling whether refresh operating systems messages should be sent, as follows: * If `True`, refresh messages will be recieved when the user connects to the topic. The default. * If `False`, refresh messages will not be recieved when the user connects to the topic. Returns: :term:`string`: Returns a string representing the os-message-notification JMS topic. The user can connect to this topic to start the flow of operating system messages. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ body = {'include-refresh-messages': include_refresh_messages} result = self.manager.session.post( self.uri + '/operations/open-os-message-channel', body) return result['topic-name'] @logged_api_call def send_os_command(self, os_command_text, is_priority=False): """ Send a command to the operating system running in this LPAR. Authorization requirements: * Object-access permission to this Partition. * Task permission to the "Operating System Messages" task in modification mode. Parameters: os_command_text (string): The text of the operating system command. is_priority (bool): Boolean controlling whether this is a priority operating system command, as follows: * If `True`, this message is treated as a priority operating system command. * If `False`, this message is not treated as a priority operating system command. The default. 
Returns: None Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` """ body = {'is-priority': is_priority, 'operating-system-command-text': os_command_text} self.manager.session.post( self.uri + '/operations/send-os-cmd', body) @logged_api_call def psw_restart(self, wait_for_completion=True, operation_timeout=None, status_timeout=None, allow_status_exceptions=False): """ Restart this LPAR, using the HMC operation "PSW Restart". This HMC operation has deferred status behavior: If the asynchronous job on the HMC is complete, it takes a few seconds until the LPAR status has reached the desired value. If `wait_for_completion=True`, this method repeatedly checks the status of the LPAR after the HMC operation has completed, and waits until the status is in the desired state "operating", or if `allow_status_exceptions` was set additionally in the state "exceptions". Authorization requirements: * Object-access permission to this LPAR. * Before HMC API version 3.6 in an update to HMC 2.15.0: Object-access permission to the CPC of this LPAR. * Task permission for the "PSW Restart" task. Parameters: wait_for_completion (bool): Boolean controlling whether this method should wait for completion of the requested asynchronous HMC operation, as follows: * If `True`, this method will wait for completion of the asynchronous job performing the operation, and for the status becoming "operating" (or in addition "exceptions", if `allow_status_exceptions` was set. * If `False`, this method will return immediately once the HMC has accepted the request to perform the operation. operation_timeout (:term:`number`): Timeout in seconds, for waiting for completion of the asynchronous job performing the operation. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.OperationTimeout` is raised. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached the desired status, after the HMC operation has completed. The special value 0 means that no timeout is set. `None` means that the default async operation timeout of the session is used. If the timeout expires when `wait_for_completion=True`, a :exc:`~zhmcclient.StatusTimeout` is raised. allow_status_exceptions (bool): Boolean controlling whether LPAR status "exceptions" is considered an additional acceptable end status when `wait_for_completion` is set. Returns: `None` or :class:`~zhmcclient.Job`: If `wait_for_completion` is `True`, returns `None`. If `wait_for_completion` is `False`, returns a :class:`~zhmcclient.Job` object representing the asynchronously executing job on the HMC. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.OperationTimeout`: The timeout expired while waiting for completion of the operation. :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. 
""" body = {} result = self.manager.session.post( self.uri + '/operations/psw-restart', body, wait_for_completion=wait_for_completion, operation_timeout=operation_timeout) if wait_for_completion: statuses = ["operating"] if allow_status_exceptions: statuses.append("exceptions") self.wait_for_status(statuses, status_timeout) return result @logged_api_call def wait_for_status(self, status, status_timeout=None): """ Wait until the status of this LPAR has a desired value. Parameters: status (:term:`string` or iterable of :term:`string`): Desired LPAR status or set of status values to reach; one or more of the following values: * ``"not-activated"`` - The LPAR is not active. * ``"not-operating"`` - The LPAR is active but no operating system is running in the LPAR. * ``"operating"`` - The LPAR is active and an operating system is running in the LPAR. * ``"exceptions"`` - The LPAR or its CPC has one or more unusual conditions. Note that the description of LPAR status values in the :term:`HMC API` book (as of its version 2.13.1) is partly confusing. status_timeout (:term:`number`): Timeout in seconds, for waiting that the status of the LPAR has reached one of the desired status values. The special value 0 means that no timeout is set. `None` means that the default status timeout will be used. If the timeout expires , a :exc:`~zhmcclient.StatusTimeout` is raised. Raises: :exc:`~zhmcclient.HTTPError` :exc:`~zhmcclient.ParseError` :exc:`~zhmcclient.AuthError` :exc:`~zhmcclient.ConnectionError` :exc:`~zhmcclient.StatusTimeout`: The timeout expired while waiting for the desired LPAR status. """ if status_timeout is None: status_timeout = \ self.manager.session.retry_timeout_config.status_timeout if status_timeout > 0: end_time = time.time() + status_timeout if isinstance(status, (list, tuple)): statuses = status else: statuses = [status] while True: # Fastest way to get actual status value: lpars = self.manager.cpc.lpars.list( filter_args={'name': self.name}) assert len(lpars) == 1 this_lpar = lpars[0] actual_status = this_lpar.get_property('status') if actual_status in statuses: return if status_timeout > 0 and time.time() > end_time: raise StatusTimeout( "Waiting for LPAR {} to reach status(es) '{}' timed out " "after {} s - current status is '{}'". format(self.name, statuses, status_timeout, actual_status), actual_status, statuses, status_timeout) time.sleep(1) # Avoid hot spin loop
python
# --------------------------------------------------------------------- # Project "Track 3D-Objects Over Time" # Copyright (C) 2020, Dr. Antje Muntzinger / Dr. Andreas Haja. # # Purpose of this file : Parameter file for tracking # # You should have received a copy of the Udacity license together with this program. # # https://www.udacity.com/course/self-driving-car-engineer-nanodegree--nd013 # ---------------------------------------------------------------------- # # general parameters dim_state = 6 # process model dimension # Kalman filter parameters (Step 1) dt = 0.1 # time increment q=3 # process noise variable for Kalman filter Q # track management parameters (Step 2) confirmed_threshold = 0.8 # track score threshold to switch from 'tentative' to 'confirmed' delete_threshold = 0.6 # track score threshold to delete confirmed tracks window = 6 # number of frames for track score calculation max_P = 3**2 # delete track if covariance of px or py bigger than this sigma_p44 = 50 # initial setting for estimation error covariance P entry for vx sigma_p55 = 50 # initial setting for estimation error covariance P entry for vy sigma_p66 = 5 # initial setting for estimation error covariance P entry for vz weight_dim = 0.1 # sliding average parameter for dimension estimation # association parameters (Step 3) gating_threshold = 0.995 # percentage of correct measurements that shall lie inside gate gating_threshold_lidar = 0.995 # measurement parameters (Step 4) sigma_lidar_x = 0.1 # measurement noise standard deviation for lidar x position sigma_lidar_y = 0.1 # measurement noise standard deviation for lidar y position sigma_lidar_z = 0.1 # measurement noise standard deviation for lidar z position sigma_cam_i = 5 # measurement noise standard deviation for image i coordinate sigma_cam_j = 5 # measurement noise standard deviation for image j coordinate
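# A sketch of how dt, q and dim_state are typically combined into the system
# matrix F and the process noise covariance Q of a constant-velocity model;
# the project's own filter class may build these matrices differently.
def example_constant_velocity_F_Q():
    import numpy as np

    F = np.identity(dim_state)
    F[0:3, 3:6] = dt * np.identity(3)        # position is advanced by velocity * dt
    q1 = dt * q                              # noise accumulated on the velocity block
    q2 = (dt ** 2 / 2) * q                   # position/velocity cross terms
    q3 = (dt ** 3 / 3) * q                   # noise accumulated on the position block
    Q = np.zeros((dim_state, dim_state))
    Q[0:3, 0:3] = q3 * np.identity(3)
    Q[0:3, 3:6] = q2 * np.identity(3)
    Q[3:6, 0:3] = q2 * np.identity(3)
    Q[3:6, 3:6] = q1 * np.identity(3)
    return F, Q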
python
#-*- encoding=utf-8 -*- #a example to demo multi threading python app for mq handling #ganben import Queue import threading import time import paho.mqtt.client as mqtt queueLock = threading.Lock() posiQueue = Queue.Queue(100) callQueue = Queue.Queue(100) threads = [] threadID = 1 def on_connect(client, userdata, rc): client.subscribe('position') client.subscribe('nursecall') #maybe here can be configured print('Connected with result code {0}'.format(str(rc))) def on_message(client, userdata, msg): print('Topic={0}, Message={1}'.format(msg.topic, str(msg.payload))) if msg.topic == 'position': queueLock.acquire() posiQueue.put(str(msg.payload)) queueLock.release() elif msg.topic == 'nursecall': queueLock.acquire() callQueue.put(str(msg.payload)) queueLock.release() class MqttListener(threading.Thread): def __init__(self, threadID, name, q): threading.Thread.__init__(self) self.threadID = threadID self.name = name self.q = q def run(self): process_data(self.name, self.q) print('exiting ... {0}'.format(self.name)) def process_data(threadName, q): while True: queueLock.acquire() if not q.empty(): data = q.get() queueLock.release() print('get {0} by {1}'.format(data, threadName)) else: queueLock.release() time.sleep(5) #create threads thread1 = MqttListener(1, 'thread1', posiQueue) thread1.start() threads.append(thread1) thread2 = MqttListener(2, 'thread2', callQueue) thread2.start() threads.append(thread2) #set up mqtt client client = mqtt.Client('server-listener') client.on_connect = on_connect client.on_message = on_message client.connect('192.168.1.100', 1883, 60) client.loop_forever()
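# A companion-script sketch (hypothetical, to be run as a separate process)
# showing how messages could be published to the two subscribed topics; the
# broker address mirrors the one used above and the payloads are invented.
#
#   import paho.mqtt.client as mqtt
#
#   pub = mqtt.Client('test-publisher')
#   pub.connect('192.168.1.100', 1883, 60)
#   pub.publish('position', 'tag42:room-12')   # ends up in posiQueue via on_message
#   pub.publish('nursecall', 'bed-3')          # ends up in callQueue via on_message
#   pub.disconnect()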
python
from flask_plugin import Plugin from flask import redirect, url_for, abort plugin = Plugin() @plugin.route('/say/<string:name>', methods=['GET']) def say(name: str): return 'Hello ' + name @plugin.route('/admin', methods=['GET']) def hello2admin(): return redirect(url_for('.say', name='Doge')) @plugin.route('/403', methods=['GET']) def test_forbidden(): abort(403) @plugin.errorhandler(403) def forbidden(error): return 'My Forbidden!', 403 @plugin.before_request def before_request(): print('Handled before request.')
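# A hedged mounting sketch. It assumes the flask_plugin package provides a
# PluginManager that discovers plugins from a "plugins" package and that this
# file lives at plugins/<name>/__init__.py -- both assumptions, not facts
# taken from this file.
#
#   from flask import Flask
#   from flask_plugin import PluginManager
#
#   app = Flask(__name__)
#   PluginManager(app)
#   app.run()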
python
import os from chr.core import chr_compile_module chr_compile_module(os.path.dirname(__file__), verbose=False, overwrite=True)
python
from django.test import TestCase # Create your tests here. from selenium import webdriver from selenium.webdriver.common.keys import Keys class MultiSelectFunctionalTests(TestCase): base_url = 'http://localhost:8000/tests' fixtures=['publications'] def setUp(self): self.driver = webdriver.Firefox() self.driver.implicitly_wait(10) #username_input = self.driver.find_element_by_name("username") #username_input.send_keys(user) def tearDown(self): self.driver.close() def testAddArticlePage(self): """As a visitor to the site, when I load the articles page, I see the publications in Selectize.js multiselect theme.""" self.driver.get('{0}{1}'.format(self.base_url,'/articles/')) self.assertIn("Headline:", self.driver.find_element_by_tag_name('body').text)
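# A hedged alternative sketch using Django's built-in live server, so the test
# does not depend on a separately running server at localhost:8000. The
# fixture name and the '/tests/articles/' path mirror the test above.
from django.contrib.staticfiles.testing import StaticLiveServerTestCase


class MultiSelectLiveServerTests(StaticLiveServerTestCase):
    fixtures = ['publications']

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.driver = webdriver.Firefox()
        cls.driver.implicitly_wait(10)

    @classmethod
    def tearDownClass(cls):
        cls.driver.quit()
        super().tearDownClass()

    def testAddArticlePage(self):
        self.driver.get('{0}{1}'.format(self.live_server_url, '/tests/articles/'))
        self.assertIn("Headline:", self.driver.find_element_by_tag_name('body').text)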
python
import tkinter as tkinter from tkinter import filedialog as FileDialog from Properties import Properties class Main(): def __init__(self): self.Window = tkinter.Tk() self.Properties = Properties() self.setTitle('Bloc Note') self.setSize(self.Properties.x, self.Properties.y) self.Frame = tkinter.Frame(self.Window).pack(fill="x", padx=1, pady=1) self.TextScroll = tkinter.Scrollbar(self.Frame) self.Text = tkinter.Text(self.Frame, width=97, height=25, font=("Helvetica", self.Properties.TextSize, "bold"), selectbackground="gray", selectforeground="black", undo=True, yscrollcommand=self.TextScroll.set ) self.TextScroll.config(command=self.Text.yview) self.Text.pack() self.Menu = tkinter.Menu(self.Window) self.Window.config(menu=self.Menu) self.Files = tkinter.Menu(self.Window, tearoff=False) self.Menu.add_cascade(label='File', menu=self.Files) self.Files.add_command(label='New File', command=self.newFile) self.Files.add_command(label='Open File', command=self.openFile) self.Files.add_command(label='Save File', command=self.saveFile) self.Files.add_command(label='Save As', command=self.saveAsFile) self.Files.add_command(label='Exit', command=self.Window.quit) def setTitle(self, str): self.Window.title(str) def setSize(self, x, y): self.Window.geometry(f'{x}x{y}') self.Properties.x = x self.Properties.y = y def newFile(self): self.Text.delete('1.0', 'end') self.setTitle('New File - Bloc Note') self.Properties.File = False def openFile(self): TextFile = FileDialog.askopenfilename(defaultextension=".*", title="Open File") if TextFile: self.Text.delete('1.0', 'end') self.Properties.File = TextFile File = TextFile self.setTitle(f'{File} - Bloc Note') TextFile = open(TextFile, 'r') Lines = enumerate(TextFile) for index, key in Lines: self.Text.insert('end', key) TextFile.close() def saveFile(self): if self.Properties.File: TextFile = open(self.Properties.File, 'w') TextFile.write(self.Text.get('1.0', 'end')) TextFile.close() else: self.saveAsFile() def saveAsFile(self): TextFile = FileDialog.asksaveasfilename(defaultextension=".*", title="Save As") if TextFile: self.Properties.File = TextFile File = TextFile self.setTitle(f'{File} - Bloc Note') TextFile = open(TextFile, 'w') TextFile.write(self.Text.get('1.0', 'end')) TextFile.close() Main = Main() Main.Window.mainloop()
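# The Properties class imported above is not included in this file. A minimal
# stand-in consistent with how it is used here (x, y, TextSize, File) could
# look like the sketch below; the concrete default values are guesses.
#
#   class Properties:
#       def __init__(self):
#           self.x = 800          # window width passed to setSize()
#           self.y = 500          # window height passed to setSize()
#           self.TextSize = 12    # font size for the Text widget
#           self.File = False     # path of the currently opened file, or False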
python
import unittest from unittest.mock import MagicMock import builtins class micropython: def const(self, number): return number class TestCase(unittest.TestCase): orig_import = __import__ module_mock = MagicMock() @classmethod def import_mock(cls, name, *args): if name == "uasyncio": return cls.orig_import("asyncio") if name in ("machine", "bluetooth"): print(f"{name} mocked by MagicMoc") return cls.module_mock() if name in ("micropython",): print(f"{name} mocked by file") return micropython() if ("___") in name: print(f"hugo_test {name} redirected to devel.{name}") return cls.orig_import("devel." + name, *args) return cls.orig_import(name, *args) builtins.__import__ = TestCase.import_mock
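# A small sketch of the import hook above in action: once builtins.__import__
# has been replaced, firmware-style imports resolve either to CPython modules
# or to MagicMock stand-ins, following the mapping defined in import_mock.
class ImportMockSketch(TestCase):
    def test_uasyncio_resolves_to_asyncio(self):
        import uasyncio          # redirected by the hook to CPython's asyncio
        import asyncio
        self.assertIs(uasyncio, asyncio)

    def test_machine_is_mocked(self):
        import machine           # served by the MagicMock stand-in
        # 'Pin' is an arbitrary attribute name used only for illustration.
        self.assertTrue(machine.Pin(2))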
python
# http://github.com/timestocome # adapted from http://natureofcode.com/book/chapter-9-the-evolution-of-code/ # 3 letter match ~ 20 generations # 4 letters ~ 120 generations import string as st import re import numpy as np import copy bots = [] new_bots = [] scores = [] n_letters = 4 n_bots = 100 target = ['c', 'a', 't', 's'] # def letters and symbols allowed world = ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', ' ', '.', ',', '?') # create a random string from world options def create_random_string(length): random_array = [] for i in range(length): l = np.random.randint(len(world)) random_array.append(world[l]) return random_array # compute number of possible strings def possibilities(length): return np.power(len(world), length) # create starting generation def init_generation(): for b in range(n_bots): letters = create_random_string(n_letters) bots.append(letters) # fitness test def fitness(bot): score = 0 for i in range(n_letters): if bot[i] == target[i]: score += 1 return score # use probabilistic fitness to chose next generation def choose_fittest(): candidates_array = [] # add one vote for each score point per bot for i in range(n_bots): for j in range(int(scores[i]) + 1): # include everyone, remove one to remove zero scoring bots candidates_array.append(i) # add bot id to array once for each fitness point # shuffle array np.random.shuffle(candidates_array) # select first n_bots candidates_array = candidates_array[0:n_bots] # collect parents parents = [] for i in range(n_bots): parents.append(bots[candidates_array[i]]) np.random.shuffle(parents) return parents # randomly choose 2 and combine def mate_parents(parents): m = parents[0] d = parents[1] new_bot1 = [] new_bot2 = [] i = 0 while i < n_letters: if i % 2 == 0: new_bot1.append(m[i]) new_bot2.append(d[i]) else: new_bot1.append(d[i]) new_bot2.append(m[i]) i += 1 new_bots.append(new_bot1) new_bots.append(new_bot2) parents.pop(0) # remove mom parents.pop(0) # remove dad def mutation(b): location = np.random.randint(n_letters) new_letter = np.random.randint(len(world)) b[location] = world[new_letter] return b ########################################################################## possible = possibilities(n_letters) print('%ld combinations of length 5 can be formed from world possibilities' % possible) # start a random collection of bots init_generation() #### main loop ### generation = 0 best_score = -1 goal = 0 scores = np.zeros(n_bots) #for z in range(10): while goal == 0: # score bots for b in range(n_bots): s = fitness(bots[b]) scores[b] = s if s == n_letters: print('Winner') print(bots[b], scores[b]) goal = 1 print('--------------------') for z in range(n_bots): print(bots[z]) break if s > best_score: best_score = s # choose fittest parents = choose_fittest() # mate fittest new_bots = [] for b in range(n_bots//2): mate_parents(parents) # re-set bots to new group bots = copy.copy(new_bots) new_bots = [] # random mutations for b in range(n_bots): r = np.random.randint(20) if r == 14: bots[b] = mutation(bots[b]) generation += 1 print('Generation %d Best score %d ' % (generation, best_score))
python
#encoding:utf-8 subreddit = 'CryptoMoonShots' t_channel = '@r_CryptoMoonShot' def send_post(submission, r2t): return r2t.send_simple(submission)
python
from seagulls.engine import ActiveSceneClient class FakeGameScene: pass class TestActiveSceneClient: def test_apply(self) -> None: fake_scene = FakeGameScene() def callback(scene: FakeGameScene) -> None: assert scene == fake_scene client = ActiveSceneClient(fake_scene) # type: ignore client.apply(callback) # type: ignore def test_set_active_scene(self) -> None: initial_fake_scene = FakeGameScene() second_fake_scene = FakeGameScene() def callback(scene: FakeGameScene) -> None: assert scene == second_fake_scene client = ActiveSceneClient(initial_fake_scene) # type: ignore client.set_active_scene(second_fake_scene) # type: ignore client.apply(callback) # type: ignore
python
import asyncio import logging from struct import Struct from time import time logger = logging.getLogger(__name__) class CyKitClient: def __init__(self, reader, writer, channels=14, sample_rate=128): self.sample_rate = sample_rate self._reader, self._writer = reader, writer self._struct = Struct('>' + 'f' * channels) def stop(self): if self._writer is not None: self._writer.close() def __aiter__(self): return self async def __anext__(self): if self._reader.at_eof(): raise ConnectionError("No more data from peer") data = await self._reader.readexactly(self._struct.size) if not data: raise ConnectionError("No more data from peer") return self._struct.unpack(data) async def _initialize(self, good_packet_threshold=64): last_time = time() good_packets = 0 while good_packets < good_packet_threshold: await self._reader.readexactly(self._struct.size) cur_time = time() delta = cur_time - last_time if delta > (1.0 / self.sample_rate) / 2: good_packets += 1 logger.debug("Good packet: %.4f ms", delta * 1000.0) else: logger.debug("Bad packet: %.4f ms", delta * 1000.0) last_time = cur_time return self async def connect_to_cykit(ip, port, timeout=3) -> CyKitClient: fut = asyncio.open_connection(ip, port) reader, writer = await asyncio.wait_for(fut, timeout) client = CyKitClient(reader, writer) return await client._initialize()
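# A usage sketch, assuming a CyKIT TCP server reachable at the (hypothetical)
# address below; it prints a handful of samples and then closes the connection.
async def _example_read_samples(ip="127.0.0.1", port=5151, n_samples=5):
    client = await connect_to_cykit(ip, port)
    try:
        count = 0
        async for channels in client:        # one tuple of floats per EEG sample
            print(channels)
            count += 1
            if count >= n_samples:
                break
    finally:
        client.stop()

# asyncio.run(_example_read_samples())       # uncomment to run against a live server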
python
__author__ = "Anand Krishnan Prakash" __email__ = "[email protected]" import pymortar import datetime import pandas as pd import argparse def get_error_message(x, resample_minutes=60): dt_format = "%Y-%m-%d %H:%M:%S" st = x.name st_str = st.strftime(dt_format) et_str = (st+datetime.timedelta(minutes=resample_minutes)).strftime(dt_format) site = x.site room = x.room zone = x.zone heat_percent = round(x.heat_percent, 2) cool_percent = round(x.cool_percent, 2) msg = "From {0} to {1}, zone: \'{2}\' in room: \'{3}\' at site: \'{4}\', was heating for {5}% of the time and cooling for {6}% of the time".format( st_str, et_str, zone, room, site, heat_percent, cool_percent ) return msg def tstat_zone_analysis(client, resample_minutes, start_time, end_time): st = start_time.strftime("%Y-%m-%dT%H:%M:%SZ") et = end_time.strftime("%Y-%m-%dT%H:%M:%SZ") print(st) print(et) tstat_query = """ SELECT ?tstat ?room ?zone ?state ?temp ?hsp ?csp WHERE { ?tstat bf:hasLocation ?room . ?zone bf:hasPart ?room . ?tstat bf:hasPoint ?state . ?tstat bf:hasPoint ?temp . ?tstat bf:hasPoint ?hsp . ?tstat bf:hasPoint ?csp . ?zone rdf:type/rdfs:subClassOf* brick:Zone . ?tstat rdf:type/rdfs:subClassOf* brick:Thermostat . ?state rdf:type/rdfs:subClassOf* brick:Thermostat_Status . ?temp rdf:type/rdfs:subClassOf* brick:Temperature_Sensor . ?hsp rdf:type/rdfs:subClassOf* brick:Supply_Air_Temperature_Heating_Setpoint . ?csp rdf:type/rdfs:subClassOf* brick:Supply_Air_Temperature_Cooling_Setpoint . }; """ qualify_response = client.qualify([tstat_query]) if qualify_response.error != "": print("ERROR: ", qualify_response.error) os.exit(1) print("Running on {0} sites".format(len(qualify_response.sites))) tstat_view = pymortar.View( name="tstat_points", sites=qualify_response.sites, definition=tstat_query, ) tstat_streams = pymortar.DataFrame( name="thermostat_data", aggregation=pymortar.MAX, window="1m", timeseries=[ pymortar.Timeseries( view="tstat_points", dataVars=["?state", "?temp", "?hsp", "?csp"] ) ] ) time_params = pymortar.TimeParams( start=st, end=et ) request = pymortar.FetchRequest( sites=qualify_response.sites, # from our call to Qualify views=[ tstat_view ], dataFrames=[ tstat_streams ], time=time_params ) result = client.fetch(request) tstat_df = result['thermostat_data'] tstats = [tstat[0] for tstat in result.query("select tstat from tstat_points")] error_df_list = [] for tstat in tstats: q = """ SELECT state_uuid, temp_uuid, hsp_uuid, csp_uuid, room, zone, site FROM tstat_points WHERE tstat = "{0}"; """.format(tstat) res = result.query(q) if len(res) == 0: continue state_col, iat_col, hsp_col, csp_col, room, zone, site = res[0] df = tstat_df[[state_col, iat_col, hsp_col, csp_col]] df.columns = ['state', 'iat', 'hsp', 'csp'] df2 = pd.DataFrame() resample_time = '{0}T'.format(resample_minutes) df2['min_hsp'] = df['hsp'].resample(resample_time).min() df2['min_csp'] = df['csp'].resample(resample_time).min() df2['max_hsp'] = df['hsp'].resample(resample_time).max() df2['max_csp'] = df['csp'].resample(resample_time).max() df2['heat_percent'] = df['state'].resample(resample_time).apply(lambda x: ((x==1).sum() + (x==4).sum())/resample_minutes*100) df2['cool_percent'] = df['state'].resample(resample_time).apply(lambda x: ((x==2).sum() + (x==5).sum())/resample_minutes*100) df2['tstat'] = tstat df2['room'] = room.split('#')[1] df2['zone'] = zone.split('#')[1] df2['site'] = site df2['both_heat_cool'] = False df2.loc[((df2.heat_percent > 0) & (df2.cool_percent > 0)), 'both_heat_cool'] = True if not df2[df2['both_heat_cool'] == 
True].empty: error_df_list.append(df2[df2['both_heat_cool'] == True]) if len(error_df_list) > 0: error_df = pd.concat(error_df_list, axis=0)[['site', 'zone', 'room', 'heat_percent', 'cool_percent', 'min_hsp', 'min_csp', 'max_hsp', 'max_csp']] error_df.index.name = 'time' error_msgs = error_df.apply(lambda x: get_error_message(x), axis=1).values for msg in error_msgs: print(msg) return error_df else: return pd.DataFrame() if __name__ == "__main__": parser = argparse.ArgumentParser(description='configure app parameters') parser.add_argument("-time_interval", help="length of time interval (in minutes) when you want to check if a zone is both heating and cooling", type=int, default=60, nargs='?') parser.add_argument("-st", help="start time for analysis in yyyy-mm-ddThh:mm:ss format", type=str, default="2018-12-10T00:00:00", nargs='?') parser.add_argument("-et", help="end time for analysis in yyyy-mm-ddThh:mm:ss format", type=str, default="2019-01-01T00:00:00", nargs='?') parser.add_argument("-filename", help="filename to store result of analysis", type=str, default="heat_and_cool_same_period.csv", nargs='?') resample_minutes = parser.parse_args().time_interval try: start_time = datetime.datetime.strptime(parser.parse_args().st, "%Y-%m-%dT%H:%M:%S") end_time = datetime.datetime.strptime(parser.parse_args().et, "%Y-%m-%dT%H:%M:%S") except Exception as e: raise Exception("Incorrect format for st or et. Use yyyy-mm-ddThh:mm:ss") filename = parser.parse_args().filename client = pymortar.Client({}) error_df = tstat_zone_analysis(client=client, resample_minutes=resample_minutes, start_time=start_time, end_time=end_time) if not error_df.empty: print("Writing results to {0}".format(filename)) error_df.to_csv(filename) else: print("No zones match the condition")
python
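A minimal sketch of the pandas resampling idea the script above relies on, using a synthetic 1-minute state series instead of a live pymortar fetch; the state codes (1/4 = heating, 2/5 = cooling) follow the script, everything else here is illustrative:

import numpy as np
import pandas as pd

resample_minutes = 60
# Synthetic 1-minute thermostat state stream: 0 = off, 1/4 = heating, 2/5 = cooling
idx = pd.date_range("2018-12-10", periods=180, freq="1min")
state = pd.Series(np.random.choice([0, 1, 2], size=len(idx)), index=idx)

summary = pd.DataFrame()
summary["heat_percent"] = state.resample(f"{resample_minutes}min").apply(
    lambda x: ((x == 1).sum() + (x == 4).sum()) / resample_minutes * 100)
summary["cool_percent"] = state.resample(f"{resample_minutes}min").apply(
    lambda x: ((x == 2).sum() + (x == 5).sum()) / resample_minutes * 100)
# Flag intervals where the zone both heated and cooled, as the script does
summary["both_heat_cool"] = (summary.heat_percent > 0) & (summary.cool_percent > 0)
print(summary)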
# Login credentials
LOGIN = "login"
PASSWORD = "password"

# Some API parameters
MAX_COUNT = 100  # Maximum number of records that can be obtained via *.wall.get, divided by 25.
LIMIT = 500
AGE1 = 14
AGE2 = 20
AGE3 = 35
AGE4 = 50
"""
LIMIT - Maximum number of records downloaded from a page via *wall.get
(works out to 5 requests of 100 records each). Better not to take more,
to avoid getting banned for the number of requests (change to None in the future).
"""

# Stop words used to filter out wall posts (profanity and links)
STOP_WORDS = ['блять', 'http', 'сука', "хуй", "ебать", "ебанина", "ебанько", "]", "ебля", "ебаный", "еблан", "епта", "ебливый", "блядь", "блядство", "блядина", "мудила", "дрочила", "пидор", "пидорас", "пидорасина", "ебучий", "хуеплет", "ебырь", "ебанутый", "пизда", "пиздец", "пиздюк", "пиздопроебина", "пиздуй", "распиздяй", "хуйня", "нахуй", "выблядок", "ебучка", "охуел", "Блять", "Http", "Сука", "Хуй", "Ебать", "Ебанина", "Ебанько", "[", "Ебля", "Ебаный", "Еблан", "Епта", "Ебливый", "Блядь", "Блядство", "Блядина", "Мудила", "Дрочила", "Пидор", "Пидорас", "Пидорасина", "Ебучий", "Хуеплет", "Ебырь", "Ебанутый", "Пизда", "Пиздец", "Пиздюк", "Пиздопроебина", "Пиздуй", "Распиздяй", "Хуйня", "Нахуй", "Выблядок", "Ебучка", "Охуел"]
python
""" Sponge Knowledge Base Provide action arguments - element value set """ class FruitsElementValueSetAction(Action): def onConfigure(self): self.withLabel("Fruits action with argument element value set") self.withArg(ListType("fruits", StringType()).withLabel("Fruits").withUnique().withProvided(ProvidedMeta().withElementValueSet())) self.withResult(IntegerType()) def onCall(self, fruits): return len(fruits) def onProvideArgs(self, context): if "fruits" in context.provide: context.provided["fruits"] = ProvidedValue().withAnnotatedElementValueSet([ AnnotatedValue("apple").withValueLabel("Apple"), AnnotatedValue("banana").withValueLabel("Banana"), AnnotatedValue("lemon").withValueLabel("Lemon") ])
python
# dna.py - DNA class and related functions # RMM, 11 Aug 2018 # # This file contains the implementation of DNA in the txtlsim toolbox. # This includes objects that represent the individual elements of a # DNA assembly as well as the functions required to create the models # associated with gene expression. # # Copyright (c) 2018, Build-A-Cell. All rights reserved. # See LICENSE file in the project root directory for details. import re # use Python's regular expression library from math import log from .component import Component from .sbmlutil import add_species, add_reaction, find_species from .mechanism import Mechanism, get_mechanisms from .pathutil import load_model from .parameter import get_parameters, update_existing, update_missing from .mechanisms import maturation # # DNA assembly # # The DNAassembly class is a non-standard component that consists of a # collection of DNA subcomponents. A mechanism dictionary is # maintained at the assembly level, but can be overriden at the # component level. Parameter dictionaries for DNA assembly are stored # in the individual elements and not at the assembly level, but the # `assemble_dna()` function can process assembly wide parameters. # # DNA elements that are part of an assembly have a data attribute # `assy` that points back to the assembly that the element is part of. # This attribute is initialized by the `DNAassembly.update_species()` # function (before calling the individual update functions for the DNA # elements). Note that this means that the `assy` attribute is not # available in the element initializer (since we don't yet know what # assembly we will be part of). # class DNAassembly(Component): """DNA assembly class The DNA assembly class is used to represent a collection of DNA subcomponents, typically consisting of a promoter, a ribosome binding site (5' UTR), a protein coding sequence (CDS), an optional C-terminus tag (for protein degradation), and a terminator (3' UTR). Subclasses can be used to create specialized types of DNA and predefined subclasses are available for promoters, RBSs, etc. The DNA assembly follows the rules of a Component but it is more complex because each of the elements of the assembly (subcomponents) have their own functions. As a consequence, most of what the assembly construct does is to keep track of the individual subcomponents and calls on those subcomponent to generate species and reactions. 
Data attributes --------------- name Name of the sequence (str) promoter Promoter sequence (DNA) utr5 5' UTR (DNA) cds Coding sequence (DNA) ctag C-terminus tag (DNA) utr3 3' UTR (DNA) dnalength Length of the entire sequence (int) rnalength Length of the transcribed components (int) peplength Lenth of the translated components (int) rnaname Name of the RNA species (str) [not implemented] rnap RNAP species (SMBLspecies) [not implemented] riboname Name of the ribosome species (str) [not implemented] ribo Ribosome species [not implemented] default_mechanisms default mechanisms for generating models custom_mechanisms customized mechanisms for generating models parameters Parameter values for the assembly (overrides elements) Methods ------- update_species() create/update species associated with construct update_reactions() create/update reactions associated with construct """ def __init__( self, name, promoter=None, utr5=None, cds=None, ctag=None, utr3=None, mechanisms={}, # custom mechanisms config_file=None, parameters={}, # parameter configuration **keywords # parameter keywords ): self.name = name self.promoter = promoter self.utr5 = utr5 self.cds = cds self.ctag = ctag self.utr3 = utr3 # Keep track of the length of DNA, RNA, and protein (peptide) self.dnalength = 0 self.rnalength = 0 self.peplength = 0 # Set up the default mechanisms for a DNA assembly # Note: transcription, translation, degradation are given by extract self.default_mechanisms = { 'maturation' : maturation.protein_basic() } self.custom_mechanisms = mechanisms # Create the config_file name (optional) if config_file == None and isinstance(name, str): config_file = self.name.lower() + ".csv" self.config_file = config_file # Set the assembly parameter values (processed by assemble_dna()) self.parameters = get_parameters( config_file, parameters, None, **keywords) # Create/update all of the species associated with this DNA assembly def update_species(self, mixture, conc, debug=False): # Create the DNA species self.dna = add_species(mixture, "DNA", self.name, conc) # Let the individual DNA elements create the additional species for dna in [self.promoter, self.utr5, self.cds, self.ctag, self.utr3]: if dna != None: # Store the DNA assembly that generated this component dna.assy = self # Update the species required for this component if debug: print("DNAassembly species update:", dna.name) dna.update_species(mixture, conc) # Create/update all of the relevant reactions for this DNA assembly def update_reactions(self, mixture, debug=False): # Go through each subcomponent and update reactions for dna in [self.promoter, self.utr5, self.cds, self.ctag, self.utr3]: if dna != None: dna.update_reactions(mixture) # # DNA component # # DNA elements will generally using the `DNA.__init__()` function to # initialize the object. To accommodate default parameter # dictionaries for tthe subclasses, an additional argument # (`default_parameters`) is available. # class DNA(Component): """DNA class The DNA class is used to represent a DNA sequence that has a given length. Its main purpose is as the parent object for DNA fragments and DNA assemblies. 
Note: for initialization of members of this class, the arguments should be as follows: DNA(name, length, [mechanisms], [config_file], [prefix]) DNAtype(name, required_arguments, [length], [mechanisms], [config_file], [prefix], [optional_arguments]) DNAelement(name, required_arguments, [length], [mechanisms], [config_file], [optional_arguments]) DNAtypes - DNAelements: Promoter - ConstitutePromoter, RepressedPromoter UTR5 - ConstituteRBS CDS - ProteinCDS Ctag - DegrationTAg UTR3 - Terminator Data attributes --------------- name Name of the sequence (str) length Length of the sequence (int) assy DNA assembly that we are part of mechanisms Local mechanisms for this component (overrides defaults) parameters Parameter dictionary for the DNA element """ def __init__( self, name, length=0, # positional arguments mechanisms={}, # custom mechanisms config_file=None, parameters={}, # customized parameters default_parameters = {}, # element parameters prefix="dna_", **keywords ): self.name = name self.length = length self.mechanisms = mechanisms self.prefix = prefix # Create the config_file name (optional) if config_file == None and isinstance(name, str): config_file = prefix + self.name.lower() + ".csv" self.config_file = config_file # Load and store the parameters for this component self.parameters = get_parameters( config_file, parameters, default_parameters, **keywords) # Set up default update functions to do nothing def update_species(self, mixture, conc): return None def update_reactions(self, mixture): return None # # Promoter subclasses # # The promoter subclasses are used to create standard promoters # (constitutive, repressed, activated). When creating an instance of # one of these subclasses, the name of the transcriptional regulator # (if any) is passed as an argument and the appropriate reactions are # instantiated. 
# # Promoter sequence class Promoter(DNA): "Promoter class - define a promoter sequence" # Default parameters used to describe a promoter default_parameters = { 'RNAPbound_F' : 20, # Default for ptet 'RNAPbound_R' : 400 # Default for ptet } def __init__( self, name, length=50, mechanisms={}, config_file=None, parameters={}, default_parameters = default_parameters, rnapname="RNAP", prefix="prom_", **keywords ): # Promoter initialization (including mechanisms and parameters) DNA.__init__( self, name, length=length, mechanisms=mechanisms, config_file=config_file, parameters=parameters, default_parameters = default_parameters, prefix=prefix, **keywords) # Set (or reset) values based on function arguments self.rnapname = rnapname # Fill in any missing parameter values with defaults update_missing(self.parameters, Promoter.default_parameters) def update_species(self, mixture, conc, parameters={}): assy = self.assy # Get the DNA assembly we are part of # Create the mRNA species assy.rnaname = assy.utr5.name + "--" + assy.cds.name if (assy.ctag != None): assy.rnaname += "--" + assy.ctag.name assy.rna = add_species(mixture, "RNA", assy.rnaname, 0) # Create RNA polymerase bound to DNA assy.rnap_bound = add_species(mixture, "Complex", self.rnapname + ":" + assy.name, 0) # Create any other species needed by the transcriptional machinery mechanisms = get_mechanisms(mixture, assy, self.mechanisms) mechanisms['transcription'].update_species(mixture, assy, conc) # Default action of a promoter is to implement transcription def update_reactions(self, mixture, debug=False): model = mixture.model # Get the model where we will store results assy = self.assy # Get the DNA assembly we are part of # Create the reactions required for transcription mechanisms = get_mechanisms(mixture, assy, self.mechanisms) mechanisms['transcription'].update_reactions(mixture, assy) # Constitute promoter class ConstitutivePromoter(Promoter): "ConstitutivePromoter - define a constitutive promoter" # Repressed promoter class RepressedPromoter(Promoter): #! 
TODO: add docstring # Default parameters used to describe a repressed promoter default_parameters = { 'RNAPbound_F' : 20, # Default for ptet 'RNAPbound_R' : 400, # Default for ptet 'DNA_Sequestration_F' : 2.5e-1, # Default for ptet 'DNA_Sequestration_R' : 1.11e-4, # Default for ptet } def __init__( self, name, repressor, length=50, mechanisms={}, config_file=None, parameters={}, rnapname="RNAP", dimer=False, **keywords ): # Promoter initialization (including mechanisms and parameters) Promoter.__init__( self, name, length=length, mechanisms=mechanisms, config_file=config_file, parameters=parameters, default_parameters = RepressedPromoter.default_parameters, rnapname=rnapname, **keywords) # Store additional information related to repression self.tfname = "Protein " + repressor if dimer: self.tfname += " dimer" self.dimer = dimer def update_species(self, mixture, conc): assy = self.assy # Get the DNA assembly we are part of # Create species for unrepressed promoter Promoter.update_species(self, mixture, conc) # Create repressor bound to DNA self.tf_bound = add_species(mixture, "Complex", self.tfname + ":" + assy.name, 0) # mechanisms = get_mechanisms(mixture, assy, self.mechanisms) # mechanisms['process'].update_species(mixture, assy, conc) def update_reactions(self, mixture, debug=False): model = mixture.model # Get the model where we will store results assy = self.assy # Get the DNA assembly we are part of params = self.parameters # Get the parameter dictionary # Create the reactions for the unbound promoter Promoter.update_reactions(self, mixture) # Create the reaction for the transcription factor binding to DNA tf_species = find_species(mixture, self.tfname) if tf_species == None: raise NameError("RepressedPromoter: %s not found" % self.tfname) add_reaction(mixture, [tf_species, assy.dna], [self.tf_bound], kf = params['DNA_Sequestration_F'], kr = params['DNA_Sequestration_R'], prefix = "repr_") # mechanisms = get_mechanisms(mixture, assy, self.mechanisms) # mechanisms['process'].update_reactions(mixture, assy) # # UTR5 subclasses # # The UTR5 subclasses are used to create ribosome binding sites (RBSs). class UTR5(DNA): "UTR5 class - define 5' untranslated region sequence" # Default parameters used to describe a UTR5 (empty) default_parameters = {} def __init__( self, name, length=20, mechanisms={}, config_file=None, parameters={}, default_parameters = default_parameters, prefix="utr5_", **keywords ): DNA.__init__( self, name, length, mechanisms=mechanisms, config_file=config_file, parameters=parameters, default_parameters = default_parameters, prefix=prefix, **keywords) # Constitutive RBS class ConstitutiveRBS(UTR5): #! 
TODO: add docstring # Default parameters used to describe a constitutive RBS (TODO) default_parameters = { 'Ribosome_Binding_F' : 0.1, # TODO: add source information 'Ribosome_Binding_R' : 4, # TODO: add source information } def __init__( self, name, length=20, mechanisms={}, config_file=None, parameters={}, riboname = 'Ribo', # Ribosome species name **keywords # Additional keywords ): UTR5.__init__( self, name, length=length, mechanisms=mechanisms, config_file=config_file, parameters=parameters, default_parameters = ConstitutiveRBS.default_parameters, **keywords) self.riboname = riboname def update_species(self, mixture, conc, parameters={}): assy = self.assy # Get the DNA assembly we are part of # Create the protein assy.protname = assy.cds.name if (assy.ctag != None): assy.protname += "--" + assy.ctag.name assy.protein = add_species(mixture, "Protein", assy.protname, 0) # Create Ribosome bound to RNA assy.ribo_bound = add_species(mixture, "Complex", self.riboname + ":" + assy.rnaname, 0) # Create any other species needed by the transcriptional machinery mechanisms = get_mechanisms(mixture, assy, self.mechanisms) mechanisms['translation'].update_species(mixture, assy, conc) # Default action of a promoter is to implement transcription def update_reactions(self, mixture, debug=False): assy = self.assy # Get the DNA assembly we are part of mechanisms = get_mechanisms(mixture, assy, self.mechanisms) mechanisms['translation'].update_reactions(mixture, assy) # # CDS subclasses # # The CDS subclasses are used to create proteins and peptides # #! Sort out whether we need anything more than CDS class CDS(DNA): "CDS class - define protein coding sequence" # Default parameters used to describe a repressed promoter default_parameter_values = { 'Dimerization_F' : 1, # Default for TetR 'Dimerization_R' : 1, # Default for TetR 'Protein_Maturation' : log(2)/(5*60) # 5 minutes (GFP) } def __init__( self, name, length=1000, mechanisms={}, config_file=None, parameters={}, dimerize = False, maturation_time=None, **keywords ): # DNA initialization DNA.__init__( self, name, length=length,mechanisms=mechanisms, config_file=config_file, parameters=parameters, default_parameters = CDS.default_parameter_values, prefix="cds_", **keywords) self.dimerize = dimerize self.maturation_time = maturation_time def update_species(self, mixture, conc, parameters={}): assy = self.assy # Get the DNA assembly we are part of # Create species for the protein self.protein = add_species(mixture, "Protein", self.name, 0) if self.dimerize: #! Move to mechanism function? self.dimer = add_species(mixture, "Protein", self.name + " dimer", 0) mechanisms = get_mechanisms(mixture, assy, self.mechanisms) mechanisms['maturation'].update_species(mixture, assy, conc) # Default action of a protein is to mature and (optionally) dimerize def update_reactions(self, mixture, debug=False): assy = self.assy # Get DNA assembly we are part of parameters = assy.cds.parameters # get parameter values if self.dimerize: #! Move to mechanism function? 
add_reaction(mixture, [self.protein, self.protein], [self.dimer], kf = parameters['Dimerization_F'], kr = parameters['Dimerization_R'], prefix="cds_") # Allow override of protein maturation time if self.maturation_time != None: parameters['Protein_Maturation'] = log(2)/(self.maturation_time) # Let the individual mechanisms create all of the reactions mechanisms = get_mechanisms(mixture, assy, self.mechanisms) mechanisms['maturation'].update_reactions(mixture, assy) # Protein coding sequence (same as a CDS) class ProteinCDS(CDS): "Protein coding sequence" # # Ctag subclasses # # The Ctag subclasses are used to C-terminus tags class Ctag(DNA): #! TODO: add docstring "Ctag class - define C-terminus protein tag" def __init__(self, name, length=0, mechanisms={}, config_file=None, parameters={}, **keywords): # DNA initialization DNA.__init__(self, name, length=length, mechanisms=mechanisms, config_file=config_file, parameters=parameters, prefix="ctag_", **keywords) # Degradation tag class DegradationTag(Ctag): #! TODO: add docstring def __init__(self, name, protease="ClpXP", length=9, mechanisms={}, config_file=None, parameters={}, **keywords): Ctag.__init__(self, name, length=length, mechanisms=mechanisms, config_file=config_file, parameters=parameters, **keywords) self.protease = protease # # UTR3 subclasses # # The UTR3 subclasses are used to create terminators. class UTR3(DNA): "UTR3 class - define 3' untranslated region sequence" def __init__(self, name, length=0, mechanisms={}, config_file=None, parameters={}, **keywords): # DNA initialization DNA.__init__(self, name, length=length, mechanisms=mechanisms, config_file=config_file, parameters=parameters, prefix="utr3_", **keywords) # Terminator class Terminator(UTR3): #! TODO: add docstring def __init__(self, name, length=50, mechanisms={}, config_file=None): UTR3.__init__(self, name, length, mechanisms, config_file, prefix="term_") # # Functions for creatng and manipulating DNA # # Assemble fragments of DNA into a gene def assemble_dna( prom, utr5, cds, # required arguments ctag=None, utr3=None, # optional positional arguments mechanisms = {}, # custom mechanisms config_file = None, # parameter configuration information parameters = {}, # (overrides element defaults) assy_name = None, # component-specific arguments **keywords # parameter keywords (passed to elements) ): # Create a new sequence of DNA assy = DNAassembly( assy_name, mechanisms=mechanisms, config_file=config_file, parameters=parameters, **keywords) # Initialize the name string if nothing was given if assy_name == None: assy.name = "" # Parse and store the promoter sequence if isinstance(prom, str): name, length = parse_DNA_string(prom) # Get component name prom = load_model("prom", name, length) # Load from library if isinstance(prom, Promoter): assy.promoter = prom update_existing(prom.parameters, assy.parameters) assy.dnalength += prom.length if assy_name == None: assy.name += prom.name else: ValueError("invalid promoter specification") # Parse and store the 5' UTR if isinstance(utr5, str): name, length = parse_DNA_string(utr5) # Get component name utr5 = load_model("UTR5", name, length) # Load from library if isinstance(utr5, UTR5): assy.utr5 = utr5 update_existing(utr5.parameters, assy.parameters) assy.dnalength += utr5.length assy.rnalength += utr5.length if assy_name == None: assy.name += "--" + utr5.name else: ValueError("invalid UTR5 specification") # Parse and store the protein coding sequence if isinstance(cds, str): name, length = parse_DNA_string(cds) # Get 
component name cds = load_model("CDS", name, length) # Load from library if isinstance(cds, CDS): assy.cds = cds update_existing(cds.parameters, assy.parameters) assy.dnalength += cds.length assy.rnalength += cds.length assy.peplength += cds.length if assy_name == None: assy.name += "--" + cds.name else: ValueError("invalid CDS specification") # Parse and store the C-terminus tag if isinstance(ctag, str): name, length = parse_DNA_string(ctag) # Get component name ctag = load_model("ctag", name, length) # Load from library if isinstance(ctag, Ctag): assy.ctag = ctag update_existing(ctag.parameters, assy.parameters) assy.dnalength += ctag.length assy.rnalength += ctag.length assy.peplength += ctag.length if assy_name == None: assy.name += "--" + ctag.name else: ValueError("invalid Ctag specification") # Parse and store the 3' UTR if isinstance(utr3, str): name, length = parse_DNA_string(utr3) # Get component name utr3 = load_model("UTR3", utr3, length) # Load from library if isinstance(utr3, UTR3): assy.utr3 = utr3 update_existing(utr3.parameters, assy.parameters) assy.dnalength += utr3.length assy.rnalength += utr3.length if assy_name == None: assy.name += "--" + utr3.name else: ValueError("invalid UTR3 specification") return assy # Parse a DNA string (from the old MATLAB TX-TL modeling library) def parse_DNA_string(spec): # First check to see if we have a name(length) specification m = re.search("^(\w+)\((\d+)\)$", spec) if m == None: # If not, see if we just find a name m = re.search("^(\w+)$", spec) if m != None: name = m.group(1) length = None else: name = m.group(1) length = int(m.group(2)) # If we didn't get anything, generate an error if m == None: ValueError("Can't parse spec" + spec) # Return name and length as a tuple return name, length
python
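The parse_DNA_string() helper above accepts specs of the form name(length) or a bare name. A standalone sketch of that parsing pattern, with hypothetical spec strings and an explicit raise on failure:

import re

def parse_dna_spec(spec):
    # "name(length)" form, e.g. "ptet(50)"
    m = re.search(r"^(\w+)\((\d+)\)$", spec)
    if m:
        return m.group(1), int(m.group(2))
    # bare "name" form, e.g. "ptet"
    m = re.search(r"^(\w+)$", spec)
    if m:
        return m.group(1), None
    raise ValueError("Can't parse spec " + spec)

print(parse_dna_spec("ptet(50)"))   # ('ptet', 50)
print(parse_dna_spec("degfp"))      # ('degfp', None)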
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from services import svcProject from utils.exceptionHandle import DefaultError def list_project(): """ GET /api/projects :return: """ try: return { 'title': 'Succeed to List Project', 'detail': svcProject.list_project() }, 200 except Exception as e: raise DefaultError(title='Failed to List Project', detail=str(e)) def get_project(project_id): """ GET /api/project/{project_id} :param project_id: :return: """ try: return { 'title': 'Succeed to Get Project', 'detail': svcProject.get_project(project_id) }, 200 except Exception as e: raise DefaultError(title='Failed to Get Project', detail=str(e)) def add_project(body): """ POST /api/projects :param body: :return: """ try: return { 'title': 'Succeed to Create Project', 'detail': svcProject.add_project(body) }, 200 except Exception as e: raise DefaultError(title='Failed to Create Project', detail=str(e)) def update_project(project_id, body): """ PUT /api/project/{project_id} :param project_id: :param body: :return: """ try: return { 'title': 'Succeed to Update Project', 'detail': svcProject.update_project(project_id, body) }, 200 except Exception as e: raise DefaultError(title='Failed to Update Project', detail=str(e)) def update_project_status(project_id, body): """ PUT /api/project/{project_id}/status :param project_id: :param body: :return: """ try: return { 'title': 'Succeed to change Project Status', 'detail': svcProject.set_project_status(project_id, body) }, 200 except Exception as e: raise DefaultError(title='Failed to change Project Status', detail=str(e)) def delete_project(project_id): """ DELETE /api/project/{project_id} :param project_id: :return: """ try: svcProject.set_project_status(project_id, {'status': 'delete'}) return { 'title': 'Delete Project Succeed', }, 204 except Exception as e: raise DefaultError(title='Delete Project Failed', detail=str(e)) if __name__ == '__main__': print('This is API for project')
python
import pytz from cogs.Permissions import dm_commands, moderator_perms from GompeiFunctions import load_json, save_json from dateutil.parser import parse from discord.ext import commands from datetime import datetime from config import Config import asyncio import discord import os class Voting(commands.Cog): """ Create votes and let users vote on them. Currently only has support for handling one voting poll in a server """ def __init__(self, bot): self.bot = bot self.settings = load_json(os.path.join("config", "settings.json")) self.votes = None self.vote_open = False self.poll_message = None @commands.Cog.listener() async def on_ready(self): await self.load_voting() async def load_voting(self): self.votes = load_json(os.path.join("config", "votes.json")) # If the poll hasn't been created, nothing to load if self.votes["close"] is None: return else: closes = parse(self.votes["close"]) # If the poll has been closed if datetime.now() > closes: return else: self.vote_open = True await self.load_poll_message() await self.poll_timer(closes) async def load_poll_message(self): guild = self.bot.get_guild(self.settings["main_guild"]) print(guild) channel = guild.get_channel(self.votes["channel_id"]) print(channel) self.poll_message = await channel.fetch_message(self.votes["message_id"]) print(self.poll_message) async def update_poll_message(self): self.votes["votes"] = sorted(self.votes["votes"], key=lambda i: len(i["voters"]), reverse=True) last_votes = 0 last_count = 1 count = 1 leaderboard = "" for option in self.votes["votes"]: if len(option["voters"]) == last_votes: leaderboard += "**" + str(last_count) + ". **" + option["name"] + " - " + str(len(option["voters"])) + "\n" count += 1 else: leaderboard += "**" + str(count) + ". **" + option["name"] + " - " + str(len(option["voters"])) + "\n" last_votes = len(option["voters"]) last_count = count count += 1 embed = discord.Embed(title=self.votes["title"], color=0x43b581) embed.description = leaderboard await self.poll_message.edit(embed=embed) async def poll_timer(self, close_date): self.vote_open = True await asyncio.sleep((close_date - discord.utils.utcnow()).total_seconds()) await self.close_poll(None) @commands.command(pass_context=True, aliases=["closePoll"]) @commands.check(moderator_perms) async def close_poll(self, ctx): """ Closes the poll Usage: .closePoll :param ctx: context object """ last_votes = 0 last_count = 1 count = 1 leaderboard = "" for option in self.votes["votes"]: if len(option["voters"]) == last_votes: leaderboard += "**" + str(last_count) + ". **" + option["name"] + " - " + str( len(option["voters"])) + "\n" count += 1 else: leaderboard += "**" + str(count) + ". **" + option["name"] + " - " + str(len(option["voters"])) + "\n" last_votes = len(option["voters"]) last_count = count count += 1 embed = discord.Embed(title=self.votes["title"], color=0x43b581) if len(self.votes["votes"]) > 0: embed.description = ":star: " + self.votes["votes"][0]["name"] + " :star:\n" + leaderboard else: embed.description = ":star: Nothing! 
:star:\n" + leaderboard await self.poll_message.edit(embed=embed) self.vote_open = False self.votes["close"] = None self.votes["title"] = None self.votes["channel_id"] = None self.votes["message_id"] = None self.votes["votes"] = None save_json(os.path.join("config", "votes.json"), self.votes) if ctx is not None: await ctx.send("Closed poll") await self.poll_message.edit() @commands.command(pass_context=True, aliases=['createOpenVote']) @commands.check(moderator_perms) @commands.guild_only() async def create_open_vote(self, ctx, channel: discord.TextChannel, title, close_timestamp, *, message): """ Creates an open poll that users can add options to vote for Usage: .createOpenVote <channel> <title> <closeTime> <message> :param ctx: context object :param channel: channel for the poll :param title: embed title for the poll :param close_timestamp: closing time for the poll :param message: message to accompany the poll """ if str(ctx.guild.id) in self.votes: await ctx.send("A vote is already running for this server") else: closes = parse(close_timestamp) if closes is None: await ctx.send("Not a valid close time") closes = closes.astimezone(pytz.utc) if (closes - discord.utils.utcnow()).total_seconds() < 0: await ctx.send("Close time cannot be before current time") else: modifier = 4 for char in ctx.message.content[:ctx.message.content.find(close_timestamp)]: if char == "\"": modifier += 1 embed = discord.Embed(title=title, color=0x43b581) self.poll_message = await channel.send(message + "```.addOption <option> - Create an option to vote " "for and cast your vote for it\n.vote <option> - " "Cast a vote for an option in the poll\n.removeVote " "<option> - Removes a vote you casted for an " "option\n.sendPoll - sends the poll embed (does not " "update live)```", embed=embed) self.votes = { "type": "open", "close": close_timestamp, "title": title, "channel_id": channel.id, "message_id": self.poll_message.id, "votes": [] } save_json(os.path.join("config", "votes.json"), self.votes) # Create open thread voting_thread = await self.poll_message.create_thread( name=title + " Voting", auto_archive_duration=10080, ) Config.add_command_channel(voting_thread) await self.poll_timer(closes) await voting_thread.edit(archived=True) Config.remove_command_channel(voting_thread) @commands.command(pass_context=True, aliases=['createDecisionVote']) @commands.check(moderator_perms) @commands.guild_only() async def create_decision_vote(self, ctx, channel: discord.TextChannel, title, close_timestamp, *, message): if str(ctx.guild.id) in self.votes: await ctx.send("A vote is already running for this server") else: closes = parse(close_timestamp) if closes is None: await ctx.send("Not a valid close time") elif (closes - datetime.now()).total_seconds() < 0: await ctx.send("Close time cannot be before current time") else: modifier = 4 for char in ctx.message.content[:ctx.message.content.find(close_timestamp)]: if char == "\"": modifier += 1 def check_author(msg): return msg.author.id == ctx.author.id self.votes = { "type": "decision", "close": close_timestamp, "title": title, "channel_id": channel.id, "message_id": None, "votes": [] } await ctx.send("What options would you like to add to this decision poll? 
(Put each option on a new " "line)") response = await self.bot.wait_for('message', check=check_author) options = response.content.splitlines() for option in options: self.votes["votes"].append({"name": option, "creator": None, "voters": []}) embed = discord.Embed(title=title, color=0x43b581) if len(self.votes["votes"]) == 0: await ctx.send("You need at least one option in your poll") return self.poll_message = await channel.send( message + "```.vote <option> - Cast a vote for an option in the poll" "\n.removeVote <option> - Removes a vote you casted for an option" "\n.sendPoll - sends the poll embed (does not update live)```", embed=embed ) self.votes["message_id"] = self.poll_message.id await self.update_poll_message() save_json(os.path.join("config", "votes.json"), self.votes) await self.poll_timer(closes) @commands.command(pass_context=True, aliases=["addOption"]) @commands.check(dm_commands) async def add_option(self, ctx): """ Adds an option to the poll Usage: .addOption <option> :param ctx: context object """ if not self.vote_open: await ctx.send("There is no poll currently open") return if not self.votes["type"] == "open": await ctx.send("Cannot add options to this type of poll") return user_option = ctx.message.content[ctx.message.content.find(" ") + 1:] if len(user_option) > 88: await ctx.send("This option is too long") return if not user_option.isalnum(): if "-" in user_option: modified_string = user_option.replace("-", "") if not modified_string.isalnum(): await ctx.send("Channel names have to be alphanumeric") return if not all(c.isdigit() or c.islower() or c == "-" for c in user_option): await ctx.send("Channel names must be lowercase") return elif " " in user_option or "\n" in user_option: await ctx.send("Channel names cannot contain spaces (try using a \"-\" instead)") return else: # Check if the user has an option already or if the option already exists for option in self.votes["votes"]: if option["creator"] == ctx.author.id: await ctx.send("You already added an option to this poll") return if user_option == option["name"]: await ctx.send("This option already exists") return self.votes["votes"].append({"name": user_option, "creator": ctx.author.id, "voters": [ctx.author.id]}) save_json(os.path.join("config", "votes.json"), self.votes) await self.update_poll_message() await ctx.send("Successfully added your option") @commands.command(pass_context=True) @commands.check(dm_commands) async def vote(self, ctx): """ Votes for an option in the poll Usage: .vote <option> :param ctx: context object """ if not self.vote_open: await ctx.send("There is no poll currently open") return user_option = ctx.message.content[ctx.message.content.find(" ") + 1:] if self.votes["type"] == "open": for option in self.votes["votes"]: if user_option == option["name"]: if ctx.author.id in option["voters"]: await ctx.send("You already voted for this option") return option["voters"].append(ctx.author.id) save_json(os.path.join("config", "votes.json"), self.votes) await self.update_poll_message() await ctx.send("Successfully voted for " + user_option) return elif self.votes["type"] == "decision": print("got here") for option in self.votes["votes"]: if user_option == option["name"]: if ctx.author.id in option["voters"]: await ctx.send("You already voted for this option") return else: for other_option in self.votes["votes"]: if user_option != other_option["name"]: if ctx.author.id in other_option["voters"]: def check_author(message): return message.author.id == ctx.author.id await ctx.send( "You already 
voted for an option (" + other_option["name"] + "). Would you like to switch your vote to " + option["name"] + "? (Y/N)" ) response = await self.bot.wait_for('message', check=check_author) if response.content.lower() == "y" or response.content.lower() == "yes": other_option["voters"].remove(ctx.author.id) option["voters"].append(ctx.author.id) save_json(os.path.join("config", "votes.json"), self.votes) await self.update_poll_message() await ctx.send("Successfully voted for " + user_option) else: await ctx.send("Kept your vote for " + other_option["name"]) return option["voters"].append(ctx.author.id) save_json(os.path.join("config", "votes.json"), self.votes) await self.update_poll_message() await ctx.send("Successfully voted for " + user_option) return if self.votes["type"] == "open": await ctx.send( "This option doesn't exist. If you'd like to add it do it with `" + self.settings["prefix"] + "addOption <option>`" ) else: await ctx.send("This option doesn't exist.") @commands.command(pass_context=True, aliases=["removeVote"]) @commands.check(dm_commands) async def remove_vote(self, ctx): """ Removes your vote for an option in the poll Usage: .removeVote <option> :param ctx: context object """ if not self.vote_open: await ctx.send("There is no poll currently open") return user_option = ctx.message.content[ctx.message.content.find(" ") + 1:] count = 0 for option in self.votes["votes"]: if user_option == option["name"]: if ctx.author.id not in option["voters"]: await ctx.send("You haven't voted for this option") return option["voters"].remove(ctx.author.id) if len(option["voters"]) == 0 and self.votes["type"] == "open": self.votes["votes"].pop(count) save_json(os.path.join("config", "votes.json"), self.votes) await self.update_poll_message() await ctx.send("Successfully removed vote for " + user_option) return count += 1 await ctx.send("This option doesn't exist") @commands.command(pass_context=True, aliases=["removeOption"]) @commands.check(moderator_perms) async def remove_option(self, ctx): """ Removes an option from the poll entirely Usage: .removeOption <option> :param ctx: context object """ user_option = ctx.message.content[ctx.message.content.find(" ") + 1:] count = 0 for option in self.votes["votes"]: if user_option == option["name"]: self.votes["votes"].pop(count) save_json(os.path.join("config", "votes.json"), self.votes) await self.update_poll_message() await ctx.send("Successfully removed option " + user_option) return count += 1 @commands.command(pass_context=True, aliases=["sendPoll"]) @commands.check(dm_commands) async def send_poll(self, ctx): """ Sends the poll Usage: .sendPoll :param ctx: context object """ if not self.vote_open: await ctx.send("There is no poll currently open") return last_votes = 0 last_count = 1 count = 1 leaderboard = "" for option in self.votes["votes"]: if len(option["voters"]) == last_votes: leaderboard += "**" + str(last_count) + ". **" + option["name"] + " - " + str( len(option["voters"])) + "\n" count += 1 else: leaderboard += "**" + str(count) + ". **" + option["name"] + " - " + str(len(option["voters"])) + "\n" last_votes = len(option["voters"]) last_count = count count += 1 embed = discord.Embed(title=self.votes["title"], color=0x43b581) embed.description = leaderboard await ctx.send("This poll does not update live", embed=embed) def setup(bot): bot.add_cog(Voting(bot))
python
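The leaderboard text above is rebuilt in several places (update_poll_message, close_poll, send_poll) with the same tie-handling rule: options with equal vote counts share the earlier rank. A standalone sketch of just that ranking logic on made-up options:

def build_leaderboard(options):
    # options: list of {"name": str, "voters": list}; sort by vote count, descending
    options = sorted(options, key=lambda o: len(o["voters"]), reverse=True)
    lines = []
    last_votes, last_rank = None, 1
    for rank, option in enumerate(options, start=1):
        votes = len(option["voters"])
        shown = last_rank if votes == last_votes else rank  # ties keep the earlier rank
        if votes != last_votes:
            last_votes, last_rank = votes, rank
        lines.append("**{0}. **{1} - {2}".format(shown, option["name"], votes))
    return "\n".join(lines)

print(build_leaderboard([
    {"name": "option-a", "voters": [1, 2]},
    {"name": "option-b", "voters": [3, 4]},
    {"name": "option-c", "voters": [5]},
]))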
from typing import List, Type import warnings import numpy as np import matplotlib.patches as mpatches import matplotlib.pyplot as plt from astropy import wcs from astropy.coordinates import SkyCoord from astropy.io import fits from astropy.modeling import models from astropy.utils.exceptions import AstropyWarning from astropy.visualization import LogStretch from matplotlib.offsetbox import AnchoredText from .result import Result __all__ = ["make_figure"] def _normalise(image: np.ndarray): '''Function normalises an array s.t it is over a range[0., 1.] Parameters ---------- image : np.ndarray Image to be normalised. Returns ------- Normalised image: np.ndarray. ''' m, M = np.min(image), np.max(image) return (image - m) / (M - m) def _supressAxs(ax): '''Function that removes all labels and ticks from a figure Parameters ---------- ax: matplotlib axis object Returns ------- ax : matplotlib axis object Now with no ticks or labels ''' ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) ax.spines["left"].set_visible(False) ax.spines["right"].set_visible(False) ax.spines["top"].set_visible(False) ax.spines["bottom"].set_visible(False) return ax def RADECtopixel(objList: List[List[float]], header) -> List[List[float]]: '''Function to convert RA DEC in objList to pixel coordinates using wcs in header of image Parameters ---------- objList : List[List[float]] List of list of RA, DEC, object type and psfMag_r header : Returns ------- occludingStars : List[List[float]] List of RA, DEC in pixel coordinates. ''' occludingStars = [] with warnings.catch_warnings(): # ignore invalid card warnings warnings.simplefilter('ignore', category=AstropyWarning) w = wcs.WCS(header) RAS = [item[0] for item in objList] DECS = [item[1] for item in objList] for ra, dec in zip(RAS, DECS): skyCoordPos = SkyCoord(ra, dec, unit="deg") x, y = wcs.utils.skycoord_to_pixel(skyCoordPos, wcs=w) occludingStars.append([x, y]) return occludingStars def make_oneone(ax, img, result): '''Function plots the cleaned image Parameters ---------- ax : matplotlip axis object img : np.ndarray image data to be plotted results : Result dataclass dataclass of calculated results for object Returns ------- ''' log_stretch = LogStretch(10000.) ax.imshow(log_stretch(_normalise(img)), origin="lower", aspect="auto") ax.scatter(result.apix[0], result.apix[1], label="Asym. centre") ax.set_xlim([-0.5, img.shape[0]+0.5]) ax.set_title("Cleaned Image") text = f"Sky={result.sky:.2f}\n" fr"Sky $\sigma$={result.sky_err:.2f}" textbox = AnchoredText(text, frameon=True, loc=3, pad=0.5) ax.add_artist(textbox) def make_onetwo(ax, mask, result): '''Function plots the object map Parameters ---------- ax : matplotlib axis object mask : np.ndarray object mask data to be plotted results : Result dataclass dataclass of calculated results for object Returns ------- ''' ax.imshow(mask, origin="lower", aspect="auto", cmap="gray") ax.scatter(result.apix[0], result.apix[1], label="Asym. 
centre") ax.set_xlim([-0.5, mask.shape[0]+0.5]) ax.set_ylim([-0.5, mask.shape[1]+0.5]) ax.set_title("Object mask") text = f"A={result.A[0]:.3f}\nA_bgr={result.A[1]:.3f}\n" rf"$A_s$={result.As[0]:.3f}" text += "\n" fr"$A_s90$={result.As90[0]:.3f}" if len(result.objList) > 0: text += f"\nmaskedFraction={result.maskedPixelFraction*100.:.1f}" textbox = AnchoredText(text, frameon=True, loc=3, pad=0.5) ax.add_artist(textbox) text = f"C={result.C:.3f}\nS={result.S:.3f}\n" rf"Gini={result.gini:.3f}" text += "\n" fr"m20={result.m20:.3f}" textbox = AnchoredText(text, frameon=True, loc=4, pad=0.5) ax.add_artist(textbox) circle = mpatches.Circle(((mask.shape[0]/2)+1, (mask.shape[1]/2)+1), result.rmax, fill=False, label="Rmax", color="white") ax.add_patch(circle) def make_twoone(ax, shape, result): '''Function plots the Sersic fit Parameters ---------- ax : matplotlib axis object axis instance to plot to shape : Tuple[int] Shape of image results : Result dataclass dataclass of calculated results for object Returns ------- modelimage : np.ndarray fitted model Sersic image ''' log_stretch = LogStretch(10000.) ny, nx = shape y, x = np.mgrid[0:ny, 0:nx] modelimage = models.Sersic2D.evaluate(x, y, result.sersic_amplitude, result.sersic_r_eff, result.sersic_n, result.sersic_x_0, result.sersic_y_0, result.sersic_ellip, result.sersic_theta) modelimage += np.random.normal(result.sky, result.sky_err, size=shape) ax.imshow(log_stretch(_normalise(modelimage)), origin="lower", aspect="auto") ax.scatter(result.sersic_x_0, result.sersic_y_0, label="Sersic centre") ax.set_title("Sersic fit") text = f"Ellip.={result.sersic_ellip:.3f}\n" text += f"n={result.sersic_n:.3f}\n r_eff={result.sersic_r_eff:.3f}\n" text += f"Amplitude={result.sersic_amplitude:.3f}" textbox = AnchoredText(text, frameon=True, loc=3, pad=0.5) ax.add_artist(textbox) a = result.sersic_r_eff b = a * np.abs(1. - result.sersic_ellip) x0 = result.sersic_x_0 y0 = result.sersic_y_0 theta = result.sersic_theta * 180./np.pi ellipse = mpatches.Ellipse(xy=(x0, y0), width=a, height=b, angle=theta, fill=False, label="Sersic half light", color="red") ax.add_patch(ellipse) return modelimage def make_twotwo(ax, img, modelImage, listofStarstoPlot, result): ''' function plots sersic fit residual Parameters ---------- ax : matplotlip axis object axis instance to plot to img : np.ndarray image data to be plotted modelImage : np.ndarray model sersic image listofStarstoPlot : List[List[float]] list of stars to that occlude the main object. [RA, DEC, name, psfMag_r] results : Result dataclass dataclasss of calculated results for object Returns ------- ''' if len(listofStarstoPlot) > 0: imageMask = np.where(result.starMask == 1, img, np.rot90(img)) residual = (imageMask - modelImage) ax.imshow(residual, origin="lower", aspect="auto") else: residual = (img - modelImage) ax.imshow(residual, origin="lower", aspect="auto") text = f"Range={np.amin(residual):.3e} => {np.amax(residual):.3e}\n" textbox = AnchoredText(text, frameon=True, loc=3, pad=0.5) ax.add_artist(textbox) ax.set_title("Sersic fit residual") def make_figure(result: Type[Result], folder: bool, save=False, show=False) -> None: '''Function plots results from image analysis. Plots two or four images. Top row: original image and object map with stars overplotted if any. bottom row: Sersic fit and residual with stars overplotted if any. Parameters ---------- result : Type[Result] Data class container of calculated results. Must have clean image and pixelmap in order to run this function. 
folder : bool If True then adjusts path to read file from. save : bool, optional If true function saves generated figure. show: bool, optional If true open interactive matplotlib plot. Returns ------- None ''' with warnings.catch_warnings(): # ignore invalid card warnings warnings.simplefilter('ignore', category=AstropyWarning) try: img, header = fits.getdata(result.cleanImage, header=True) except ValueError: if folder: img, header = fits.getdata(result.outfolder.parent / ("data/" + result.file), header=True) else: img, header = fits.getdata(result.outfolder.parent / (result.file), header=True) try: mask = fits.getdata(result.pixelMapFile) except ValueError: mask = fits.getdata(result.outfolder / ("pixelmap_" + result.file)) if result.sersic_r_eff != -99 and result.sky != -99: fig, axs = plt.subplots(2, 2) axs = axs.ravel() make_oneone(axs[0], img, result) make_onetwo(axs[1], mask, result) modelImage = make_twoone(axs[2], img.shape, result) make_twotwo(axs[3], img, modelImage, result.objList, result) else: fig, axs = plt.subplots(1, 2) make_oneone(axs[0], img, result) axs[0].set_ylim([-0.5, img.shape[1]+0.5]) make_onetwo(axs[1], mask, result) axs[1].set_ylim([-0.5, mask.shape[1]+0.5]) fig.set_figheight(11.25) fig.set_figwidth(20) if len(result.objList) > 0: occludingStars = RADECtopixel(result.objList, header) for i, ax in enumerate(axs): ax = _supressAxs(ax) if(len(result.objList) > 0): if i != 2: ax.scatter(*zip(*occludingStars), label="STAR", color="orange") if i != 3: ax.legend(loc=2) plt.subplots_adjust(top=0.975, bottom=0.005, left=0.003, right=0.997, hspace=0.050, wspace=0.006) if save: plt.savefig("results/result_" + result.file[11:-11] + ".png", dpi=96) if show: plt.show() plt.close()
python
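The figure helpers above rescale an image to [0, 1] and then apply an astropy LogStretch before imshow. A minimal sketch of that display step on a random array (no FITS input needed), mirroring _normalise:

import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import LogStretch

log_stretch = LogStretch(10000.)

img = np.random.exponential(scale=2.0, size=(64, 64))   # stand-in for a cleaned image
normed = (img - img.min()) / (img.max() - img.min())    # same idea as _normalise above

fig, ax = plt.subplots()
ax.imshow(log_stretch(normed), origin="lower", aspect="auto")
ax.set_title("Log-stretched image")
plt.close(fig)  # use plt.show() instead when running interactively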
import socket, time from kubism.util.dkr import PyApp_Image import docker import kubism.util.dkr as dkr SERVER = '172.24.12.161' CLIENT = '172.24.12.160' echo_port = 8080 # Echo Test # Create Echo Server print('Building and pushing images...') echo_srv = PyApp_Image('./examples/py/echo_server.py', parent_image = 'arm32v6/python:3-alpine', repo='echo', tag='server-v6', automate=True) echo_srv.docker = docker.DockerClient(base_url=f'ssh://pi@{SERVER}') echo_cli = PyApp_Image('./examples/py/echo_client.py', parent_image = 'arm32v7/python:3-buster', repo='echo', tag='client-v7', automate=True) echo_cli.docker = docker.DockerClient(base_url=f'ssh://pi@{CLIENT}') print(f'Run Server on server {SERVER} ...') echo_srv.run(ports={f'{echo_port}/tcp':echo_port}) print('Waiting 3 seconds...') time.sleep(3) print(f'Run Client on client {CLIENT} ...') print('Calling Server...') echo_cli.run(ports={f'{echo_port}/tcp':echo_port}) #echo_srv.stop() # Not necessary #echo_cli.stop() print('DONE!')
python
from flask_security_bundle import FlaskSecurityBundle class SecurityBundle(FlaskSecurityBundle): pass
python
from .parse_html_index import parse_html_index from .parse_html_raceindex import parse_html_raceindex from .parse_html_racelist import parse_html_racelist from .parse_html_oddstf import parse_html_oddstf from .parse_html_oddsk import parse_html_oddsk from .parse_html_odds2tf import parse_html_odds2tf from .parse_html_odds3t import parse_html_odds3t from .parse_html_odds3f import parse_html_odds3f from .parse_html_beforeinfo import parse_html_beforeinfo from .parse_html_raceresult import parse_html_raceresult __all__ = [ 'parse_html_index', 'parse_html_raceindex', 'parse_html_racelist', 'parse_html_oddstf', 'parse_html_oddsk', 'parse_html_odds2tf', 'parse_html_odds3t', 'parse_html_odds3f', 'parse_html_beforeinfo', 'parse_html_raceresult', ]
python
from .common import * # NOQA import pytest HUAWEI_CCE_ACCESS_KEY = os.environ.get('RANCHER_HUAWEI_CCE_ACCESS_KEY', "") HUAWEI_CCE_SECRET_KEY = os.environ.get('RANCHER_HUAWEI_CCE_SECRET_KEY', "") HUAWEI_CCE_PROJECT = os.environ.get('RANCHER_HUAWEI_CCE_PROJECT', "") HUAWEI_CCE_AMI = os.environ.get('RANCHER_HUAWEI_CCE_AMI', "") huaweiccecredential = pytest.mark.skipif(not (HUAWEI_CCE_ACCESS_KEY and HUAWEI_CCE_SECRET_KEY and HUAWEI_CCE_PROJECT), reason='HUAWEI CCE Credentials not provided, ' 'cannot create cluster') @huaweiccecredential def test_create_huaei_cce_cluster(): client = get_admin_client() huawei_cceConfig = get_huawei_cce_config() print("Cluster creation") cluster = client.create_cluster(huawei_cceConfig) print(cluster) cluster = validate_cluster(client, cluster, check_intermediate_state=True, skipIngresscheck=True) print(cluster) cluster_cleanup(client, cluster) def get_huawei_cce_config(): name = random_test_name("tl-test-auto-huawei-cce") huawei_cceConfig = { "accessKey":HUAWEI_CCE_ACCESS_KEY, "apiServerElbId":"", "authentiactionMode":"rbac", "authenticatingProxyCa":None, "availableZone":"cn-north-1a", "billingMode":0, "bmsIsAutoRenew":"false", "bmsPeriodNum":1, "bmsPeriodType":"month", "clusterBillingMode":0, "clusterEipId":"", "clusterFlavor":"cce.s2.small", "clusterType":"VirtualMachine", "containerNetworkCidr":"10.0.0.0/16", "containerNetworkMode":"overlay_l2", "dataVolumeSize":100, "dataVolumeType":"SATA", "description":"", "displayName":"", "driverName":"huaweicontainercloudengine", "eipBandwidthSize":100, "eipChargeMode":"traffic", "eipCount":3, "eipShareType":"PER", "eipType":"5_bgp", "externalServerEnabled":False, "highwaySubnet":"", "masterVersion":"v1.15.6", "nodeCount":3, "nodeFlavor":"c3.large.2", "nodeOperationSystem":"CentOS 7.6", "password":"", "projectId":HUAWEI_CCE_PROJECT, "region":"cn-north-1", "rootVolumeSize":40, "rootVolumeType":"SATA", "secretKey":HUAWEI_CCE_SECRET_KEY, "sshKey":"tanglei", "subnetId":"c3a34386-5212-4484-be9c-1220807c4cfa", "userName":"root", "vipSubnetId":"09fb7641-3958-47d7-b5fb-dd92a19ef7ee", "vpcId":"d5842876-29a6-4751-87bd-7c4af4cf2f47", "type":"huaweiEngineConfig", "keypairs":"cn-north-1a", } if HUAWEI_CCE_AMI is not None: huawei_cceConfig.update({"ami": HUAWEI_CCE_AMI}) # Generate the config for CCE cluster huawei_cceConfig = { "huaweiEngineConfig": huawei_cceConfig, "name": name, "type": "cluster" } print("\nHUAWEI CCE Configuration") print(huawei_cceConfig) return huawei_cceConfig
python
from jewelry import Jewelry class Necklace(Jewelry): DEFAULT_METAL : str = "gold" DEFAULT_GEM : str = "diamond" def __init__(self, metal : str = DEFAULT_METAL, gem : str = DEFAULT_GEM): super(Necklace,self).__init__(polished = True) self._metal = metal self._gem = gem @property def metal(self) -> str: return self._metal @metal.setter def metal(self, value : str) -> None: self._metal = value @property def gem(self) -> str: return self._gem @gem.setter def gem(self, value : str) -> None: self._gem = value
python
# Copyright (c) 2013 Shotgun Software Inc. # # CONFIDENTIAL AND PROPRIETARY # # This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit # Source Code License included in this distribution package. See LICENSE. # By accessing, using, copying or modifying this work you indicate your # agreement to the Shotgun Pipeline Toolkit Source Code License. All rights # not expressly granted therein are reserved by Shotgun Software Inc. """ This module contains all the implementations for the different folder types that can be created. """ from .errors import EntityLinkTypeMismatch from .static import Static from .listfield import ListField from .entity import Entity from .project import Project from .user import UserWorkspace from .step import ShotgunStep from .task import ShotgunTask
python
import functools from collections import OrderedDict, namedtuple from http import HTTPStatus from types import FunctionType from typing import Callable, Iterable, Optional from werkzeug.routing import Map, MethodNotAllowed, NotFound, RequestRedirect, Rule from PythonPlug import Conn from PythonPlug.plug import Plug Forward = namedtuple("Forward", ["to", "change_path"]) class RouterPlug(Plug): def __init__(self): super().__init__() self.url_map = Map() self.endpoint_to_plug = {} self.forwards = OrderedDict() def route(self, rule, methods=None, name=""): methods = set(methods) if methods is not None else None if methods and not "OPTIONS" in methods: methods.add("OPTIONS") def decorator(name: Optional[str], plug: Callable): self.add_route(rule_string=rule, plug=plug, methods=methods, name=name) return plug return functools.partial(decorator, name) async def call(self, conn: Conn): try: rule, args = self.url_adapter(conn).match( return_rule=True, method=conn.scope.get("method") ) except RequestRedirect as e: return await conn.redirect(e.new_url, code=302) except MethodNotAllowed as e: return await conn.send_resp(b"", HTTPStatus.METHOD_NOT_ALLOWED, halt=True) except NotFound as e: def prefix_matcher(prefix): return conn.private["remaining_path"].startswith(prefix) forward_matches = sorted(filter(prefix_matcher, self.forwards), key=len) if forward_matches: match = forward_matches[0] router, change_path = self.forwards[match] conn.private.setdefault("consumed_path", []).append(match) conn.private["remaining_path"] = conn.private["remaining_path"][ len(match) : ] if change_path: conn._scope["path"] = conn.private["remaining_path"] return await router(conn) return conn else: plug = self.endpoint_to_plug.get(rule.endpoint) conn.private.setdefault("router_args", {}).update(args) return await plug(conn) def url_adapter(self, conn: Conn): scope = conn.scope remaining_path = conn.private.get("remaining_path") if remaining_path is None: remaining_path = conn.private["remaining_path"] = scope.get("path") return self.url_map.bind( conn.req_headers.get("host"), path_info=remaining_path, script_name=scope.get("root_path", "") or None, url_scheme=scope.get("scheme"), query_args=scope.get("query_string", b""), ) def add_route( self, *, rule_string: str, plug: Callable, name: Optional[str] = None, methods: Optional[Iterable[str]] = None, ): if not name: if isinstance(plug, FunctionType): name = plug.__name__ if isinstance(plug, Plug): name = type(plug).__name__ assert name not in self.endpoint_to_plug, ( "a plug is overwriting an existing plug: %s" % name ) self.url_map.add(Rule(rule_string, endpoint=name, methods=methods)) self.endpoint_to_plug[name] = plug def forward(self, prefix, router=None, change_path=False): assert prefix not in self.forwards, ( "Cannot forward same prefix to different routers: %s" % prefix ) self.forwards[prefix] = Forward(router, change_path) return router
python
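RouterPlug above delegates the actual matching to werkzeug.routing. A minimal sketch of just that layer, independent of PythonPlug, using hypothetical rules:

from werkzeug.routing import Map, Rule

url_map = Map([
    Rule("/users/<int:user_id>", endpoint="get_user", methods=["GET"]),
    Rule("/health", endpoint="health", methods=["GET"]),
])

# bind() needs a host; match() returns (endpoint, view args) for a path and method
urls = url_map.bind("example.com", url_scheme="http")
print(urls.match("/users/42", method="GET"))   # ('get_user', {'user_id': 42})
print(urls.match("/health", method="GET"))     # ('health', {})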
import unittest from test_support import run_unittest, TESTFN import glob import os def mkdirs(fname): if os.path.exists(fname) or fname == '': return base, file = os.path.split(fname) mkdirs(base) os.mkdir(fname) def touchfile(fname): base, file = os.path.split(fname) mkdirs(base) f = open(fname, 'w') f.close() def deltree(fname): for f in os.listdir(fname): fullname = os.path.join(fname, f) if os.path.isdir(fullname): deltree(fullname) else: try: os.unlink(fullname) except: pass try: os.rmdir(fname) except: pass class GlobTests(unittest.TestCase): def norm(self, *parts): return os.path.normpath(os.path.join(self.tempdir, *parts)) def mktemp(self, *parts): touchfile(self.norm(*parts)) def setUp(self): self.tempdir = TESTFN+"_dir" self.mktemp('a', 'D') self.mktemp('aab', 'F') self.mktemp('aaa', 'zzzF') self.mktemp('ZZZ') self.mktemp('a', 'bcd', 'EF') self.mktemp('a', 'bcd', 'efg', 'ha') def tearDown(self): deltree(self.tempdir) def glob(self, *parts): if len(parts) == 1: pattern = parts[0] else: pattern = os.path.join(*parts) p = os.path.join(self.tempdir, pattern) return glob.glob(p) def assertSequencesEqual_noorder(self, l1, l2): l1 = list(l1) l2 = list(l2) l1.sort() l2.sort() self.assertEqual(l1, l2) def test_glob_literal(self): eq = self.assertSequencesEqual_noorder np = lambda *f: norm(self.tempdir, *f) eq(self.glob('a'), [self.norm('a')]) eq(self.glob('a', 'D'), [self.norm('a', 'D')]) eq(self.glob('aab'), [self.norm('aab')]) eq(self.glob('zymurgy'), []) def test_glob_one_directory(self): eq = self.assertSequencesEqual_noorder np = lambda *f: norm(self.tempdir, *f) eq(self.glob('a*'), map(self.norm, ['a', 'aab', 'aaa'])) eq(self.glob('*a'), map(self.norm, ['a', 'aaa'])) eq(self.glob('aa?'), map(self.norm, ['aaa', 'aab'])) eq(self.glob('aa[ab]'), map(self.norm, ['aaa', 'aab'])) eq(self.glob('*q'), []) def test_glob_nested_directory(self): eq = self.assertSequencesEqual_noorder np = lambda *f: norm(self.tempdir, *f) if os.path.normcase("abCD") == "abCD": # case-sensitive filesystem eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF')]) else: # case insensitive filesystem eq(self.glob('a', 'bcd', 'E*'), [self.norm('a', 'bcd', 'EF'), self.norm('a', 'bcd', 'efg')]) eq(self.glob('a', 'bcd', '*g'), [self.norm('a', 'bcd', 'efg')]) def test_glob_directory_names(self): eq = self.assertSequencesEqual_noorder np = lambda *f: norm(self.tempdir, *f) eq(self.glob('*', 'D'), [self.norm('a', 'D')]) eq(self.glob('*', '*a'), []) eq(self.glob('a', '*', '*', '*a'), [self.norm('a', 'bcd', 'efg', 'ha')]) eq(self.glob('?a?', '*F'), map(self.norm, [os.path.join('aaa', 'zzzF'), os.path.join('aab', 'F')])) def test_main(): run_unittest(GlobTests) if __name__ == "__main__": test_main()
python
from selenium import webdriver import os import subprocess driver = webdriver.Chrome(service_log_path=os.path.devnull) driver.set_window_size(1500, 900) fname = "file://" + os.getcwd() + "/opcodes.html" driver.get(fname) driver.save_screenshot("../images/opcode_map.png") driver.quit() subprocess.check_output([ "convert", "../images/opcode_map.png", "-trim", "../images/opcode_map.png"])
python
# Add a class attribute to count Student instances: every time an instance is
# created, the counter is incremented automatically.
class Student(object):
    count = 0

    def __init__(self, name, score):
        # Store name/score on the instance (not on the class) and bump the counter.
        self.name = name
        self.score = score
        Student.count += 1


# Test:
if Student.count != 0:
    print('Test failed!')
else:
    bart = Student('Bart', 90)
    if Student.count != 1:
        print('Test failed!')
    else:
        lisa = Student('Bart', 80)
        if Student.count != 2:
            print('Test failed!')
        else:
            print('Students:', Student.count)
            print('Test passed!')

Michael = Student('Michael', 90)
Jane = Student('Jane', 90)
KangKang = Student('KangKang', 90)

# Note: the class-level statement `count = 0` runs only once, when the class is
# defined, while __init__() runs every time an instance is created.
print(Student.count)
python
#User function Template for python3

class Solution:
    # Function to find if there exists a triplet in the
    # array A[] which sums up to X.
    def find3Numbers(self, A, n, X):
        # Your Code Here
        # Sort, then run a two-pointer scan for every fixed first element.
        A.sort()
        for i in range(n - 2):
            start = i + 1
            end = n - 1
            while end > start:
                sum1 = A[i] + A[start] + A[end]
                if sum1 > X:
                    end -= 1
                elif sum1 < X:
                    start += 1
                else:
                    return 1
        return 0

#{ 
#  Driver Code Starts
#Initial Template for Python 3

import atexit
import io
import sys

_INPUT_LINES = sys.stdin.read().splitlines()
input = iter(_INPUT_LINES).__next__
_OUTPUT_BUFFER = io.StringIO()
sys.stdout = _OUTPUT_BUFFER

@atexit.register
def write():
    sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())

if __name__ == '__main__':
    t = int(input())
    for i in range(t):
        n, X = map(int, input().strip().split())
        A = list(map(int, input().strip().split()))
        ob = Solution()
        if ob.find3Numbers(A, n, X):
            print(1)
        else:
            print(0)
# } Driver Code Ends
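# Hypothetical quick check, kept as a comment so it does not interfere with the
# stdin-based driver above:
#
#     ob = Solution()
#     ob.find3Numbers([1, 4, 45, 6, 10, 8], 6, 13)  # -> 1 (1 + 4 + 8 == 13)
#     ob.find3Numbers([1, 2, 4, 3, 6], 5, 10)       # -> 1 (1 + 3 + 6 == 10)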
python
# Created: 17.05.2019 # Copyright (c) 2019, Manfred Moitzi # License: MIT License from typing import TYPE_CHECKING, Iterable, List, Mapping, Set import json from ezdxf.sections.tables import TABLENAMES from ezdxf.lldxf.tags import Tags if TYPE_CHECKING: from ezdxf.eztypes import Insert, MText, LWPolyline, Polyline, Spline, Leader, Dimension, Image, Mesh, Hatch from ezdxf.eztypes import DXFEntity, Linetype, DXFTag, BlockLayout __all__ = ['entities_to_code', 'block_to_code', 'table_entries_to_code'] def entities_to_code(entities: Iterable['DXFEntity'], layout: str = 'layout', ignore: Iterable[str] = None) -> 'Code': """ Translates DXF entities into Python source code to recreate this entities by ezdxf. Args: entities: iterable of DXFEntity layout: variable name of the layout (model space or block) as string ignore: iterable of entities types to ignore as strings like ``['IMAGE', 'DIMENSION']`` Returns: :class:`Code` """ code = _SourceCodeGenerator(layout=layout) code.translate_entities(entities, ignore=ignore) return code.code def block_to_code(block: 'BlockLayout', drawing: str = 'doc', ignore: Iterable[str] = None) -> 'Code': """ Translates a BLOCK into Python source code to recreate the BLOCK by ezdxf. Args: block: block definition layout drawing: variable name of the drawing as string ignore: iterable of entities types to ignore as strings like ['IMAGE', 'DIMENSION'] Returns: :class:`Code` """ dxfattribs = _purge_handles(block.block.dxfattribs()) block_name = dxfattribs.pop('name') base_point = dxfattribs.pop('base_point') code = _SourceCodeGenerator(layout='b') prolog = 'b = {}.blocks.new("{}", base_point={}, dxfattribs={{'.format(drawing, block_name, str(base_point)) code.add_source_code_line(prolog) code.add_source_code_lines(_fmt_mapping(dxfattribs, indent=4)) code.add_source_code_line(' }') code.add_source_code_line(')') code.translate_entities(block, ignore=ignore) return code.code def table_entries_to_code(entities: Iterable['DXFEntity'], drawing='doc') -> 'Code': code = _SourceCodeGenerator(doc=drawing) code.translate_entities(entities) return code.code class Code: """ Source code container. """ def __init__(self): self.code = [] # type: List[str] self.imports = set() # type: Set[str] # global imports -> indention level 0 self.layers = set() # type: Set[str] # layer names as string self.styles = set() # type: Set[str] # text style name as string, requires a TABLE entry self.linetypes = set() # type: Set[str] # line type names as string, requires a TABLE entry self.dimstyles = set() # type: Set[str] # dimension style names as string, requires a TABLE entry self.blocks = set() # type: Set[str] # block names as string, requires a BLOCK definition def code_str(self, indent: int = 0) -> str: """ Returns the source code as a single string. Args: indent: source code indentation count by spaces """ lead_str = ' ' * indent return '\n'.join(lead_str + line for line in self.code) def __str__(self) -> str: """ Returns the source code as a single string. """ return self.code_str() def import_str(self, indent: int = 0) -> str: """ Returns required imports as a single string. Args: indent: source code indentation count by spaces """ lead_str = ' ' * indent return '\n'.join(lead_str + line for line in self.imports) def add_import(self, statement: str) -> None: """ Add import statement, identical import statements are merged together. """ self.imports.add(statement) def add_line(self, code: str, indent: int = 0) -> None: """ Add a single source code line without line ending ``\\n``. 
""" self.code.append(' ' * indent + code) def add_lines(self, code: Iterable[str], indent: int = 0) -> None: """ Add multiple source code lines without line ending ``\\n``. """ for line in code: self.add_line(line, indent=indent) def merge(self, code: 'Code', indent: int = 0) -> None: """ Add another :class:`Code` object. """ # merge used resources self.imports.update(code.imports) self.layers.update(code.layers) self.linetypes.update(code.linetypes) self.styles.update(code.styles) self.dimstyles.update(code.dimstyles) self.blocks.update(code.blocks) # append source code lines self.add_lines(self.code, indent=indent) _PURGE_DXF_ATTRIBUTES = {'handle', 'owner', 'paperspace', 'material_handle', 'visualstyle_handle', 'plotstyle_handle'} def _purge_handles(attribs: dict) -> dict: """ Purge handles from DXF attributes which will be invalid in a new document, or which will be set automatically by adding an entity to a layout (paperspace). Args: attribs: entity DXF attributes dictionary """ return {k: v for k, v in attribs.items() if k not in _PURGE_DXF_ATTRIBUTES} def _fmt_mapping(mapping: Mapping, indent: int = 0) -> Iterable[str]: # key is always a string fmt = ' ' * indent + "'{}': {}," for k, v in mapping.items(): assert isinstance(k, str) if isinstance(v, str): v = json.dumps(v) # for correct escaping of quotes else: v = str(v) # format uses repr() for Vectors yield fmt.format(k, v) def _fmt_list(l: Iterable, indent: int = 0) -> Iterable[str]: fmt = ' ' * indent + '{},' for v in l: yield fmt.format(str(v)) def _fmt_api_call(func_call: str, args: Iterable[str], dxfattribs: dict) -> List[str]: attributes = dict(dxfattribs) args = list(args) if args else [] def fmt_keywords() -> Iterable[str]: for arg in args: if arg not in attributes: continue value = attributes.pop(arg) if isinstance(value, str): valuestr = json.dumps(value) # quoted string! else: valuestr = str(value) yield " {}={},".format(arg, valuestr) s = [func_call] s.extend(fmt_keywords()) s.append(' dxfattribs={') s.extend(_fmt_mapping(attributes, indent=8)) s.extend([ " },", ")", ]) return s def _fmt_dxf_tags(tags: Iterable['DXFTag'], indent: int = 0): fmt = ' ' * indent + 'dxftag({}, {}),' for code, value in tags: assert isinstance(code, int) if isinstance(value, str): value = json.dumps(value) # for correct escaping of quotes else: value = str(value) # format uses repr() for Vectors yield fmt.format(code, value) class _SourceCodeGenerator: """ The :class:`_SourceCodeGenerator` translates DXF entities into Python source code for creating the same DXF entity in another model space or block definition. :ivar code: list of source code lines without line endings :ivar required_imports: list of import source code lines, which are required to create executable Python code. """ def __init__(self, layout: str = 'layout', doc: str = 'doc'): self.doc = doc self.layout = layout self.code = Code() def translate_entity(self, entity: 'DXFEntity') -> None: """ Translates one DXF entity into Python source code. The generated source code is appended to the attribute `source_code`. Args: entity: DXFEntity object """ dxftype = entity.dxftype() try: entity_translator = getattr(self, '_' + dxftype.lower()) except AttributeError: self.add_source_code_line('# unsupported DXF entity "{}"'.format(dxftype)) else: entity_translator(entity) def translate_entities(self, entities: Iterable['DXFEntity'], ignore: Iterable[str] = None) -> None: """ Translates multiple DXF entities into Python source code. 
The generated source code is appended to the attribute `source_code`. Args: entities: iterable of DXFEntity ignore: iterable of entities types to ignore as strings like ['IMAGE', 'DIMENSION'] """ ignore = set(ignore) if ignore else set() for entity in entities: if entity.dxftype() not in ignore: self.translate_entity(entity) def add_used_resources(self, dxfattribs: Mapping) -> None: """ Register used resources like layers, line types, text styles and dimension styles. Args: dxfattribs: DXF attributes dictionary """ if 'layer' in dxfattribs: self.code.layers.add(dxfattribs['layer']) if 'linetype' in dxfattribs: self.code.linetypes.add(dxfattribs['linetype']) if 'style' in dxfattribs: self.code.styles.add(dxfattribs['style']) if 'dimstyle' in dxfattribs: self.code.dimstyles.add(dxfattribs['dimstyle']) def add_import_statement(self, statement: str) -> None: self.code.add_import(statement) def add_source_code_line(self, code: str) -> None: self.code.add_line(code) def add_source_code_lines(self, code: Iterable[str]) -> None: self.code.add_lines(code) def add_list_source_code(self, values: Iterable, prolog: str = '[', epilog: str = ']', indent: int = 0) -> None: fmt_str = ' ' * indent + '{}' self.add_source_code_line(fmt_str.format(prolog)) self.add_source_code_lines(_fmt_list(values, indent=4 + indent)) self.add_source_code_line(fmt_str.format(epilog)) def add_dict_source_code(self, mapping: Mapping, prolog: str = '{', epilog: str = '}', indent: int = 0) -> None: fmt_str = ' ' * indent + '{}' self.add_source_code_line(fmt_str.format(prolog)) self.add_source_code_lines(_fmt_mapping(mapping, indent=4 + indent)) self.add_source_code_line(fmt_str.format(epilog)) def add_tags_source_code(self, tags: Tags, prolog='tags = Tags(', epilog=')', indent=4): fmt_str = ' ' * indent + '{}' self.add_source_code_line(fmt_str.format(prolog)) self.add_source_code_lines(_fmt_dxf_tags(tags, indent=4 + indent)) self.add_source_code_line(fmt_str.format(epilog)) def generic_api_call(self, dxftype: str, dxfattribs: dict, prefix: str = 'e = ') -> Iterable[str]: """ Returns the source code strings to create a DXF entity by a generic `new_entity()` call. Args: dxftype: DXF entity type as string, like 'LINE' dxfattribs: DXF attributes dictionary prefix: prefix string like variable assignment 'e = ' """ dxfattribs = _purge_handles(dxfattribs) self.add_used_resources(dxfattribs) s = [ "{}{}.new_entity(".format(prefix, self.layout), " '{}',".format(dxftype), " dxfattribs={", ] s.extend(_fmt_mapping(dxfattribs, indent=8)) s.extend([ " },", ")", ]) return s def api_call(self, api_call: str, args: Iterable[str], dxfattribs: dict, prefix: str = 'e = ') -> Iterable[str]: """ Returns the source code strings to create a DXF entity by the specialised API call. Args: api_call: API function call like 'add_line(' args: DXF attributes to pass as arguments dxfattribs: DXF attributes dictionary prefix: prefix string like variable assignment 'e = ' """ dxfattribs = _purge_handles(dxfattribs) func_call = '{}{}.{}'.format(prefix, self.layout, api_call) return _fmt_api_call(func_call, args, dxfattribs) def new_table_entry(self, dxftype: str, dxfattribs: dict) -> Iterable[str]: """ Returns the source code strings to create a new table entity by ezdxf. 
Args: dxftype: table entry type as string, like 'LAYER' dxfattribs: DXF attributes dictionary """ table = '{}.{}'.format(self.doc, TABLENAMES[dxftype]) dxfattribs = _purge_handles(dxfattribs) name = dxfattribs.pop('name') s = [ "if '{}' not in {}:".format(name, table), " t = {}.new(".format(table), " '{}',".format(name), " dxfattribs={", ] s.extend(_fmt_mapping(dxfattribs, indent=12)) s.extend([ " },", " )", ]) return s # simple graphical types def _line(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.api_call('add_line(', ['start', 'end'], entity.dxfattribs())) def _point(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.api_call('add_point(', ['location'], entity.dxfattribs())) def _circle(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.api_call('add_circle(', ['center', 'radius'], entity.dxfattribs())) def _arc(self, entity: 'DXFEntity') -> None: self.add_source_code_lines( self.api_call('add_arc(', ['center', 'radius', 'start_angle', 'end_angle'], entity.dxfattribs())) def _text(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.api_call('add_text(', ['text'], entity.dxfattribs())) def _solid(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.generic_api_call('SOLID', entity.dxfattribs())) def _trace(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.generic_api_call('TRACE', entity.dxfattribs())) def _3dface(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.generic_api_call('3DFACE', entity.dxfattribs())) def _shape(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.api_call('add_shape(', ['name', 'insert', 'size'], entity.dxfattribs())) def _attrib(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.api_call('add_attrib(', ['tag', 'text', 'insert'], entity.dxfattribs())) def _attdef(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.generic_api_call('ATTDEF', entity.dxfattribs())) def _ellipse(self, entity: 'DXFEntity') -> None: self.add_source_code_lines( self.api_call('add_ellipse(', ['center', 'major_axis', 'ratio', 'start_param', 'end_param'], entity.dxfattribs())) def _viewport(self, entity: 'DXFEntity') -> None: self.add_source_code_lines(self.generic_api_call('VIEWPORT', entity.dxfattribs())) self.add_source_code_line('# Set valid handles or remove attributes ending with "_handle", otherwise the DXF ' 'file is invalid for AutoCAD') # complex graphical types def _insert(self, entity: 'Insert') -> None: self.code.blocks.add(entity.dxf.name) self.add_source_code_lines(self.api_call('add_blockref(', ['name', 'insert'], entity.dxfattribs())) if len(entity.attribs): for attrib in entity.attribs: dxfattribs = attrib.dxfattribs() dxfattribs['layer'] = entity.dxf.layer # set ATTRIB layer to same as INSERT self.add_source_code_lines(self.generic_api_call('ATTRIB', attrib.dxfattribs(), prefix='a = ')) self.add_source_code_lines('e.attribs.append(a)') def _mtext(self, entity: 'MText') -> None: self.add_source_code_lines(self.generic_api_call('MTEXT', entity.dxfattribs())) # mtext content 'text' is not a single DXF tag and therefore not a DXF attribute self.add_source_code_line('e.text = {}'.format(json.dumps(entity.text))) def _lwpolyline(self, entity: 'LWPolyline') -> None: self.add_source_code_lines(self.generic_api_call('LWPOLYLINE', entity.dxfattribs())) # lwpolyline points are not DXF attributes self.add_list_source_code(entity.get_points(), prolog='e.set_points([', epilog='])') def _spline(self, entity: 
'Spline') -> None: self.add_source_code_lines(self.api_call('add_spline(', ['degree'], entity.dxfattribs())) # spline points, knots and weights are not DXF attributes if len(entity.fit_points): self.add_list_source_code(entity.fit_points, prolog='e.fit_points = [', epilog=']') if len(entity.control_points): self.add_list_source_code(entity.control_points, prolog='e.control_points = [', epilog=']') if len(entity.knots): self.add_list_source_code(entity.knots, prolog='e.knots = [', epilog=']') if len(entity.weights): self.add_list_source_code(entity.weights, prolog='e.weights = [', epilog=']') def _polyline(self, entity: 'Polyline') -> None: self.add_source_code_lines(self.generic_api_call('POLYLINE', entity.dxfattribs())) # polyline vertices are separate DXF entities and therefore not DXF attributes for v in entity.vertices: attribs = _purge_handles(v.dxfattribs()) location = attribs.pop('location') if 'layer' in attribs: del attribs['layer'] # layer is automatically set to the POLYLINE layer # each VERTEX can have different DXF attributes: bulge, start_width, end_width ... self.add_source_code_line('e.append_vertex({}, dxfattribs={})'.format( str(location), attribs, )) def _leader(self, entity: 'Leader'): self.add_source_code_line('# Dimension style attribute overriding is not supported!') self.add_source_code_lines(self.generic_api_call('LEADER', entity.dxfattribs())) self.add_list_source_code(entity.vertices, prolog='e.set_vertices([', epilog='])') def _dimension(self, entity: 'Dimension'): self.add_import_statement('from ezdxf.dimstyleoverride import DimStyleOverride') self.add_source_code_line('# Dimension style attribute overriding is not supported!') self.add_source_code_lines(self.generic_api_call('DIMENSION', entity.dxfattribs())) self.add_source_code_lines([ '# You have to create the required graphical representation for the DIMENSION entity as anonymous block, ', '# otherwise the DXF file is invalid for AutoCAD (but not for BricsCAD):', '# DimStyleOverride(e).render()', '' ]) def _image(self, entity: 'Image'): self.add_source_code_line('# Image requires IMAGEDEF and IMAGEDEFREACTOR objects in the OBJECTS section!') self.add_source_code_lines(self.generic_api_call('IMAGE', entity.dxfattribs())) if len(entity.boundary_path): self.add_list_source_code( (v[:2] for v in entity.boundary_path), # just x, y axis prolog='e.set_boundary_path([', epilog='])', ) self.add_source_code_line('# Set valid image_def_handle and image_def_reactor_handle, otherwise the DXF file' ' is invalid for AutoCAD') def _mesh(self, entity: 'Mesh'): self.add_source_code_lines(self.api_call('add_mesh(', [], entity.dxfattribs())) if len(entity.vertices): self.add_list_source_code(entity.vertices, prolog='e.vertices = [', epilog=']') if len(entity.edges): # array.array -> tuple self.add_list_source_code((tuple(e) for e in entity.edges), prolog='e.edges = [', epilog=']') if len(entity.faces): # array.array -> tuple self.add_list_source_code((tuple(f) for f in entity.faces), prolog='e.faces = [', epilog=']') if len(entity.creases): self.add_list_source_code(entity.creases, prolog='e.creases = [', epilog=']') def _hatch(self, entity: 'Hatch'): add_line = self.add_source_code_line dxfattribs = entity.dxfattribs() dxfattribs['associative'] = 0 # associative hatch not supported self.add_source_code_lines(self.api_call('add_hatch(', ['color'], dxfattribs)) if len(entity.seeds): add_line("e.set_seed_points({})".format(str(entity.seeds))) if entity.pattern: self.add_list_source_code(entity.pattern.lines, 
prolog='e.set_pattern_definition([', epilog='])') arg = " {}={}," if entity.has_gradient_data: g = entity.gradient add_line('e.set_gradient(') add_line(arg.format('color1', str(g.color1))) add_line(arg.format('color2', str(g.color2))) add_line(arg.format('rotation', g.rotation)) add_line(arg.format('centered', g.centered)) add_line(arg.format('one_color', g.one_color)) add_line(arg.format('name', json.dumps(g.name))) add_line(')') for count, path in enumerate(entity.paths, start=1): if path.PATH_TYPE == 'PolylinePath': add_line('# {}. polyline path'.format(count)) self.add_list_source_code(path.vertices, prolog='e.paths.add_polyline_path([', epilog=' ],') add_line(arg.format('is_closed', str(path.is_closed))) add_line(arg.format('flags', str(path.path_type_flags))) add_line(')') else: # EdgePath add_line('# {}. edge path: associative hatch not supported'.format(count)) add_line('ep = e.paths.add_edge_path(flags={})'.format(path.path_type_flags)) for edge in path.edges: if edge.EDGE_TYPE == 'LineEdge': add_line('ep.add_line({}, {})'.format(str(edge.start[:2]), str(edge.end[:2]))) elif edge.EDGE_TYPE == 'ArcEdge': add_line('ep.add_arc(') add_line(arg.format('center', str(edge.center[:2]))) add_line(arg.format('radius', edge.radius)) add_line(arg.format('start_angle', edge.start_angle)) add_line(arg.format('end_angle', edge.end_angle)) add_line(arg.format('is_counter_clockwise', edge.is_counter_clockwise)) add_line(')') elif edge.EDGE_TYPE == 'EllipseEdge': add_line('ep.add_ellipse(') add_line(arg.format('center', str(edge.center[:2]))) add_line(arg.format('major_axis', str(edge.major_axis[:2]))) add_line(arg.format('ratio', edge.ratio)) add_line(arg.format('start_angle', edge.start_angle)) add_line(arg.format('end_angle', edge.end_angle)) add_line(arg.format('is_counter_clockwise', edge.is_counter_clockwise)) add_line(')') elif edge.EDGE_TYPE == 'SplineEdge': add_line('ep.add_spline(') if edge.fit_points: add_line(arg.format('fit_points', str([fp[:2] for fp in edge.fit_points]))) if edge.control_points: add_line( arg.format('control_points', str([cp[:2] for cp in edge.control_points]))) if edge.knot_values: add_line(arg.format('knot_values', str(edge.knot_values))) if edge.weights: add_line(arg.format('weights', str(edge.weights))) add_line(arg.format('degree', edge.degree)) add_line(arg.format('rational', edge.rational)) add_line(arg.format('periodic', edge.periodic)) if edge.start_tangent is not None: add_line(arg.format('start_tangent', str(edge.start_tangent))) if edge.end_tangent is not None: add_line(arg.format('end_tangent', str(edge.end_tangent))) add_line(')') # simple table entries def _layer(self, layer: 'DXFEntity'): self.add_source_code_lines(self.new_table_entry('LAYER', layer.dxfattribs())) def _ltype(self, ltype: 'Linetype'): self.add_import_statement('from ezdxf.lldxf.tags import Tags') self.add_import_statement('from ezdxf.lldxf.types import dxftag') self.add_import_statement('from ezdxf.entities.ltype import LinetypePattern') self.add_source_code_lines(self.new_table_entry('LTYPE', ltype.dxfattribs())) self.add_tags_source_code(ltype.pattern_tags.tags, prolog='tags = Tags([', epilog='])', indent=4) self.add_source_code_line(' t.pattern_tags = LinetypePattern(tags)') def _style(self, style: 'DXFEntity'): self.add_source_code_lines(self.new_table_entry('STYLE', style.dxfattribs())) def _dimstyle(self, dimstyle: 'DXFEntity'): self.add_source_code_lines(self.new_table_entry('DIMSTYLE', dimstyle.dxfattribs())) def _appid(self, appid: 'DXFEntity'): 
self.add_source_code_lines(self.new_table_entry('APPID', appid.dxfattribs()))
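# Usage sketch for the functions defined above (illustrative only; "sample.dxf"
# is a placeholder path, not a file shipped with this module):
#
#     import ezdxf
#     doc = ezdxf.readfile("sample.dxf")
#     msp_code = entities_to_code(doc.modelspace(), layout="msp")
#     print(msp_code.import_str())   # required import statements
#     print(msp_code.code_str())     # generated ezdxf calls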
python
from astropy.coordinates import Angle from neclib.parameters import PointingErrorData class TestPointingErrorData: expected = { "dAz": Angle("5314.2466754691195arcsec"), "de": Angle("382arcsec"), "chi_Az": Angle("-27.743114809726713arcsec"), "omega_Az": Angle("-10.004233550100272deg"), "eps": Angle("-13.562343977659976arcsec"), "chi2_Az": Angle("-3.2283345930067489arcsec"), "omega2_Az": Angle("-34.73486665318979deg"), "chi_El": Angle("-30.046387189617871arcsec"), "omega_El": Angle("-16.233694100299584deg"), "chi2_El": Angle("-1.1446000035021269arcsec"), "omega2_El": Angle("-41.474874481601418deg"), "g": -0.17220574801726421, "gg": 0.0, "ggg": 0.0, "gggg": 0.0, "dEl": Angle("6520.2376117807198arcsec"), "de_radio": Angle("-394.46arcsec"), "del_radio": Angle("210.7228arcsec"), "cor_v": Angle("27.434arcsec"), "cor_p": Angle("-31.6497deg"), "g_radio": -0.454659, "gg_radio": 0.0128757, "ggg_radio": 0.000000, "gggg_radio": 0.000000, } def test_from_file(self, data_dir): actual = PointingErrorData.from_file(data_dir / "example_pointing_param.toml") for name, value in self.expected.items(): assert getattr(actual, name) == value assert actual[name] == value def test_from_text_file(self, data_dir): actual = PointingErrorData.from_text_file( data_dir / "example_pointing_param.txt" ) for name, value in self.expected.items(): assert getattr(actual, name) == value assert actual[name] == value
python
import pyglet.resource
import pyglet.sprite
import pyglet.graphics
import pyglet.text


def get_room_wall_image(room):
    filename = 'res/rooms/walls/{}.jpg'.format(room.wall_variant)
    return pyglet.resource.image(filename)


def get_forniture_image(forniture):
    filename = 'res/forniture/{}.png'.format(forniture.name)
    return pyglet.resource.image(filename)


class RoomRender:
    def __init__(self, room):
        self.room = room
        # Draw order: walls behind furniture, info labels on top.
        self.background_group = pyglet.graphics.OrderedGroup(0)
        self.foreground_group = pyglet.graphics.OrderedGroup(1)
        self.info_group = pyglet.graphics.OrderedGroup(2)
        self.batch = pyglet.graphics.Batch()

        wall_bg = get_room_wall_image(room)
        self.wall_sprite = pyglet.sprite.Sprite(wall_bg, x=0, y=0,
                                                batch=self.batch,
                                                group=self.background_group)

        forniture_placements = []
        for e in room.elements:
            elem_img = get_forniture_image(e.forniture)
            fs = pyglet.sprite.Sprite(elem_img, x=e.x, y=e.y,
                                      batch=self.batch,
                                      group=self.foreground_group)
            forniture_placements.append(fs)
        self.forniture_placements = forniture_placements

        # Keep a reference to the label: if the Label object is garbage
        # collected its vertices are removed from the batch and the text
        # disappears.
        self.name_label = pyglet.text.Label(room.name,
                                            font_name='Times New Roman',
                                            font_size=16,
                                            x=100, y=350,
                                            batch=self.batch,
                                            group=self.info_group)

    def render(self):
        self.batch.draw()
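# --- Hypothetical demo (not part of RoomRender itself) ---
# The Room/element objects are simple stand-ins exposing only the attributes
# RoomRender reads: wall_variant, name, elements, element.forniture.name, x, y.
# The image files under res/ must exist for this to actually run.
from types import SimpleNamespace

import pyglet.app
import pyglet.window


def _demo():
    chair = SimpleNamespace(name='chair')
    room = SimpleNamespace(
        wall_variant=1,
        name='Living room',
        elements=[SimpleNamespace(forniture=chair, x=200, y=120)],
    )
    window = pyglet.window.Window(800, 450)
    renderer = RoomRender(room)

    @window.event
    def on_draw():
        window.clear()
        renderer.render()

    pyglet.app.run()


if __name__ == '__main__':
    _demo()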
python
# UNDER CONSTRUCTION ! light_metadata = { "name": { "type": "string" }, "version": { "type": "string" }, "data_preparation": { "type": "object", "properties": { "accepted_margin_of_error": { "type": "number" }, "total_row_count": { "type": "number" }, "used_row_count": { "type": "number" }, "test_row_count": { "type": "number" }, "train_row_count": { "type": "number" }, "validation_row_count": { "type": "number" } } }, "data_analysis": { "type": "object", "properties": { "target_columns_metadata": { "type": "array", "items": { "type": "object", "properties": { "column_name": { "type": "string" } } } } } } } scores = ['duplicates_score','empty_cells_score','data_type_distribution_score', 'similarity_score','z_test_based_outlier_score','value_distribution_score' ,'variability_score','redundancy_score','consistency_score','consistency_score','quality_score'] def gen_score(score_name): return [ score_name: { "type": "object", "properties": { "score": { "type": "number" }, "description": { "type": "string" } } } ] "data_analysis": { "target_columns_metadata": [ { "column_name": "string", "importance_score": 0, "data_type": "categorical", "data_type_distribution": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "data_distribution": { "data_histogram": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "clusters": [ { "group": "string", "members": [ "string" ] } ], "mean": "string" }, "consistency": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" }, "completeness": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" }, "variability": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" } } ], "input_columns_metadata": [ { "column_name": "string", "importance_score": 0, "data_type": "categorical", "data_type_distribution": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "data_distribution": { "data_histogram": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "clusters": [ { "group": "string", "members": [ "string" ] } ], "mean": "string" }, "consistency": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" }, "completeness": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" }, "variability": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" } } ] }, "model_analysis": [ { "column_name": "string", "overall_input_importance": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "train_accuracy_over_time": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "test_accuracy_over_time": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "accuracy_histogram": { "x": [ "string" ], "y": [ 0 ], "x_explained": [ [ { "column_name": "string", "importance_score": 0, "data_type": "categorical", "data_type_distribution": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "data_distribution": { "data_histogram": { "type": "categorical", "x": [ "string" ], "y": [ 0 ] }, "clusters": [ { "group": "string", "members": [ "string" ] } ], "mean": "string" }, "consistency": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" }, "completeness": { "score": "string", "metrics": [ { "type": "error", 
"score": 0, "description": "string" } ], "description": "string" }, "variability": { "score": "string", "metrics": [ { "type": "error", "score": 0, "description": "string" } ], "description": "string" } } ] ] } } ] })
python
# -*- coding: utf-8 -*- """ Created on Thu Jun 20 22:56:12 2019 @author: Suman JaipurRentals Jaipur’s Real Estate Market is experiencing an incredible resurgence, with property prices soaring by double-digits on an yearly basis since 2013. While home owners have a lot of reasons to laugh about, the same cannot be said of people looking for a home to buy or rent. In Jaipur, property rental market is said to be as crazy as property purchasing market. You are new to Jaipur and want to rent a decent apartment. Since you have the knowledge of Machine Learning you decided to build a model, that could help you out to get a nice apartment at best price. Get Your data from various apartment rental sites and move towards the following observation points like: · How does the general rental prices distribution looks like? (Graphical representation is appreciated) · Which are the hottest areas? · Which area would be more interesting to start hunting? · Are you able to predict rental price of an apartment? """ import pandas as pd data = pd.read_csv('processed_data.csv') from collections import Counter top = Counter(data.location) data.index = range(data.shape[0]) property_type = data.PropertyType.unique() loc_price = {} for i in range(len(data)): if loc_price.get(data.iloc[i].location): loc_price[ data.iloc[i].location] += data.iloc[i].price else: loc_price[data.iloc[i].location] = data.iloc[i].price avg_price = {} for items in loc_price.keys(): avg_price[items] = loc_price.get(items)/top[items] location = loc_price.keys() #import matplotlib.pyplot as plt # #plt.figure(figsize=(30,10)) #plt.bar(height = avg_price.values(), x=avg_price.keys()) #plt.margins(x=0) #plt.xticks(fontsize = 10,fontname = "Comic Sans MS", rotation = 90) #plt.xlabel('Locations') #plt.ylabel('Average Price') #plt.savefig('chart.svg',format='svg',dpi=1500,bbox_inches = 'tight') #plt.show() #· Which are the hottest areas? import operator a = dict(sorted(avg_price.items(), key=operator.itemgetter(1), reverse=True)[:10]) #print('Top 10 Locations\n') #for item in a.keys(): # print(item.title()) # Which area would be more interesting to start hunting? hunt = pd.DataFrame() for loc,num in top.most_common(10): temp = [] for i in range(1,11): try: temp.append(str(str(i)+' BHK Average rate: '+str(int(data['price'][(data.location==loc) & (data.BHK==i)].mean())))) except: temp.append(str(str(i)+' BHK Not Available')) hunt[loc] = temp # #hunt3 = pd.DataFrame() #labels = [] #for loc,num in top.most_common(10): # top3price = [] # for i in range(1,4): # top3price.append(int(data['price'][(data.location==loc) & (data.BHK==i)].mean())) # hunt3[loc] = top3price # labels.append(loc) # # #newhunt3 = pd.DataFrame({'one':hunt3.iloc[0:1].values[0],'two':hunt3.iloc[1:2].values[0],'three':hunt3.iloc[2:3].values[0]}) # #import matplotlib.pyplot as plt # #x = [1,2,3,4,5,6,7,8,9,10] #y = newhunt3.one.values #plt.plot(x, y, label='1 BHK',marker='o') #y = newhunt3.two.values #plt.plot(x, y, label='2 BHK',marker='o') #y = newhunt3.three.values #plt.plot(x, y, label='3 BHK',marker='o') # #plt.xticks(x, labels, rotation='vertical') #plt.xlabel('Locations') #plt.ylabel('Price') #plt.margins(0.1) #plt.subplots_adjust(bottom=0.15) #plt.legend() #plt.savefig('top10loc1.svg',dpi=1500,bbox_inches = 'tight') #plt.show() import pickle with open('model.pkl','rb') as f1: model = pickle.load(f1)
python
import data as tours_data


def data_html():
    ret = "<h1>All tours:</h1>" + "\n"
    for i in tours_data.tours.keys():
        ret = ret + "<p>" + \
            tours_data.tours[i]["country"] + \
            ': <a href="/data/tours/' + str(i) + '/">' + \
            tours_data.tours[i]["title"] + \
            "</a></p>"
    return ret
python
# The MIT License (MIT) # # Copyright (c) 2019 Melissa LeBlanc-Williams for Adafruit Industries # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. """ `adafruit_rgb_display.hx8357` ==================================================== A simple driver for the HX8357-based displays. * Author(s): Melissa LeBlanc-Williams """ from micropython import const from adafruit_rgb_display.rgb import DisplaySPI __version__ = "0.0.0-auto.0" __repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_RGB_Display.git" _SWRESET = const(0x01) _SLPOUT = const(0x11) _NORON = const(0x13) _INVOFF = const(0x20) _INVON = const(0x21) _DISPOFF = const(0x28) _DISPON = const(0x29) _CASET = const(0x2a) _PASET = const(0x2b) _RAMWR = const(0x2c) _RAMRD = const(0x2e) _TEON = const(0x35) _MADCTL = const(0x36) _COLMOD = const(0x3a) _TEARLINE = const(0x44) _SETOSC = const(0xb0) _SETPWR1 = const(0xb1) _SETRGB = const(0xb3) _SETCYC = const(0xb4) _SETCOM = const(0xb6) _SETC = const(0xb9) _SETSTBA = const(0xc0) _SETPANEL = const(0xcc) _SETGAMMA = const(0xe0) class HX8357(DisplaySPI): """ A simple driver for the HX8357-based displays. >>> import busio >>> import digitalio >>> import board >>> from adafruit_rgb_display import color565 >>> import adafruit_rgb_display.hx8357 as hx8357 >>> spi = busio.SPI(clock=board.SCK, MOSI=board.MOSI, MISO=board.MISO) >>> display = hx8357.HX8357(spi, cs=digitalio.DigitalInOut(board.GPIO0), ... 
dc=digitalio.DigitalInOut(board.GPIO15)) >>> display.fill(0x7521) >>> display.pixel(64, 64, 0) """ _COLUMN_SET = _CASET _PAGE_SET = _PASET _RAM_WRITE = _RAMWR _RAM_READ = _RAMRD _INIT = ( (_SWRESET, None), (_SETC, b'\xFF\x83\x57'), (_SETRGB, b'\x80\x00\x06\x06'), # 0x80 enables SDO pin (0x00 disables) (_SETCOM, b'\x25'), # -1.52V (_SETOSC, b'\x68'), # Normal mode 70Hz, Idle mode 55 Hz (_SETPANEL, b'\x05'), # BGR, Gate direction swapped (_SETPWR1, b'\x00\x15\x1C\x1C\x83\xAA'), # Not deep standby BT VSPR VSNR AP (_SETSTBA, b'\x50\x50\x01\x3C\x1E\x08'), # OPON normal OPON idle STBA GEN (_SETCYC, b'\x02\x40\x00\x2A\x2A\x0D\x78'), # NW 0x02 RTN DIV DUM DUM GDON GDOFF (_SETGAMMA, b'\x02\x0A\x11\x1d\x23\x35\x41\x4b\x4b\x42\x3A\x27\x1B\x08\x09\x03\x02' \ b'\x0A\x11\x1d\x23\x35\x41\x4b\x4b\x42\x3A\x27\x1B\x08\x09\x03\x00\x01'), (_COLMOD, b'\x55'), # 16 bit (_MADCTL, b'\xc0'), (_TEON, b'\x00'), (_TEARLINE, b'\x00\x02'), # TW off (_SLPOUT, None), (_MADCTL, b'\xa0'), (_DISPON, None), ) _ENCODE_PIXEL = ">H" _ENCODE_POS = ">HH" #pylint: disable-msg=useless-super-delegation, too-many-arguments def __init__(self, spi, dc, cs, rst=None, width=480, height=320, baudrate=16000000, polarity=0, phase=0, rotation=0): super().__init__(spi, dc, cs, rst, width, height, baudrate=baudrate, polarity=polarity, phase=phase, rotation=rotation)
python
__author__ = 'yinjun' #@see http://www.jiuzhang.com/solutions/longest-common-subsequence/ class Solution: """ @param A, B: Two strings. @return: The length of longest common subsequence of A and B. """ def longestCommonSubsequence(self, A, B): # write your code here x = len(A) y = len(B) dp = [[0 for j in range(y+1)] for i in range(x+1)] for i in range(1, x+1): for j in range(1, y+1): if A[i-1] == B[j-1]: dp[i][j] = dp[i-1][j-1] + 1 else: dp[i][j] = max(dp[i-1][j], dp[i][j-1]) return dp[x][y]
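# Quick self-check (illustrative, not part of the original jiuzhang solution):
if __name__ == '__main__':
    s = Solution()
    print(s.longestCommonSubsequence("ABCBDAB", "BDCABA"))  # expected 4, e.g. "BCBA"
    print(s.longestCommonSubsequence("ABCD", "EDCA"))       # expected 1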
python
import sys from typing import Any, Collection, Dict, List, Optional, Union from pydantic import BaseModel from rest_api.config import DEFAULT_TOP_K_READER, DEFAULT_TOP_K_RETRIEVER MAX_RECURSION_DEPTH = sys.getrecursionlimit() - 1 class Question(BaseModel): questions: List[str] filters: Optional[Dict[str, Optional[Union[str, List[str]]]]] = None top_k_reader: int = DEFAULT_TOP_K_READER top_k_retriever: int = DEFAULT_TOP_K_RETRIEVER @classmethod def from_elastic_query_dsl(cls, query_request: Dict[str, Any], top_k_reader: int = DEFAULT_TOP_K_READER): # Refer Query DSL # https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-bool-query.html # Currently do not support query matching with field parameter query_strings: List[str] = [] filters: Dict[str, str] = {} top_k_retriever: int = DEFAULT_TOP_K_RETRIEVER if "size" not in query_request else query_request["size"] cls._iterate_dsl_request(query_request, query_strings, filters) if len(query_strings) != 1: raise SyntaxError('Only one valid `query` field required expected, ' 'refer https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-match-query.html') return cls(questions=query_strings, filters=filters if len(filters) else None, top_k_retriever=top_k_retriever, top_k_reader=top_k_reader) @classmethod def _iterate_dsl_request(cls, query_dsl: Any, query_strings: List[str], filters: Dict[str, str], depth: int = 0): if depth == MAX_RECURSION_DEPTH: raise RecursionError('Parsing incoming DSL reaching current value of the recursion limit') # For question: Only consider values of "query" key for "match" and "multi_match" request. # For filter: Only consider Dict[str, str] value of "term" or "terms" key if isinstance(query_dsl, List): for item in query_dsl: cls._iterate_dsl_request(item, query_strings, filters, depth + 1) elif isinstance(query_dsl, Dict): for key, value in query_dsl.items(): # "query" value should be "str" type if key == 'query' and isinstance(value, str): query_strings.append(value) elif key in ["filter", "filters"]: cls._iterate_filters(value, filters, depth + 1) elif isinstance(value, Collection): cls._iterate_dsl_request(value, query_strings, filters, depth + 1) @classmethod def _iterate_filters(cls, filter_dsl: Any, filters: Dict[str, str], depth: int = 0): if depth == MAX_RECURSION_DEPTH: raise RecursionError('Parsing incoming DSL reaching current value of the recursion limit') if isinstance(filter_dsl, List): for item in filter_dsl: cls._iterate_filters(item, filters, depth + 1) elif isinstance(filter_dsl, Dict): for key, value in filter_dsl.items(): if key in ["term", "terms"]: if isinstance(value, Dict): for filter_key, filter_value in value.items(): # Currently only accepting Dict[str, str] if isinstance(filter_value, str): filters[filter_key] = filter_value elif isinstance(value, Collection): cls._iterate_filters(value, filters, depth + 1)
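# Illustrative example of the Query DSL subset this parser understands; the
# field names and values below are made up purely for demonstration.
if __name__ == "__main__":
    example_request = {
        "size": 5,
        "query": {
            "bool": {
                "must": [{"match": {"query": "Who founded the company?"}}],
                "filter": [{"term": {"category": "news"}}],
            }
        },
    }
    q = Question.from_elastic_query_dsl(example_request)
    print(q.questions)        # ['Who founded the company?']
    print(q.filters)          # {'category': 'news'}
    print(q.top_k_retriever)  # 5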
python
from hyperadmin.links import LinkPrototype class FormStepLinkPrototype(LinkPrototype): def get_link_kwargs(self, **kwargs): link_kwargs = {'on_submit':self.handle_submission, 'method':'POST', 'url':self.get_url(), 'form_class': self.get_form_class(), 'prompt':'step', 'rel':'step',} link_kwargs.update(kwargs) return super(FormStepLinkPrototype, self).get_link_kwargs(**link_kwargs) def handle_submission(self, link, submit_kwargs): """ Called when the link is submitted. Returns a link representing the response. :rtype: Link """ form = link.get_form(**submit_kwargs) if form.is_valid(): self.endpoint.form_valid(form) return self.on_success() self.endpoint.form_invalid(form) return link.clone(form=form) def get_next_step_kwargs(self): return { 'skip_steps': self.endpoint.get_skip_steps(), 'desired_step': self.endpoint.get_desired_step(), } def on_success(self, item=None): params = self.get_next_step_kwargs() return self.endpoint.wizard.next_step(**params) class ControlStepLinkPrototype(LinkPrototype): def get_link_kwargs(self, **kwargs): link_kwargs = {'on_submit':self.handle_submission, 'method':'POST', 'url':self.get_url(), 'form_class': self.get_form_class(), 'prompt':'step', 'rel':'step',} link_kwargs.update(kwargs) return super(ControlStepLinkPrototype, self).get_link_kwargs(**link_kwargs) def handle_submission(self, link, submit_kwargs): """ Called when the link is submitted. Returns a link representing the response. :rtype: Link """ form = link.get_form(**submit_kwargs) if form.is_valid(): return self.on_success(form) return link.clone(form=form) def get_next_step_kwargs(self, form): return { 'skip_steps': form.cleaned_data.get('skip_steps', []), 'desired_step': form.cleaned_data.get('desired_step', None), } def on_success(self, form): params = self.get_next_step_kwargs(form) return self.endpoint.wizard.next_step(**params)
python
from tests import PMGLiveServerTestCase from mock import patch import unittest from pmg.models import db, User from tests.fixtures import dbfixture, UserData, RoleData, OrganisationData class TestAdminUsersPage(PMGLiveServerTestCase): def setUp(self): super(TestAdminUsersPage, self).setUp() self.fx = dbfixture.data(UserData, RoleData, OrganisationData) self.fx.setup() self.user = self.fx.UserData.admin self.create_user_data = { "email": "[email protected]", "name": "Test user", "active": "y", "roles": self.fx.RoleData.admin.id, "organisation": self.fx.OrganisationData.pmg.id, "expiry": "2065-02-06", } def tearDown(self): self.delete_created_objects() self.fx.teardown() super(TestAdminUsersPage, self).tearDown() def test_admin_users_page(self): """ Test admin users page (/admin/user/) """ self.make_request("/admin/user/", self.user, follow_redirects=True) self.assertIn("Users", self.html) self.assertIn(self.fx.UserData.admin.email, self.html) self.assertIn(self.fx.UserData.editor.email, self.html) self.assertIn(self.fx.UserData.inactive.email, self.html) def test_admin_user_new_page(self): """ Test admin get new user page (/admin/user/new) """ url = "/admin/user/new" self.make_request( url, self.user, follow_redirects=True, ) self.assertIn("Email", self.html) self.assertIn("Email address confirmed at", self.html) self.assertIn("Subscribe Daily Schedule", self.html) def test_post_admin_users_new_page(self): """ Test admin new users page (/admin/user/new) """ before_count = len(User.query.all()) url = "/admin/user/new/?url=%2Fadmin%2Fuser%2F" response = self.make_request( url, self.user, data=self.create_user_data, method="POST", follow_redirects=True, ) self.assertEqual(200, response.status_code) after_count = len(User.query.all()) self.assertLess(before_count, after_count) created_user = User.query.filter( User.email == self.create_user_data["email"] ).scalar() self.assertTrue(created_user) self.created_objects.append(created_user)
python
from model import db, Product, Accounts


def deleteUser(rowid):
    # Assumes Accounts is the user/account model exposed by `model`.
    user = db.session.query(Accounts).filter(Accounts.id == rowid).first()
    if user:
        db.session.query(Accounts).filter(Accounts.id == rowid).delete()
        db.session.commit()


def deleteProduct(rowid):
    product = db.session.query(Product).filter(Product.id == rowid).first()
    if product:
        db.session.query(Product).filter(Product.id == rowid).delete()
        db.session.commit()
python
"""Hata Yönetimi - Raise Deyimi.""" # Python dili kırmızı yazılar ile kendine has hata mesajları yayınlamaktadır # Bizde bir hata meydana geldiğinde bu şekilde mesajlar yayınlayabiliriz. # Bunun için Raise deyimi kullanılır. sayi = 5 try: if sayi == 5: raise Exception('Sayı 5\'e eşit olamaz!') else: print(sayi) except Exception as e: print('ERROR! =>', e)
python
# -*- coding: utf-8 -*- import copy __author__ = "Grant Hulegaard" __copyright__ = "Copyright (C) Nginx, Inc. All rights reserved." __license__ = "" __maintainer__ = "Grant Hulegaard" __email__ = "[email protected]" def collect_cache_size(collector, data, stamp): collector.object.statsd.gauge('plus.cache.size', data['size'], stamp=stamp) if 'max_size' in data: collector.object.statsd.gauge('plus.cache.max_size', data['max_size'], stamp=stamp) def collect_cache_metrics(collector, data, stamp): types = [ 'bypass', 'expired', 'hit', 'miss', 'revalidated', 'stale', 'updating' ] for label in types: data_bucket = data[label] metric_base = 'plus.cache.%s' % label filtered_names = filter( lambda name: name not in ('responses_written', 'bytes_written'), data_bucket.keys() ) counted_vars = {} for name in filtered_names: metric_name = metric_base + '.%s' % name counted_vars[metric_name] = data_bucket[name] # metric base is used to store total responses counted_vars.update({ metric_base: data_bucket['responses'] }) collector.aggregate_counters(copy.deepcopy(counted_vars), stamp=stamp) CACHE_COLLECT_INDEX = [ collect_cache_size, collect_cache_metrics, ]
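# For reference, a plausible (abridged) shape for the `data` argument these
# collectors receive; only the keys actually read above are shown and the
# numbers are invented for illustration:
#
#     data = {
#         "size": 5242880,
#         "max_size": 10485760,
#         "hit":  {"responses": 120, "bytes": 409600},
#         "miss": {"responses": 30, "bytes": 102400,
#                  "responses_written": 10, "bytes_written": 51200},
#         # ...plus "bypass", "expired", "revalidated", "stale", "updating"
#     }
#
# collect_cache_size() emits plus.cache.size / plus.cache.max_size gauges, while
# collect_cache_metrics() aggregates every per-type counter except the *_written
# ones and reports each type's "responses" total as plus.cache.<type>.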
python
from abc import ABCMeta import numpy as np from falconcv.decor import typeassert import logging logger = logging.getLogger(__name__) class ApiModel(metaclass=ABCMeta): def train(self, *args, **kwargs): return self def freeze(self, *args, **kwargs): return self def eval(self, *args, **kwargs): return self @typeassert(input_image=[str, np.ndarray], size=tuple, threshold=float, top_k=int) def __call__(self, input_image, size=None, threshold=0.5, top_k=10): pass
python
import sqlite3

CREATE_QUERY = """
CREATE TABLE IF NOT EXISTS chat_table (
    chat_id INT PRIMARY KEY
)
"""

SELECT_ALL_QUERY = """
SELECT chat_id from chat_table
"""

INSERT_ONE_QUERY = """
INSERT INTO chat_table (chat_id) VALUES (%s)
"""


def execute_query(query):
    sqlite_connection = None
    try:
        sqlite_connection = sqlite3.connect('sqlite.db')
        cursor = sqlite_connection.cursor()
        cursor.execute(query)
        result = cursor.fetchall()
        sqlite_connection.commit()
        cursor.close()
        return result
    except sqlite3.Error as error:
        print("Error while connecting:", error)
    finally:
        # Guard against the connection never having been opened.
        if sqlite_connection:
            sqlite_connection.close()


def add_id(chat_id):
    # print("q - ", INSERT_ONE_QUERY % chat_id)
    a = execute_query(INSERT_ONE_QUERY % chat_id)
    # print("res of insert - ", a)


def get_all_ids():
    return execute_query(SELECT_ALL_QUERY)


execute_query(CREATE_QUERY)
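# Example usage (illustrative):
#
#     add_id(123456789)
#     print(get_all_ids())   # -> [(123456789,)]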
python
import pandas
from openpyxl import load_workbook
from openpyxl.utils.dataframe import dataframe_to_rows

wb = load_workbook('data/regions.xlsx')
ws = wb.active

df = pandas.read_excel('data/all_shifts.xlsx')
# Work on an explicit copy to avoid pandas' SettingWithCopyWarning.
df1 = df[['Sales Rep', 'Cost per', 'Units Sold']].copy()
df1['Total'] = df1['Cost per'] * df1['Units Sold']

# Write the frame into the existing sheet, starting at row 1 / column 6 (F).
rows = dataframe_to_rows(df1, index=False)
for row_index, row in enumerate(rows, 1):
    for column_index, col in enumerate(row, 6):
        ws.cell(row=row_index, column=column_index, value=col)

wb.save('output/combinded.xlsx')
python
from __future__ import unicode_literals, print_function, division import os import matplotlib.pyplot as plt from compute_scores import compute_scores # Draw figures in Figure 2. for rel in ['mirgene', 'ppi', 'ploc']: # Heuristic of trigger words. scores = compute_scores(rel, 'h2') precisions = [s[2] for s in scores] recalls = [s[3] for s in scores] fscores = [s[4] for s in scores] prec_color, recall_color, fscore_color = 'black', 'black', 'black' x_axis = [ 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, ] fig = plt.figure() ax1 = fig.add_subplot(111) ax1.set_xlabel('# of trigger stems', fontsize=24) ax1.plot(x_axis, precisions[:10], marker='o', color=prec_color, markersize=15, lw=2, linestyle='-', fillstyle='none') ax1.plot(x_axis, recalls[:10], marker='s', color=recall_color, markersize=15, lw=2, linestyle='-', fillstyle='none') ax1.plot(x_axis, fscores[:10], marker='^', color=fscore_color, markersize=15, lw=2, linestyle='-', fillstyle='none') ax1.plot(x_axis, precisions[10:], color=prec_color, markersize=15, lw=2, marker='o', linestyle='--', fillstyle='none') ax1.plot(x_axis, recalls[10:], color=recall_color, markersize=15, lw=2, marker='s', linestyle='--', fillstyle='none') ax1.plot(x_axis, fscores[10:], color=fscore_color, markersize=15, lw=2, marker='^', linestyle='--', fillstyle='none') plt.tick_params(labelsize=20) # Fig 2. a-c. figfile = os.path.join('eval/figures/{}_trigger.png'.format(rel)) fig.savefig(figfile, dpi=300, pad_inches=0 ,bbox_inches='tight') plt.show() # High-confidence patterns. scores = compute_scores(rel, 'h3') precisions = [s[2] for s in scores] recalls = [s[3] for s in scores] fscores = [s[4] for s in scores] x_axis = [ 20, 40, 60, 80, 100, 120, 140, 160, 180, 200 ] fig = plt.figure() ax1 = fig.add_subplot(111) ax1.set_xlabel('# of patterns', fontsize=24) ax1.plot(x_axis, precisions[:10], marker='o', color=prec_color, markersize=15, lw=2, linestyle='-', fillstyle='none') ax1.plot(x_axis, recalls[:10], marker='s', color=recall_color, markersize=15, lw=2, linestyle='-', fillstyle='none') ax1.plot(x_axis, fscores[:10], marker='^', color=fscore_color, markersize=15, lw=2, linestyle='-', fillstyle='none') ax1.plot(x_axis, precisions[10:], color=prec_color, markersize=15, lw=2, marker='o', linestyle='--', fillstyle='none') ax1.plot(x_axis, recalls[10:], color=recall_color, markersize=15, lw=2, marker='s', linestyle='--', fillstyle='none') ax1.plot(x_axis, fscores[10:], color=fscore_color, markersize=15, lw=2, marker='^', linestyle='--', fillstyle='none') plt.tick_params(labelsize=20) # Fig. 2 d-f. figfile = os.path.join('eval/figures/{}_pattern.png'.format(rel)) fig.savefig(figfile, dpi=300, pad_inches=0 ,bbox_inches='tight') plt.show()
python
#You are given a data structure of employee information, which includes the employee's unique id, his importance value and his direct subordinates' id. # For example, employee 1 is the leader of employee 2, and employee 2 is the leader of employee 3. They have importance value 15, 10 and 5, respectively. Then employee 1 has a data structure like [1, 15, [2]], and employee 2 has [2, 10, [3]], and employee 3 has [3, 5, []]. Note that although employee 3 is also a subordinate of employee 1, the relationship is not direct. # Now given the employee information of a company, and an employee id, you need to return the total importance value of this employee and all his subordinates. class Solution(object): def getImportance(self, employees, id): """ :type employees: Employee :type id: int :rtype: int """ # Time: O(n) # Space: O(n) emps = {employee.id: employee for employee in employees} def dfs(id): subordinates_importance = sum([dfs(sub_id) for sub_id in emps[id].subordinates]) return subordinates_importance + emps[id].importance return dfs(id)
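# Minimal self-check with a stand-in Employee class. LeetCode normally supplies
# this type; the stub below only mirrors the attributes used by the solution.
class _Employee:
    def __init__(self, id, importance, subordinates):
        self.id = id
        self.importance = importance
        self.subordinates = subordinates


if __name__ == '__main__':
    employees = [_Employee(1, 15, [2]), _Employee(2, 10, [3]), _Employee(3, 5, [])]
    print(Solution().getImportance(employees, 1))  # expected 30 (15 + 10 + 5)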
python
import copy import cv2 # import torch from mindspore import Tensor import numpy as np from PIL import Image from util.config import config as cfg from util.misc import find_bottom, find_long_edges, split_edge_seqence, \ norm2, vector_sin, split_edge_seqence_by_step, sample, fourier_transform, \ clockwise, find_start_point def pil_load_img(path): image = Image.open(path) image = np.array(image) return image class TextInstance(object): def __init__(self, points, orient, text): self.orient = orient self.text = text self.bottoms = None self.e1 = None self.e2 = None if self.text != "#": self.label = 1 else: self.label = -1 remove_points = [] self.points = np.array(points) if len(points) > 4: # remove point if area is almost unchanged after removing it ori_area = cv2.contourArea(points) for p in range(len(points)): # attempt to remove p index = list(range(len(points))) index.remove(p) area = cv2.contourArea(points[index]) if np.abs(ori_area - area)/ori_area < 0.0017 and len(points) - len(remove_points) > 4: remove_points.append(p) self.points = np.array([point for i, point in enumerate(points) if i not in remove_points]) else: self.points = np.array(points) def find_bottom_and_sideline(self): self.bottoms = find_bottom(self.points) # find two bottoms of this Text self.e1, self.e2 = find_long_edges(self.points, self.bottoms) # find two long edge sequence def disk_cover(self, n_disk=15): """ cover text region with several disks :param n_disk: number of disks :return: """ inner_points1 = split_edge_seqence(self.points, self.e1, n_disk) inner_points2 = split_edge_seqence(self.points, self.e2, n_disk) inner_points2 = inner_points2[::-1] # innverse one of long edge center_points = (inner_points1 + inner_points2) / 2 # disk center radii = norm2(inner_points1 - center_points, axis=1) # disk radius return inner_points1, inner_points2, center_points, radii def equal_width_bbox_cover(self, step=16.0): inner_points1, inner_points2 = split_edge_seqence_by_step(self.points, self.e1, self.e2, step=step) inner_points2 = inner_points2[::-1] # innverse one of long edge center_points = (inner_points1 + inner_points2) / 2 # disk center return inner_points1, inner_points2, center_points def __repr__(self): return str(self.__dict__) def __getitem__(self, item): return getattr(self, item) class TextDataset(object): def __init__(self, transform, is_training=False): super().__init__() self.transform = transform self.is_training = is_training @staticmethod def fill_polygon(mask, pts, value): cv2.fillPoly(mask, [pts.astype(np.int32)], color=(value,)) def make_text_region(self, img, polygon, tr_mask, train_mask, x_map, y_map, k, scale=1/2): [h, w] = img.shape[:2] h = int(h * scale) w = int(w * scale) deal_mask = np.zeros((h, w), np.uint8) points = (polygon.points * scale).astype(np.int32) cv2.fillPoly(tr_mask, [points], color=(1,)) cv2.fillPoly(deal_mask, [points], color=(1,)) if polygon.text == '#': cv2.fillPoly(train_mask, [points], color=(0,)) pts = sample(polygon.points * scale) pts = find_start_point(pts) c = fourier_transform(pts, k) c = clockwise(c, k) vector_x = np.real(c) vector_y = np.imag(c) for i in range(-k, k+1): if i != 0: x_map[:, :, i + k] = deal_mask * vector_x[i + k] + (1 - deal_mask) * x_map[:, :, i + k] y_map[:, :, i + k] = deal_mask * vector_y[i + k] + (1 - deal_mask) * y_map[:, :, i + k] else: for y, x in np.argwhere(deal_mask > 0.5): x_map[y, x, k] = vector_x[k] - x y_map[y, x, k] = vector_y[k] - y def make_text_center_line(self, sideline1, sideline2, center_line, tcl_msk1, expand=0.3, shrink=1): p1 
= np.mean(sideline1, axis=0) p2 = np.mean(sideline2, axis=0) vpp = vector_sin(p1 - p2) if vpp >= 0: top_line = sideline2 bot_line = sideline1 else: top_line = sideline1 bot_line = sideline2 if len(center_line) < 5: shrink = 0 for i in range(shrink, len(center_line) - 1 - shrink): c1 = center_line[i] c2 = center_line[i + 1] top1 = top_line[i] top2 = top_line[i + 1] bottom1 = bot_line[i] bottom2 = bot_line[i + 1] p1 = c1 + (top1 - c1) * expand p2 = c1 + (bottom1 - c1) * expand p3 = c2 + (bottom2 - c2) * expand p4 = c2 + (top2 - c2) * expand ploy1 = np.stack([p1, p2, p3, p4]) self.fill_polygon(tcl_msk1, ploy1, value=1) def get_training_data(self, image, polygons, k, image_id, image_path): H, W, _ = image.shape if self.transform: image, polygons = self.transform(image, copy.copy(polygons)) h, w, _ = image.shape tr_mask_3 = np.zeros((int(h/8), int(w/8), 1), np.uint8) train_mask_3 = np.ones((int(h/8), int(w/8), 1), np.uint8) tcl_mask_3 = np.zeros((int(h / 8), int(w / 8), 1), np.uint8) x_map_3 = np.zeros((int(h/8), int(w/8), 2 * k + 1), np.float32) y_map_3 = np.zeros((int(h/8), int(w/8), 2 * k + 1), np.float32) tr_mask_4 = np.zeros((int(h/16), int(w/16), 1), np.uint8) train_mask_4 = np.ones((int(h/16), int(w/16), 1), np.uint8) tcl_mask_4 = np.zeros((int(h/16), int(w/16), 1), np.uint8) x_map_4 = np.zeros((int(h/16), int(w/16), 2 * k + 1), np.float32) y_map_4 = np.zeros((int(h/16), int(w/16), 2 * k + 1), np.float32) tr_mask_5 = np.zeros((int(h/32), int(w/32), 1), np.uint8) train_mask_5 = np.ones((int(h/32), int(w/32), 1), np.uint8) tcl_mask_5 = np.zeros((int(h/32), int(w/32), 1), np.uint8) x_map_5 = np.zeros((int(h/32), int(w/32), 2 * k + 1), np.float32) y_map_5 = np.zeros((int(h/32), int(w/32), 2 * k + 1), np.float32) if polygons is not None: for polygon in polygons: x_max = polygon.points[:, 0].max() x_min = polygon.points[:, 0].min() y_max = polygon.points[:, 1].max() y_min = polygon.points[:, 1].min() dx = x_max - x_min dy = y_max - y_min criterion = max(dx, dy) / (h + 1e-5) polygon.find_bottom_and_sideline() sideline1, sideline2, center_points = polygon.equal_width_bbox_cover(step=4.0) if criterion < 0.4: self.make_text_region(image, polygon, tr_mask_3, train_mask_3, x_map_3, y_map_3, k, scale=1 / 8) self.make_text_center_line(sideline1/8, sideline2/8, center_points/8, tcl_mask_3) if criterion > 0.3 and criterion < 0.7: self.make_text_region(image, polygon, tr_mask_4, train_mask_4, x_map_4, y_map_4, k, scale=1 / 16) self.make_text_center_line(sideline1/16, sideline2/16, center_points/16, tcl_mask_4) if criterion > 0.6: self.make_text_region(image, polygon, tr_mask_5, train_mask_5, x_map_5, y_map_5, k, scale=1 / 32) self.make_text_center_line(sideline1/32, sideline2/32, center_points/32, tcl_mask_5) # clip value (0, 1) tr_mask_3 = np.clip(tr_mask_3, 0, 1) train_mask_3 = np.clip(train_mask_3, 0, 1) tcl_mask_3 = np.clip(tcl_mask_3, 0, 1) tr_mask_4 = np.clip(tr_mask_4, 0, 1) train_mask_4 = np.clip(train_mask_4, 0, 1) tcl_mask_4 = np.clip(tcl_mask_4, 0, 1) tr_mask_5 = np.clip(tr_mask_5, 0, 1) train_mask_5 = np.clip(train_mask_5, 0, 1) tcl_mask_5 = np.clip(tcl_mask_5, 0, 1) label_3 = np.concatenate([tr_mask_3, train_mask_3, x_map_3, y_map_3, tcl_mask_3], axis=2) label_4 = np.concatenate([tr_mask_4, train_mask_4, x_map_4, y_map_4, tcl_mask_4], axis=2) label_5 = np.concatenate([tr_mask_5, train_mask_5, x_map_5, y_map_5, tcl_mask_5], axis=2) # # to pytorch channel sequence image = image.transpose(2, 0, 1) # image = Tensor.from_numpy(image).astype("float32") # label_3 = 
Tensor.from_numpy(label_3).astype("float32") # label_4 = Tensor.from_numpy(label_4).astype("float32") # label_5 = Tensor.from_numpy(label_5).astype("float32") if not self.is_training: points = np.zeros((cfg.max_annotation, cfg.max_points, 2)) length = np.zeros(cfg.max_annotation, dtype=int) if polygons is not None: for i, polygon in enumerate(polygons): pts = polygon.points points[i, :pts.shape[0]] = polygon.points length[i] = pts.shape[0] meta = { 'image_id': image_id, 'image_path': image_path, 'annotation': points, 'n_annotation': length, 'Height': H, 'Width': W } # meta = [image_id, image_path, points, length, H, W] # meta = np.array(meta) return image, label_3, label_4, label_5, meta return image, label_3, label_4, label_5 def get_test_data(self, image, image_id, image_path): H, W, _ = image.shape if self.transform: image, polygons = self.transform(image) # to pytorch channel sequence image = image.transpose(2, 0, 1) meta = { 'image_id': image_id, 'image_path': image_path, 'Height': H, 'Width': W } return image, meta def __len__(self): raise NotImplementedError()
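
# --- Illustrative usage sketch (added for clarity, not part of the original file) ---
# A minimal concrete dataset on top of TextDataset. Everything below is an assumption:
# the ToyTextDataset name, passing a plain list of image paths, and skipping the
# annotations entirely (polygons=None); a real loader would parse ground-truth
# polygons into TextInstance objects before calling get_training_data.
class ToyTextDataset(TextDataset):

    def __init__(self, image_paths, transform=None, is_training=False, k=5):
        super().__init__(transform, is_training)
        self.image_paths = image_paths
        self.k = k  # number of Fourier coefficient pairs; must match the model config

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, item):
        image_path = self.image_paths[item]
        image = pil_load_img(image_path)
        polygons = None  # a real dataset would return a list of TextInstance here
        return self.get_training_data(image, polygons, self.k,
                                      image_id=item, image_path=image_path)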
python
#!/usr/bin/python

import broker
import select

## demo receiver that is subscribed to the topic "demo/select_fd"
ep = broker.Endpoint()
subscriber = ep.make_subscriber("demo/select_fd")
ep.listen("127.0.0.1", 9999)

while True:
    ## select() blocks until there is read-readiness on the subscriber's file descriptor
    # print("wait ...")
    fd_sets = select.select([subscriber.fd()], [], [])
    # print("go on...")
    if not fd_sets[0]:
        ## should not happen with a blocking select (no timeout was given)
        print("boom. this is the end.")
    (topic, data) = subscriber.get()
    ## we could also use subscriber.poll() and handle an array of messages
    received_event = broker.bro.Event(data)
    print("received on topic: {} event name: {} content: {}".format(
        topic, received_event.name(), received_event.args()))

## With a blocking select() this is pretty similar to the "simple" example, since the call to
## `subscriber.get()` blocks as well. To handle that nicely, we would have to wrap it in a thread.
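
## --- Illustrative counterpart sketch (not part of the original demo) ---
## A minimal sender for the receiver above, kept in comments so this script stays a pure
## receiver. Assumptions: the Broker Python bindings expose Endpoint.peer() and
## Endpoint.publish() as in the upstream examples, and the event name "my_event" is arbitrary.
##
##   import broker, time
##   ep = broker.Endpoint()
##   ep.peer("127.0.0.1", 9999)
##   time.sleep(1)  # crude way to wait for the peering to be established
##   ep.publish("demo/select_fd", broker.bro.Event("my_event", "hello", 42))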
python
#
# @lc app=leetcode.cn id=275 lang=python3
#
# [275] H-Index II
#
# https://leetcode-cn.com/problems/h-index-ii/description/
#
# algorithms
# Medium (41.25%)
# Likes:    139
# Dislikes: 0
# Total Accepted:    39K
# Total Submissions: 85.6K
# Testcase Example:  '[0,1,3,5,6]'
#
# Given an array of citations (each citation is a non-negative integer) of a
# researcher, sorted in ascending order, write a method that computes the
# researcher's h-index.
#
# Definition of the h-index: "h stands for 'high citations'. A researcher's
# h-index means that h of his/her N papers have each been cited at least h
# times (the remaining N - h papers are each cited no more than h times)."
#
# Example:
#
# Input: citations = [0,1,3,5,6]
# Output: 3
# Explanation: The array means the researcher has 5 papers in total, cited
# 0, 1, 3, 5 and 6 times respectively. Since 3 of the papers have each been
# cited at least 3 times, and the remaining two papers are each cited no more
# than 3 times, the h-index is 3.
#
# Note:
#
# If there are several possible values for h, the h-index is the largest one.
#
# Follow up:
#
# This is a follow-up to the H-Index problem; here the citations array is
# guaranteed to be sorted in ascending order.
# Can you optimise your algorithm to run in logarithmic time complexity?
#

from typing import List

# @lc code=start
class Solution:
    def hIndex(self, citations: List[int]) -> int:
        # binary search for the first index l with citations[l] >= n - l
        n = len(citations)
        l, r = 0, n - 1
        while l < r:
            m = l + (r - l) // 2
            if citations[m] >= n - m:
                r = m
            else:
                l = m + 1
        return n - l if citations[l] >= n - l else 0
# @lc code=end


# Alternative implementation kept for reference: sort in descending order and
# scan linearly (O(n log n) because of the sort).
def hIndex(self, citations: List[int]) -> int:
    h = 0
    n = len(citations)
    i = 0
    citations.sort(reverse=True)
    while i < n and citations[i] > h:
        h += 1
        i += 1
    return h


# Alternative implementation kept for reference: binary search over a closed interval.
def hIndex(self, citations: List[int]) -> int:
    if len(citations) == 1:
        return citations[0] if citations[0] <= 1 else 1
    l, r = 0, len(citations) - 1
    n = len(citations)
    while l <= r:
        m = l + (r - l) // 2
        if citations[m] >= n - m:
            r = m - 1
        else:
            l = m + 1
    return n - l
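
# Worked example (added for clarity): binary search on citations = [0, 1, 3, 5, 6], n = 5.
#   l=0, r=4 -> m=2: citations[2]=3 >= n-m=3, so r=2
#   l=0, r=2 -> m=1: citations[1]=1 <  n-m=4, so l=2
#   l == r == 2 and citations[2]=3 >= n-l=3, so the answer is n - l = 3.
# The search keeps the answer inside [l, r] and halves the range each step,
# giving the logarithmic time asked for in the follow-up.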
python
from twotest.fixtures import client, django_client from wheelcms_axle.tests.fixtures import localtyperegistry, localtemplateregistry, root
python
resposta = 'Y'
soma = count = maior = menor = 0
while resposta in 'Yy':
    num = int(input('Enter a number: '))
    soma += num
    count += 1
    if count == 1:
        maior = menor = num
    else:
        if num > maior:
            maior = num
        elif num < menor:  # only update the minimum when the number is actually smaller
            menor = num
    resposta = str(input('Do you want to continue? Y/N ')).upper().strip()[0]
media = soma / count
print('The average was: {}'.format(media))
print('The largest and smallest numbers were: {} {}'.format(maior, menor))
python
#!/usr/bin/env python # to be used with 'rps_pico_client.py' # Example to illustrate RPS feature to run tiny (pico) services, such as reading sensor data or # controlling a device; here, time at server is requested. In this example a new service task is # created to serve a request. Compare this to 'task_pico_service.py' where requests are processed # by the same server task. import sys import time import random import pycos # import netpycos to add networking to pycos import pycos.netpycos # PyPI / pip packaging adjusts assertion below for Python 3.7+ if sys.version_info.major == 3: assert sys.version_info.minor < 7, \ ('"%s" is not suitable for Python version %s.%s; use file installed by pip instead' % (__file__, sys.version_info.major, sys.version_info.minor)) def pico_service(req, task=None): if not isinstance(req, dict): raise Exception('request must be a dictionary') client = req.get('client', None) if req.get('name', None) != 'time': raise Exception('request should have "name" set to "time"') if not isinstance(client, pycos.Task): raise Exception('request should have "client" set to task of requester') delay = random.uniform(0.5, 2) # simulate delay in getting result (e.g., reading a sensor or computation) yield task.sleep(delay) raise StopIteration({'result': time.asctime(), 'server': task}) if __name__ == '__main__': # pycos.logger.setLevel(pycos.Logger.DEBUG) # 'secret' is set so only peers that use same secret can communicate; # SSL can be used for encryption if required; see 'rps_node_*.py' for authentication of peers scheduler = pycos.Pycos(name='pico_server', secret='PycOS') # create RPS and register it so remote clients can request execution pycos.RPS(pico_service).register() if sys.version_info.major > 2: read_input = input else: read_input = raw_input while 1: try: line = read_input('Enter "quit" or "exit" to terminate pico_service: ').strip().lower() if line in ('quit', 'exit'): break except Exception: break
python
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals __doc__ = """ hgvs.edit -- representation of edit operations in HGVS variants NARefAlt and AARefAlt are abstractions of several major variant types. They are distinguished by whether the ref and alt elements of the structure. The HGVS grammar for NA and AA are subtly different (e.g., the ref AA in a protein substitution is part of the location). """ import recordtype from bioutils.sequences import aa_to_aa1, aa1_to_aa3 from hgvs.exceptions import HGVSError class Edit(object): pass class NARefAlt( Edit, recordtype.recordtype('NARefAlt', [('ref',None),('alt',None),('uncertain',False)]) ): """ represents substitutions, deletions, insertions, and indels. :ivar ref: reference sequence or length :ivar alt: alternate sequence :ivar uncertain: boolean indicating whether the variant is uncertain/undetermined """ @property def ref_s(self): """ returns a string representing the ref sequence, if it is not None and smells like a sequence >>> NARefAlt('ACGT').ref_s u'ACGT' >>> NARefAlt('7').ref_s >>> NARefAlt(7).ref_s """ return self.ref if (isinstance(self.ref,basestring) and self.ref and self.ref[0] in 'ACGTUN') else None @property def ref_n(self): """ returns an integer, either from the `ref` instance variable if it's a number, or the length of ref if it's a string, or None otherwise >>> NARefAlt('ACGT').ref_n 4 >>> NARefAlt('7').ref_n 7 >>> NARefAlt(7).ref_n 7 """ try: return int(self.ref) except ValueError: return len(self.ref) if self.ref else None def __str__(self): if self.ref is None and self.alt is None: raise HGVSError('RefAlt: ref and alt sequences are both undefined') # subst and delins if self.ref is not None and self.alt is not None: if self.ref == self.alt: s = '=' elif len(self.alt) == 1 and len(self.ref) == 1 and not self.ref.isdigit(): # don't turn del5insT into 5>T s = '{self.ref}>{self.alt}'.format(self=self) else: s = 'del{self.ref}ins{self.alt}'.format(self=self) # del case elif self.ref is not None: s = 'del{self.ref}'.format(self=self) # ins case else: # self.alt is not None s = 'ins{self.alt}'.format(self=self) return '('+s+')' if self.uncertain else s def _set_uncertain(self): """sets the uncertain flag to True; used primarily by the HGVS grammar :returns: self """ self.uncertain = True return self @property def type(self): """return the type of this Edit :returns: edit type (str) """ if self.ref is not None and self.alt is not None: if self.ref == self.alt: edit_type = 'identity' elif len(self.alt) == 1 and len(self.ref) == 1 and not self.ref.isdigit(): edit_type = 'sub' else: edit_type = 'delins' elif self.ref is not None: edit_type = 'del' else: edit_type = 'ins' return edit_type class AARefAlt( Edit, recordtype.recordtype('AARefAlt', [('ref',None),('alt',None), ('uncertain',False)]) ): def __init__(self,ref, alt, uncertain=False): super(AARefAlt, self).__init__(ref=aa_to_aa1(ref), alt=aa_to_aa1(alt), uncertain=uncertain) def __str__(self): if self.ref is None and self.alt is None: #raise HGVSError('RefAlt: ref and alt sequences are both undefined') return '=' # subst and delins if self.ref is not None and self.alt is not None: if self.ref == self.alt: s = '=' elif len(self.ref) == 1 and len(self.alt) == 1: s = aa1_to_aa3(self.alt) else: s = 'delins{alt}'.format(alt = aa1_to_aa3(self.alt)) # del case elif self.ref is not None and self.alt is None: s = 'del' # ins case elif self.ref is None and self.alt is not None: s = 'ins{alt}'.format(alt=aa1_to_aa3(self.alt)) else: 
raise RuntimeError("Should not be here") return '('+s+')' if self.uncertain else s def _set_uncertain(self): """sets the uncertain flag to True; used primarily by the HGVS grammar :returns: self """ self.uncertain = True return self @property def type(self): """return the type of this Edit :returns: edit type (str) """ if self.ref is not None and self.alt is not None: if self.ref == self.alt: edit_type = 'identity' elif len(self.ref) == 1 and len(self.alt) == 1: edit_type = 'sub' else: edit_type = 'delins' elif self.ref is not None and self.alt is None: edit_type = 'del' elif self.ref is None and self.alt is not None: edit_type = 'ins' return edit_type class AASub( AARefAlt ): def __str__(self): s = aa1_to_aa3(self.alt) if self.alt != '?' else self.alt return '('+s+')' if self.uncertain else s @property def type(self): """return the type of this Edit :returns: edit type (str) """ return 'sub' class AAFs(Edit, recordtype.recordtype('AAFs', [('ref',None),('alt',None),('length',None),('uncertain',False)])): def __init__(self,ref,alt,length=None,uncertain=False): super(AAFs, self).__init__(ref=aa_to_aa1(ref), alt=aa_to_aa1(alt), length=length, uncertain=uncertain) def __str__(self): st_length = self.length or '' s = "{alt}fsTer{length}".format(alt=aa1_to_aa3(self.alt), length=st_length) return '('+s+')' if self.uncertain else s def _set_uncertain(self): """sets the uncertain flag to True; used primarily by the HGVS grammar :returns: self """ self.uncertain = True return self @property def type(self): """return the type of this Edit :returns: edit type (str) """ return 'fs' class AAExt(Edit, recordtype.recordtype('AAExt', [('ref',None),('alt',None), ('aaterm', None), ('length',None), ('uncertain',False)])): def __init__(self,ref,alt,aaterm=None, length=None,uncertain=False): super(AAExt, self).__init__(ref=aa_to_aa1(ref), alt=aa_to_aa1(alt), aaterm=aa_to_aa1(aaterm), length=length, uncertain=uncertain) def __str__(self): st_alt = self.alt or '' st_aaterm = self.aaterm or '' st_length = self.length or '' s = "{alt}ext{term}{length}".format(alt=aa1_to_aa3(st_alt), term=aa1_to_aa3(st_aaterm), length=st_length) return '('+s+')' if self.uncertain else s def _set_uncertain(self): """sets the uncertain flag to True; used primarily by the HGVS grammar :returns: self """ self.uncertain = True return self @property def type(self): """return the type of this Edit :returns: edit type (str) """ return 'ext' class Dup( Edit, recordtype.recordtype('Dup', [('seq',None),('uncertain',False)]) ): def __str__(self): return 'dup' + (self.seq or '') def _set_uncertain(self): """sets the uncertain flag to True; used primarily by the HGVS grammar :returns: self """ self.uncertain = True return self @property def type(self): """return the type of this Edit :returns: edit type (str) """ return 'dup' class Repeat( Edit, recordtype.recordtype('Repeat', [('seq',None),('min',None),('max',None),('uncertain',False)]) ): def __str__(self): if self.min > self.max: raise HGVSError('Repeat min count must be less than or equal to max count') if self.min == self.max: return '{self.seq}[{self.min}]'.format(self=self) return '{self.seq}({self.min}_{self.max})'.format(self=self) def _set_uncertain(self): """sets the uncertain flag to True; used primarily by the HGVS grammar :returns: self """ self.uncertain = True return self @property def type(self): """return the type of this Edit :returns: edit type (str) """ return 'repeat' class NACopy(Edit, recordtype.recordtype('NACopy', ['copy', ('uncertain', False)])): def __str__(self): s = 
'copy{}'.format(self.copy) return '('+s+')' if self.uncertain else s def _set_uncertain(self): """sets the uncertain flag to True; used primarily by the HGVS grammar :returns: self """ self.uncertain = True return self @property def type(self): """return the type of this Edit :returns: edit type (str) """ return 'copy' class NADupN(Edit, recordtype.recordtype('NADupN', ['n', ('uncertain', False)])): def __str__(self): s = 'dup{}'.format(self.n) return '('+s+')' if self.uncertain else s def _set_uncertain(self): """sets the uncertain flag to True; used primarily by the HGVS grammar :returns: self """ self.uncertain = True return self @property def type(self): """return the type of this Edit :returns: edit type (str) """ return 'dup' if __name__ == "__main__": import doctest doctest.testmod() # class Inv( Edit, recordtype.recordtype('Inv', [], default=None) ): # def __str__(self): # return '' # # class Con( Edit, recordtype.recordtype('Con', ['con'], default=None) ): # def __str__(self): # return self.con # # class ComplexVariant( Edit, recordtype.recordtype('ComplexVariant', ['edits','rel'], default=None) ): # def __str__(self): # return '[' + self.rel.join( self.edits ) + ']' # # class CompoundVariant( Edit, recordtype.recordtype('CompoundVariant', ['edits'], default=None) ): # def __str__(self): # return ';'.join( [ '['+e+']' for e in self.edits ] ) # # class MosaicVariant( Edit, recordtype.recordtype('Edit', ['edit'], default=None) ): # def __str__(self): # return '[=/{self.edit}]'.format(self=self) # # class ChimericVariant( Edit, recordtype.recordtype('Edit', ['edit'], default=None) ): # def __str__(self): # return '[=//{self.edit}]'.format(self=self) ## <LICENSE> ## Copyright 2014 HGVS Contributors (https://bitbucket.org/hgvs/hgvs) ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. ## </LICENSE>
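
# --- Illustrative usage sketch (not part of the original module) ---
# Assumes this file is importable as hgvs.edit in a Python 2 environment
# (the module still uses `basestring` and the `recordtype` package). The
# expected outputs follow directly from the __str__ methods above.
#
#   >>> from hgvs.edit import NARefAlt, Dup
#   >>> str(NARefAlt(ref='A', alt='T'))            # substitution
#   'A>T'
#   >>> str(NARefAlt(ref='AC', alt=None))          # deletion
#   'delAC'
#   >>> str(NARefAlt(ref=None, alt='G'))           # insertion
#   'insG'
#   >>> str(NARefAlt(ref='A', alt='T', uncertain=True))
#   '(A>T)'
#   >>> str(Dup(seq='AG'))
#   'dupAG'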
python
#!/usr/bin/env python """"Downloads and unzips the KITTI tracking data. Warning: This can take a while, and use up >100Gb of disk space.""" #ref from https://github.com/utiasSTARS/pykitti/blob/master/pykitti/downloader/tracking.py from __future__ import print_function import argparse import os import sys from subprocess import call import glob #Object tracking 2012 #http://www.cvlibs.net/datasets/kitti/eval_tracking.php URL_BASE="https://s3.eu-central-1.amazonaws.com/avg-kitti/" tracking_dir_names = ['image_02', 'image_03', 'velodyne', 'calib', 'oxts', 'label_02', 'det_02'] #folder name under tracking/training tracking_dir_zip_tags = ['image_2', 'image_3', 'velodyne', 'calib', 'oxts', 'label_2', 'det_2_lsvm'] #original zip file name #lsvm is L-SVM reference detections for training and test set (L-SVM), 108 MB) #data_tracking_oxts.zip is GPS/IMU data, if you want to use map information (8 MB) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--kitti_root', type=str, default=os.path.join('/mnt/DATA5T', 'Kitti')) #/mnt/DATA5T/Kitti, /DATA5T/Datasets # parser.add_argument('--root', type=str, default=None, help='data folder') return parser.parse_args(sys.argv[1:]) ## Need to clean up lsvm as their files have trailing whitespaces def clean_file(filename): f = open(filename, 'r') new_lines = [] for line in f.readlines(): new_lines.append(line.rstrip()) f.close() f = open(filename, 'w') for line in new_lines: f.write(line + '\n') f.close() def clean_lsvm(lsvm_dir): for filename in glob.glob(lsvm_dir + '/*.txt'): print('Cleaning ', filename) clean_file(filename) def main(): args = parse_args() kitti_dir = args.kitti_root # Perform a few sanity checks to make sure we're operating in the right dir # when left with the default args. if not os.path.isabs(kitti_dir): if not os.path.isdir('src'): os.chdir('..') if not os.path.isdir('src'): print("Please make sure to run this tool from the DynSLAM " "project root when using relative paths.") return 1 tracking_dir = os.path.join(kitti_dir, 'tracking') #/DATA5T/Datasets/Kitti/tracking os.makedirs(tracking_dir, exist_ok=True) os.chdir(tracking_dir) tracking_zip_names = ["data_tracking_" + name + ".zip" for name in tracking_dir_zip_tags] for dir_name, zip_name in zip(tracking_dir_names, tracking_zip_names): canary_dir = os.path.join('training', dir_name) if os.path.isdir(canary_dir): print("Directory {} canary dir seems to exist, so I will assume the data is there.".format(canary_dir)) else: if os.path.exists(zip_name): print("File {} exists. Not re-downloading.".format(zip_name)) else: url = URL_BASE + zip_name print("Downloading file {} to folder {}.".format(zip_name, kitti_dir)) call(['wget', url]) call(['unzip', '-o', zip_name]) if str(canary_dir) == 'training/det_02': print("Need to trim whitespaces for lsvm label files") clean_lsvm('training/det_02') return 0 if __name__ == '__main__': sys.exit(main())
python
from collections.abc import Iterable from enum import Enum import json import logging import subprocess from typing import Union, Any, Optional from pathlib import Path from scipy import sparse import scipy from typer.models import NoneType logger = logging.getLogger(__name__) def expand_paths(path_or_pattern): """ Make a list of paths from a glob pattern From https://stackoverflow.com/a/51108375 """ path = Path(path_or_pattern).expanduser() parts = path.parts[1:] if path.is_absolute() else path.parts return list(Path(path.root).glob(str(Path("").joinpath(*parts)))) def get_total_lines(paths: list[Union[Path, str]], encoding: str = "utf-8") -> int: """ Get the total number of lines (read: documents) to process """ logger.info("Calculating total number of documents...") try: # wc is faster than native python return sum( int(subprocess.check_output(f"/usr/bin/wc -l {p}", shell=True).split()[0]) for p in paths ) except subprocess.CalledProcessError: return sum(1 for p in paths for line in open(p, encoding=encoding)) def read_lines(path: Union[Path, str], encoding: str = "utf-8") -> list[str]: """ Read the lines in a file """ with open(path, encoding=encoding) as infile: return [line for line in infile if line.strip()] def save_lines(obj: Iterable, fpath: Union[str, Path]): with open(fpath, "w") as outfile: for i, x in enumerate(obj): if i == 0: outfile.write(x) else: outfile.write(f"\n{x}") def save_json(obj: Any, fpath: Union[str, Path], indent: Optional[int] = None): with open(fpath, "w") as outfile: json.dump(obj, outfile, indent=indent) def save_params(params: dict[str, Any], fpath: Union[str, Path]): safe_params = {} safe_types = (float, int, str, bool, type(None)) for k, v in params.items(): if isinstance(v, Enum): v = v.value if not isinstance(v, (tuple, list) + safe_types): v = str(v) if isinstance(v, (tuple, list)) and any( (not isinstance(i, safe_types)) for i in v ): v = [str(v) for v in v] safe_params[k] = v save_json(safe_params, fpath, indent=2) def save_dtm_as_jsonl( dtm: sparse.csr.csr_matrix, vocab: dict[str, int], ids: list[str], outpath: Union[str, Path], ): """ Save document-term matrix as a dictionary in the following format, where each row is a document: { "id": <doc_1>, "counts": { <word_2>: <count_of_word_2_in_doc_1>, <word_6>: <count_of_word_6_in_doc_1>, ... }, } """ inv_vocab = dict(zip(vocab.values(), vocab.keys())) with open(outpath, mode="w") as outfile: for i, (row, id) in enumerate(zip(dtm, ids)): words_in_doc = [inv_vocab[idx] for idx in row.indices] counts = [int(v) for v in row.data] # int64 not serializable word_counts = dict(zip(words_in_doc, counts)) row_json = json.dumps({"id": id, "counts": word_counts}) if i == 0: outfile.write(row_json) else: outfile.write(f"\n{row_json}") def gen_ngrams(tokens: list[str], min_n: int, max_n: int) -> list[str]: """ Create all ngrams from `tokens` where n is between `min_n`, `max_n`, inclusive. """ return [ "_".join(tokens[i : i + n]) for n in range(min_n, max_n + 1) for i in range(len(tokens) - n + 1) ]
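
# --- Illustrative usage sketch (not part of the original module) ---
# gen_ngrams joins consecutive tokens with underscores for every n in [min_n, max_n]:
#
#   >>> gen_ngrams(["the", "quick", "brown"], min_n=1, max_n=2)
#   ['the', 'quick', 'brown', 'the_quick', 'quick_brown']
#
# save_dtm_as_jsonl writes one JSON object per document, e.g.
#   {"id": "doc-1", "counts": {"quick": 1, "brown_fox": 2}}
# where "doc-1" and the counts shown here are made-up placeholder values.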
python
#---------------------------------------------------------------------- # This programme provides a simple example of how to use "approach control" # for colour light (and to improve the automation of semaphore signals. # # For Colour Light Signals - this can be used where it is necessary to slow # a train down to take a diverging route. For "Approach on Red", the junction # signal is set to DANGER (and all the signals behind show increasingly # restrictive aspects as appropriate). When the signal is approached, it # automatically changes to PROCEED, enabling the train to continue along # the divergent route. "Approach on Yellow" is used for when the speed # restriction on the divergent route is less restrictive but still slower # than the speed restriction of the main route. In this case the junction # signal shows a CAUTION aspect, and the signals behind show flashing # yellow and double flashing yellow to indicate the divergent route. # # For Semaphore signals, this can be used for simulating/automating the # series of signals within a block section (e.g.outer home, inner home, # starter, advanced starter etc). A home signal should show a PROCEED aspect # for an approaching train if a subsequent home signal (in the same 'Block # Section') is set to DANGER. In this case all preceding home signals (and # the distant for the block section) would remain ON to slow down the train # on the approach to the first home signal. As each signal is approached, # the signal would then be cleared to enable the train to proceed (at low # speed) towards the next home signal (which would be ON). As the train # approaches the second Home signal, the signal would be cleared - etc # # This programme also provides an example of a multiple windows application. # showing how all callbacks (from external sensor events) are injected back # into the main Tkinter thread (via the Tkinter event queue) # --------------------------------------------------------------------- from tkinter import * from model_railway_signals import * import logging import threading #---------------------------------------------------------------------- # Configure the log level. If no 'level' is specified specified only warnings and errors # will be generated. A level of 'INFO' will tell you what the various functions are doing # 'under the hood' - useful when developing/debugging a layout signalling Scheme. A level # of 'DEBUG' will additionally report the DCC Bus commands being sent to the Pi-SPROG #---------------------------------------------------------------------- logging.basicConfig(format='%(levelname)s: %(message)s',level=logging.DEBUG) #---------------------------------------------------------------------- # WINDOW 2 - Main Callback for 'Release on Yellow' Approach Control (Colour Light Signals) #---------------------------------------------------------------------- def window1_callback_function(item_id,callback_type): print("Window 1: Callback into main program - Item: "+str(item_id)+" - Callback Type: "+str(callback_type)) print("Window 1: Receiving callback event in thread " + str(threading.get_ident())) # Deal with changes to the Track Occupancy (based on "signal Passed" Events) # We use the label of the cleared section (returned by 'clear_section_occupied') # to set the label for the next section (i.e. 
'pass' the current train along)
    if callback_type == sig_callback_type.sig_passed:
        if item_id == 21:
            set_section_occupied(21)
        elif item_id == 22:
            clear_section_occupied(21)
            set_section_occupied(22)
        elif item_id == 23:
            clear_section_occupied(22)
            set_section_occupied(23)
        elif item_id == 24 and point_switched(21):
            set_section_occupied(25,clear_section_occupied(23))
        elif item_id == 24 and not point_switched(21):
            set_section_occupied(24,clear_section_occupied(23))
        elif item_id == 25:
            trigger_timed_signal (25,0,5)
            clear_section_occupied(25)
        elif item_id == 26:
            trigger_timed_signal (26,0,5)
            clear_section_occupied(24)

    # Override signals based on track occupancy - we could use signal passed events but
    # we also need to allow for manual setting/resetting of the track occupancy sections
    # Note that we leave the Distant (signal 21) to deal with later
    if section_occupied(22): set_signal_override(22)
    else: clear_signal_override(22)
    if section_occupied(23): set_signal_override(23)
    else: clear_signal_override(23)
    if ((point_switched(21) and section_occupied(25)) or
            (not point_switched(21) and section_occupied(24))):
        set_signal_override(24)
    else:
        clear_signal_override(24)

    # Refresh the route settings
    if point_switched(21):
        set_route(24,route=route_type.LH1)
    else:
        set_route(24,route=route_type.MAIN)

    # Process the signal/point interlocking
    if not fpl_active(21): lock_signal(24)
    else: unlock_signal(24)
    if signal_clear(24): lock_point(21)
    else: unlock_point(21)

    # Here is the approach control code
    if callback_type not in (sig_callback_type.sig_released,):
        if point_switched(21) and signal_state(25) != signal_state_type.PROCEED:
            set_approach_control(24)
        elif not point_switched(21) and signal_state(26) != signal_state_type.PROCEED:
            set_approach_control(24)
        else:
            clear_approach_control(24)
        if signal_state(24) != signal_state_type.PROCEED:
            set_approach_control(23)
        else:
            clear_approach_control(23)
        if signal_state(23) != signal_state_type.PROCEED:
            set_approach_control(22)
        else:
            clear_approach_control(22)

    # Finally - Override the distant signal if any of the home signals ahead are set
    # to DANGER or if the train has just entered the section immediately beyond the
    # signal. In this case, we only need to check the state of the signal ahead
    if section_occupied(21) or signal_state(22) != signal_state_type.PROCEED:
        set_signal_override(21)
    else:
        clear_signal_override(21)
    return()

#----------------------------------------------------------------------------------------------
# WINDOW 2 - Main Callback for 'Release on Yellow' Approach Control (Colour Light Signals)
#----------------------------------------------------------------------------------------------

def window2_callback_function(item_id,callback_type):

    print("Window 2: Callback into main program - Item: "+str(item_id)+" - Callback Type: "+str(callback_type))
    print("Window 2: Receiving callback event in thread " + str(threading.get_ident()))

    # Deal with changes to the Track Occupancy (based on "signal Passed" Events)
    # We use the label of the cleared section (returned by 'clear_section_occupied')
    # to set the label for the next section (i.e.
'pass' the current train along) if callback_type == sig_callback_type.sig_passed: if item_id == 1: set_section_occupied(1) elif item_id == 2: set_section_occupied(2,clear_section_occupied(1)) elif item_id == 3: set_section_occupied(3,clear_section_occupied(2)) elif item_id == 4 and point_switched(1): set_section_occupied(5,clear_section_occupied(3)) elif item_id == 4 and not point_switched(1): set_section_occupied(4,clear_section_occupied(3)) elif item_id == 5: trigger_timed_signal (5,0,5) clear_section_occupied(5) elif item_id == 6: trigger_timed_signal (6,0,5) clear_section_occupied(4) # Override signals based on track occupancy - we could use signal passed events but # we also need to allow for manual setting/resetting of the track occupancy sections if section_occupied(1): set_signal_override(1) else: clear_signal_override(1) if section_occupied(2): set_signal_override(2) else: clear_signal_override(2) if section_occupied(3): set_signal_override(3) else: clear_signal_override(3) if ((point_switched(1) and section_occupied(5)) or (not point_switched(1) and section_occupied(4))): set_signal_override(4) else: clear_signal_override(4) # Refresh the signal aspects based on the route settings - Need to work back # along the route that is set to ensure we are updating based on the signal ahead if point_switched(1): set_route(4,route=route_type.LH1) update_signal(4,sig_ahead_id=5) else: set_route(4,route=route_type.MAIN) update_signal(4,sig_ahead_id=6) update_signal(3,sig_ahead_id=4) update_signal(2,sig_ahead_id=3) update_signal(1,sig_ahead_id=2) # Process the signal/point interlocking if not fpl_active(1):lock_signal(4) else: unlock_signal(4) if signal_clear(4): lock_point(1) else: unlock_point(1) # Here is the approach control code - we only want to SET the approach control when # the route is first set up for the diverging route or when the signal is passed # This is so we don't inadvertantly SET the approach control on other events received # between the train releasing the approach control and the train passing the signal. # We also need to CLEAR the approach control if the route is switched back to main if ((callback_type == point_callback_type.point_switched and item_id==1 and point_switched(1)) or (callback_type == sig_callback_type.sig_passed and item_id==4 and point_switched(1)) ): set_approach_control (4,release_on_yellow=True) if callback_type == point_callback_type.point_switched and item_id==1 and not point_switched(1): clear_approach_control (4) return() #---------------------------------------------------------------------------------------------- # WINDOW 3 - Main Callback for 'Release on Red' Approach Control (Colour Light Signals) #---------------------------------------------------------------------------------------------- def window3_callback_function(item_id,callback_type): print("Window 3: Callback into main program - Item: "+str(item_id)+" - Callback Type: "+str(callback_type)) print("Window 3: Receiving callback event in thread " + str(threading.get_ident())) # Deal with changes to the Track Occupancy (based on "signal Passed" Events) # We use the label of the cleared section (returned by 'clear_section_occupied') # to set the label for the next section (i.e. 
'pass' the current train along) if callback_type == sig_callback_type.sig_passed: if item_id == 11: set_section_occupied(11) elif item_id == 12: set_section_occupied(12,clear_section_occupied(11)) elif item_id == 13: set_section_occupied(13,clear_section_occupied(12)) elif item_id == 14 and point_switched(11): set_section_occupied(15,clear_section_occupied(13)) elif item_id == 14 and not point_switched(11): set_section_occupied(14,clear_section_occupied(13)) elif item_id == 15: trigger_timed_signal (15,0,3) clear_section_occupied(15) elif item_id == 16: trigger_timed_signal (16,0,3) clear_section_occupied(14) # Override signals based on track occupancy - we could use signal passed events but # we also need to allow for manual setting/resetting of the track occupancy sections if section_occupied(11): set_signal_override(11) else: clear_signal_override(11) if section_occupied(12): set_signal_override(12) else: clear_signal_override(12) if section_occupied(13): set_signal_override(13) else: clear_signal_override(13) if ((point_switched(11) and section_occupied(15)) or (not point_switched(11) and section_occupied(14))): set_signal_override(14) else: clear_signal_override(14) # Refresh the signal aspects based on the route settings - Need to work back # along the route that is set to ensure we are updating based on the signal ahead if point_switched(11): set_route(14,theatre_text="1") update_signal(14,sig_ahead_id=15) else: set_route(14,theatre_text="2") update_signal(14,sig_ahead_id=16) update_signal(13,sig_ahead_id=14) update_signal(12,sig_ahead_id=13) update_signal(11,sig_ahead_id=12) # Process the signal/point interlocking if not fpl_active(11): lock_signal(14) else: unlock_signal(14) if signal_clear(14): lock_point(11) else: unlock_point(11) # Here is the approach control code - we only want to SET the approach control when # the route is first set up for the diverging route or when the signal is passed # This is so we don't inadvertantly SET the approach control on other events received # between the train releasing the approach control and the train passing the signal. 
# We also need to CLEAR the approach control if the route is switched back to main
    if ( (callback_type == point_callback_type.point_switched and item_id==11 and point_switched(11)) or
         (callback_type == sig_callback_type.sig_passed and item_id==14 and point_switched(11)) ):
        set_approach_control (14,release_on_yellow=False)
    if callback_type == point_callback_type.point_switched and item_id==11 and not point_switched(11):
        clear_approach_control (14)
    return()

#------------------------------------------------------------------------------------
# This is where the code begins
#------------------------------------------------------------------------------------

print ("Creating Windows and Canvases")
window1 = Tk()
window1.title("Root Window 1: An example of using 'Release on Red' Approach Control for Semaphore Signals")
canvas1 = Canvas(window1,height=300,width=1100,bg="grey85")
canvas1.pack()
window2 = Toplevel(window1)
window2.title("Window 2: An example of using 'Release on Yellow' Approach Control for Colour Light Signals")
canvas2 = Canvas(window2,height=300,width=1100,bg="grey85")
canvas2.pack()
window3 = Toplevel(window2)
window3.title("Window 3: An example of using 'Release on Red' Approach Control for Colour Light Signals")
canvas3 = Canvas(window3,height=300,width=1100,bg="grey85")
canvas3.pack()

print ("Initialising Pi Sprog")
initialise_pi_sprog ()
request_dcc_power_on()

#----------------------------------------------------------------------------------------------
# WINDOW 3 - An example of using 'Release on Red' Approach Control for Colour Light Signals
#----------------------------------------------------------------------------------------------

print ("Window 3: Drawing Schematic and creating points")
# Draw the Bottom line (up to the first point)
canvas3.create_line(0,150,810,150,fill="black",width=3)
# Create (and draw) the first point - a left hand point with a Facing Point Lock
create_point(canvas3,11,point_type.LH, 835,150,"black",point_callback=window3_callback_function,fpl=True)
# Draw the Main Line and Loop Line
canvas3.create_line(835,125,860,100,fill="black",width=3) # 45 degree line from point to start of loop
canvas3.create_line(860,100,1100,100,fill="black",width=3) # Loop line
canvas3.create_line(860,150,1100,150,fill="black",width=3) # Main Line

print ("Window 3: Creating the track Occupancy Sections")
create_section(canvas3,11,175,150,section_callback=window3_callback_function)
create_section(canvas3,12,400,150,section_callback=window3_callback_function)
create_section(canvas3,13,625,150,section_callback=window3_callback_function)
create_section(canvas3,14,925,150,section_callback=window3_callback_function)
create_section(canvas3,15,925,100,section_callback=window3_callback_function)

print ("Window 3: Creating Signals")
create_colour_light_signal (canvas3,11,50,150,
                            signal_subtype = signal_sub_type.four_aspect,
                            sig_callback=window3_callback_function,
                            sig_passed_button = True,
                            fully_automatic = True,
                            refresh_immediately = False)
create_colour_light_signal (canvas3,12,275,150,
                            signal_subtype = signal_sub_type.four_aspect,
                            sig_callback=window3_callback_function,
                            sig_passed_button = True,
                            fully_automatic =
True, refresh_immediately = False) create_colour_light_signal (canvas3,13,500,150, signal_subtype = signal_sub_type.four_aspect, sig_callback=window3_callback_function, sig_passed_button = True, fully_automatic = True, refresh_immediately = False) create_colour_light_signal (canvas3,14,725,150, signal_subtype = signal_sub_type.four_aspect, sig_callback=window3_callback_function, sig_passed_button = True, refresh_immediately = False, approach_release_button = True, theatre_route_indicator = True) create_colour_light_signal (canvas3,15,1000,100, signal_subtype = signal_sub_type.four_aspect, sig_callback=window3_callback_function, fully_automatic=True, sig_passed_button=True) create_colour_light_signal (canvas3,16,1000,150, signal_subtype = signal_sub_type.four_aspect, sig_callback=window3_callback_function, fully_automatic=True, sig_passed_button=True) print ("Window 3: Setting Initial Route and Interlocking") # Set the initial interlocking conditions by running the main callback function window3_callback_function(None, None) #---------------------------------------------------------------------------------------------- # WINDOW 2 - An example of using 'Release on Yellow' Approach Control for Colour Light Signals #---------------------------------------------------------------------------------------------- print ("Window 2: Creating DCC Mappings") # Define the DCC mappings for the signals. In this instance, we're only going to generate mappings # for the signals that support flashing aspects (i.e. Traintech 4 aspects with flashing aspects) # Signal 2 (addresses 13,14,15,16) - uses the simplified Train_Tech signal mapping function map_traintech_signal (sig_id = 3, base_address = 13) # Signal 3 (addresses 17,18,19,20) - uses the simplified Train_Tech signal mapping function map_traintech_signal (sig_id = 3, base_address = 17) print ("Window 2: Drawing Schematic and creating points") # Draw the the Top line (up to the first point) canvas2.create_line(0,150,800,150,fill="black",width=3) # Create (and draw) the first point - a left hand point with a Facing Point Lock create_point(canvas2,1,point_type.LH, 825,150,"black",point_callback=window2_callback_function,fpl=True) # Draw the Main Line and Loop Line canvas2.create_line(825,125,850,100,fill="black",width=3) # 45 degree line from point to start of loop canvas2.create_line(850,100,1100,100,fill="black",width=3) # Loop line canvas2.create_line(850,150,1100,150,fill="black",width=3) # Main Line print ("Window 2: Creating the track Occupancy Sections") create_section(canvas2,1,175,150,section_callback=window2_callback_function) create_section(canvas2,2,400,150,section_callback=window2_callback_function) create_section(canvas2,3,625,150,section_callback=window2_callback_function) create_section(canvas2,4,925,150,section_callback=window2_callback_function) create_section(canvas2,5,925,100,section_callback=window2_callback_function) print ("Window 2: Creating Signals") create_colour_light_signal (canvas2,1,50,150, signal_subtype = signal_sub_type.four_aspect, sig_callback=window2_callback_function, sig_passed_button = True, fully_automatic = True, refresh_immediately = False) create_colour_light_signal (canvas2,2,275,150, signal_subtype = signal_sub_type.four_aspect, sig_callback=window2_callback_function, sig_passed_button = True, fully_automatic = True, refresh_immediately = False) create_colour_light_signal (canvas2,3,500,150, signal_subtype = signal_sub_type.four_aspect, sig_callback=window2_callback_function, sig_passed_button = True, 
fully_automatic = True, refresh_immediately = False) create_colour_light_signal (canvas2,4,725,150, signal_subtype = signal_sub_type.four_aspect, sig_callback=window2_callback_function, sig_passed_button = True, refresh_immediately = False, approach_release_button = True, lhfeather45 = True) create_colour_light_signal (canvas2,5,1000,100, signal_subtype = signal_sub_type.four_aspect, sig_callback=window2_callback_function, fully_automatic=True, sig_passed_button=True) create_colour_light_signal (canvas2,6,1000,150, signal_subtype = signal_sub_type.four_aspect, sig_callback=window2_callback_function, fully_automatic=True, sig_passed_button=True) print ("Window 2: Setting Initial Route and Interlocking") # Set the initial interlocking conditions by running the main callback function window2_callback_function(None, None) #---------------------------------------------------------------------------------------------- # WINDOW 1 - An example of using 'Release on Red' Approach Control for Semaphore Signals #---------------------------------------------------------------------------------------------- print ("Window 1: Drawing Schematic and creating points") # Draw the the Top line (up to the first point) canvas1.create_line(0,150,800,150,fill="black",width=3) # Create (and draw) the first point - a left hand point with a Facing Point Lock create_point(canvas1,21,point_type.LH, 825,150,"black",point_callback=window1_callback_function,fpl=True) # Draw the Main Line and Loop Line canvas1.create_line(825,125,850,100,fill="black",width=3) # 45 degree line from point to start of loop canvas1.create_line(850,100,1100,100,fill="black",width=3) # Loop line canvas1.create_line(850,150,1100,150,fill="black",width=3) # Main Line print ("Window 1: Creating the track Occupancy Sections") create_section(canvas1,21,175,150,section_callback=window1_callback_function) create_section(canvas1,22,400,150,section_callback=window1_callback_function) create_section(canvas1,23,625,150,section_callback=window1_callback_function) create_section(canvas1,24,925,150,section_callback=window1_callback_function) create_section(canvas1,25,925,100,section_callback=window1_callback_function) print ("Window 1: Creating Signals") create_semaphore_signal (canvas1,21,50,150, distant = True, sig_callback=window1_callback_function, sig_passed_button = True) create_semaphore_signal (canvas1,22,275,150, sig_callback=window1_callback_function, approach_release_button = True, sig_passed_button = True) create_semaphore_signal (canvas1,23,500,150, sig_callback=window1_callback_function, approach_release_button = True, sig_passed_button = True) create_semaphore_signal (canvas1,24,725,150, sig_callback=window1_callback_function, sig_passed_button = True, approach_release_button = True, lh1_signal = True) create_semaphore_signal (canvas1,25,1000,100, sig_callback=window1_callback_function, sig_passed_button=True) create_semaphore_signal (canvas1,26,1000,150, sig_callback=window1_callback_function, sig_passed_button=True) print ("Window 1: Setting Initial Route and Interlocking") # Set the initial interlocking conditions by running the main callback function window1_callback_function(None, None) #---------------------------------------------------------------------------------------- print("Entering Main Event Loop") print("Main Thread is: " + str(threading.get_ident())) # Now enter the main event loop and wait for a button press (which will trigger a callback) window1.mainloop()
python
import subprocess import json from datetime import datetime from pydruid.client import PyDruid from pydruid.utils.aggregators import (longmax, doublemax) from pydruid.utils.filters import Dimension from kafka import KafkaProducer from iso8601utils import validators class KafkaAccessLayer(object): def __init__(self): self.connection = None def connect(self, uri): try: def serializer(v): return json.dumps(v).encode('utf-8') self.connection = KafkaProducer(bootstrap_servers=uri, value_serializer=serializer) except Exception: raise Exception('Kafka connection error: {0}'.format(uri)) def write_stats(self, id, name, stats, **kwargs): for stat in stats: msg = {'agent_id': id, 'process_name': name, 'timestamp': datetime.utcfromtimestamp(stat[0]) .strftime("%Y-%m-%dT%H:%M:%S.%fZ"), 'cpu': stat[1], 'mem': stat[2]} self.connection.send('supervisor', msg) self.connection.flush() kafka = KafkaAccessLayer() class PlyQLError(Exception): def __init__(self, expr, msg): self.expr = expr self.message = msg class PlyQLConnectionError(PlyQLError): def __init__(self, expr, msg, uri): super(PlyQLConnectionError, self).__init__(expr, msg) self.uri = uri class PlyQL(object): def __init__(self, uri): self.uri = uri def query(self, q, interval=None): command = ['plyql', '-h', str(self.uri), '-q', str(q), '-o', 'json'] if interval: command.extend(['-i', interval]) process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = process.communicate() if err: try: (_, _, uri) = err.split(' ') raise PlyQLConnectionError(err, 'Could not connect to Druid.', uri) except ValueError: raise PlyQLError(err, 'Error executing query.') else: return json.loads(out) class DruidAccessLayer(object): timeseries_granularities = ['none', 'second', 'minute', 'fifteen_minute', 'thirty_minute', 'hour', 'day', 'week', 'month', 'quarter', 'year'] select_granularities = ['all', 'second', 'minute', 'fifteen_minute', 'thirty_minute', 'hour', 'day', 'week', 'month', 'quarter', 'year'] def __init__(self): self.connection = None self.plyql = None def connect(self, uri): self.connection = PyDruid('http://{0}'.format(uri), 'druid/v2/') self.plyql = PlyQL(uri) try: tables = self.tables() if {'Tables_in_database': 'supervisor'} not in tables: raise Exception('Druid connection error: missing ' '"supervisor" table') except Exception: raise Exception('Druid connection error: {0}'.format(uri)) def __validate_granularity__(self, granularity, supported_granularities): if granularity in self.timeseries_granularities: query_granularity = granularity elif validators.duration(granularity): query_granularity = {'type': 'period', 'period': granularity} else: raise ValueError( 'Unsupported granularity "{0}"'.format(granularity)) return query_granularity def __validate_intervals__(self, intervals): if not validators.interval(intervals): raise ValueError('Unsupported interval "{0}"'.format(intervals)) return intervals def tables(self): return self.plyql.query('SHOW TABLES') def processes(self, agent_id, period='P6W'): return self.plyql.query('SELECT process_name AS process, ' 'COUNT() AS count, MAX(__time) AS time ' 'FROM supervisor WHERE agent_id = "{0}" ' 'GROUP BY process_name;' .format(agent_id), period) def timeseries(self, agent_id, process_name, granularity='none', intervals='P6W', descending=False): query_granularity = self.__validate_granularity__( granularity, self.timeseries_granularities) intervals = self.__validate_intervals__(intervals) return self.connection.timeseries( datasource='supervisor', 
granularity=query_granularity, descending=descending, intervals=intervals, aggregations={'cpu': doublemax('cpu'), 'mem': longmax('mem')}, context={'skipEmptyBuckets': 'true'}, filter=(Dimension('agent_id') == agent_id) & (Dimension('process_name') == process_name)) def select(self, agent_id, process_name, granularity='all', intervals='P6W', descending=True): query_granularity = self.__validate_granularity__( granularity, self.select_granularities) intervals = self.__validate_intervals__(intervals) return self.connection.select( datasource='supervisor', granularity=query_granularity, intervals=intervals, descending=descending, dimensions=['process_name'], metrics=['cpu', 'mem'], filter=(Dimension('agent_id') == agent_id) & (Dimension('process_name') == process_name), paging_spec={'pagingIdentifiers': {}, "threshold": 1} ) druid = DruidAccessLayer()
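
# --- Illustrative usage sketch (not part of the original module) ---
# Kept in comments so importing this module stays side-effect free. The hostnames,
# ports and agent/process names below are placeholders; both access layers raise
# if they cannot reach their backends, so this only sketches the call pattern.
#
#   kafka.connect('localhost:9092')
#   kafka.write_stats('agent-1', 'web', [(1500000000, 12.5, 104857600)])
#
#   druid.connect('localhost:8082')          # must expose a 'supervisor' datasource
#   hourly = druid.timeseries('agent-1', 'web', granularity='hour', intervals='P1D')
#   latest = druid.select('agent-1', 'web')  # paged to a single, most recent row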
python