from clang.cindex import Index from .sample import Sample from .context import Context from .path import Path from .ast_utils import ast_to_graph, is_function, is_class, is_operator_token, is_namespace, make_ast_err_message from networkx.algorithms import shortest_path from networkx.drawing.nx_agraph import to_agraph from itertools import combinations import uuid import os import re import random def debug_save_graph(func_node, g): file_name = func_node.spelling + ".png" num = 0 while os.path.exists(file_name): file_name = func_node.spelling + str(num) + ".png" num += 1 a = to_agraph(g) a.draw(file_name, prog='dot') a.clear() def tokenize(name, max_subtokens_num): if is_operator_token(name): return [name] first_tokens = name.split('_') str_tokens = [] for token in first_tokens: internal_tokens = re.findall('[a-z]+|[A-Z]+[a-z]*|[0-9.]+|[-*/&|%=()]+', token) str_tokens += [t for t in internal_tokens if len(t) > 0] assert len(str_tokens) > 0, "Can't tokenize expr: {0}".format(name) if max_subtokens_num != 0: str_tokens = str_tokens[:max_subtokens_num] return str_tokens class AstParser: def __init__(self, max_contexts_num, max_path_len, max_subtokens_num, max_ast_depth, out_path): self.validate = False self.save_buffer_size = 1000 self.out_path = out_path self.max_subtokens_num = max_subtokens_num self.max_contexts_num = max_contexts_num self.max_path_len = max_path_len self.max_ast_depth = max_ast_depth self.index = Index.create() self.samples = set() self.header_only_functions = set() def __del__(self): self.save() def __parse_node(self, node): try: namespaces = [x for x in node.get_children() if is_namespace(x)] for n in namespaces: # ignore standard library functions if n.displayname != 'std' and not n.displayname.startswith('__'): self.__parse_node(n) functions = [x for x in node.get_children() if is_function(x)] for f in functions: self.__parse_function(f) classes = [x for x in node.get_children() if is_class(x)] for c in classes: methods = [x for x in c.get_children() if is_function(x)] for m in methods: self.__parse_function(m) except Exception as e: if 'Unknown template argument kind' not in str(e): msg = make_ast_err_message(str(e), node) raise Exception(msg) self.__dump_samples() def parse(self, compiler_args, file_path=None): ast = self.index.parse(file_path, compiler_args) self.__parse_node(ast.cursor) def __dump_samples(self): if len(self.samples) >= self.save_buffer_size: self.save() def save(self): if not self.out_path: return if not os.path.exists(self.out_path): os.makedirs(self.out_path) if len(self.samples) > 0: file_name = os.path.join(self.out_path, str(uuid.uuid4().hex) + ".c2s") # print(file_name) with open(file_name, "w") as file: for sample in self.samples: file.write(str(sample.source_mark) + str(sample) + "\n") self.samples.clear() def __parse_function(self, func_node): try: # ignore standard library functions if func_node.displayname.startswith('__'): return # detect header only function duplicates file_name = func_node.location.file.name source_mark = (file_name, func_node.extent.start.line) if file_name.endswith('.h') and func_node.is_definition: # print('Header only function: {0}'.format(func_node.displayname)) if source_mark in self.header_only_functions: # print('Duplicate') return else: self.header_only_functions.add(source_mark) key = tokenize(func_node.spelling, self.max_subtokens_num) g = ast_to_graph(func_node, self.max_ast_depth) # debug_save_graph(func_node, g) terminal_nodes = [node for (node, degree) in g.degree() if degree == 1] 
random.shuffle(terminal_nodes) contexts = set() ends = combinations(terminal_nodes, 2) for start, end in ends: path = shortest_path(g, start, end) if path: if self.max_path_len != 0 and len(path) > self.max_path_len: continue # skip too long paths path = path[1:-1] start_node = g.nodes[start]['label'] tokenize_start_node = not g.nodes[start]['is_reserved'] end_node = g.nodes[end]['label'] tokenize_end_node = not g.nodes[end]['is_reserved'] path_tokens = [] for path_item in path: path_node = g.nodes[path_item]['label'] path_tokens.append(path_node) context = Context( tokenize(start_node, self.max_subtokens_num) if tokenize_start_node else [start_node], tokenize(end_node, self.max_subtokens_num) if tokenize_end_node else [end_node], Path(path_tokens, self.validate), self.validate) contexts.add(context) if len(contexts) > self.max_contexts_num: break if len(contexts) > 0: sample = Sample(key, contexts, source_mark, self.validate) self.samples.add(sample) except Exception as e: # skip unknown cursor exceptions if 'Unknown template argument kind' not in str(e): print('Failed to parse function : ') print('Filename : ' + func_node.location.file.name) print('Start {0}:{1}'.format(func_node.extent.start.line, func_node.extent.start.column)) print('End {0}:{1}'.format(func_node.extent.end.line, func_node.extent.end.column)) print(e)
python
import onnx
from onnxruntime.quantization import quantize, QuantizationMode

# Load the onnx model
model = onnx.load('/home/lh/pretrain-models/pose_higher_hrnet_256_sim.onnx')
# Quantize
quantized_model = quantize(model, quantization_mode=QuantizationMode.IntegerOps)
# Save the quantized model
onnx.save(quantized_model, '/home/lh/pretrain-models/pose_higher_hrnet_256_sim_int8.onnx')
python
# Copyright Fortior Blockchain, LLLP 2021
# Open Source under Apache License

from flask import Flask, request, render_template, redirect, url_for
from flask_sock import Sock
from algosdk import account, encoding, mnemonic
from vote import election_voting, hashing, count_votes
from algosdk.future.transaction import AssetTransferTxn, PaymentTxn
from algosdk.v2client import algod
import rsa
import hashlib
import sqlite3 as sl

# Added new sqlite functionality for local devices
con = sl.connect('voters.db', check_same_thread=False)
cur = con.cursor()

app = Flask(__name__)
sock = Sock(app)
finished = False
adminLogin = False
corporate_finished = False
validated = False
my_key = hashing("tee")


@app.route("/")
def start():
    """ Start page """
    return render_template('index.html')


@app.route('/start', methods=['POST', 'GET'])
def start_voting():
    error = ''
    message = ''
    global finished
    if request.method == 'POST':
        key = hashing(str(request.form.get('Key')))
        if key == my_key:
            # message = reset_votes()
            finished = False
            message = 'Petition Started'
        else:
            error = "Incorrect admin key"
    return render_template("startprocess.html", message=message, error=error)


@app.route('/overview', methods=['POST', 'GET'])
def create():
    return render_template('overview.html')


@app.route('/admin', methods=['POST', 'GET'])
def verify():
    if request.method == 'POST':
        Social = hashing(str(request.form.get('Social')))
        Drivers = hashing(str(request.form.get('Drivers')))
        Key = hashing(str(request.form.get('Key')))
        if str(Key) == my_key:
            return render_template('overview.html')
    return render_template('adminLogin.html')


@app.route('/end', methods=['POST', 'GET'])
def end():
    error = ''
    message = ''
    global finished
    if request.method == 'POST':
        key = hashing(str(request.form.get('Key')))
        if key == my_key:
            message = count_votes()
            finished = True
        else:
            error = "Incorrect admin key"
    return render_template("endprocess.html", message=message, error=error)


@app.route('/view', methods=['POST', 'GET'])
def view():
    count_votes()
    return render_template("viewprogress.html")


@app.route('/vote', methods=['POST', 'GET'])
def vote():
    message = ''
    if request.method == 'POST':
        message = election_voting()
        count_votes()
    return render_template('vote.html', message=message)


@app.route('/about/')
def about():
    """about"""
    return render_template('about.html')


if __name__ == "__main__":
    app.run(host='127.0.0.1', debug=True)
python
""" TODO: Shal check that all the needed packages are available before running the program """
python
import os import time import logging from sarpy.io.nitf.nitf_head import NITFDetails from sarpy.io.nitf.image import ImageSegmentHeader from sarpy.io.nitf.des import DataExtensionHeader from . import unittest def generic_nitf_header_test(instance, test_file): assert isinstance(instance, unittest.TestCase) # can we parse it at all? how long does it take? with instance.subTest(msg="header parsing"): start = time.time() details = NITFDetails(test_file) # how long does it take? logging.info('unpacked nitf details in {}'.format(time.time() - start)) # how does it look? logging.debug(details.nitf_header) # is the output as long as it should be? with instance.subTest(msg="header length match"): header_string = details.nitf_header.to_bytes() equality = (len(header_string) == details.nitf_header.HL) if not equality: logging.error( 'len(produced header) = {}, nitf_header.HL = {}'.format(len(header_string), details.nitf_header.HL)) instance.assertTrue(equality) # is the output what it should be? with instance.subTest(msg="header content match"): with open(test_file, 'rb') as fi: file_header = fi.read(details.nitf_header.HL) equality = (file_header == header_string) if not equality: chunk_size = 80 start_chunk = 0 while start_chunk < len(header_string): end_chunk = min(start_chunk + chunk_size, len(header_string)) logging.error('real[{}:{}] = {}'.format( start_chunk, end_chunk, file_header[start_chunk:end_chunk])) logging.error('prod[{}:{}] = {}'.format( start_chunk, end_chunk, header_string[start_chunk:end_chunk])) start_chunk = end_chunk instance.assertTrue(equality) # is each image subheader working? for i in range(details.img_segment_offsets.size): with instance.subTest('image subheader {} match'.format(i)): img_bytes = details.get_image_subheader_bytes(i) img_sub = ImageSegmentHeader.from_bytes(img_bytes, start=0) instance.assertEqual( len(img_bytes), img_sub.get_bytes_length(), msg='image subheader as long as expected') instance.assertEqual( img_bytes, img_sub.to_bytes(), msg='image subheader serializes and deserializes as expected') # is each data extenson subheader working? for i in range(details.des_segment_offsets.size): with instance.subTest('des subheader {} match'.format(i)): des_bytes = details.get_des_subheader_bytes(i) des_sub = DataExtensionHeader.from_bytes(des_bytes, start=0) instance.assertEqual( len(des_bytes), des_sub.get_bytes_length(), msg='des subheader as long as expected') instance.assertEqual( des_bytes, des_sub.to_bytes(), msg='des subheader serializes and deserializes as expected') class TestNITFHeader(unittest.TestCase): @classmethod def setUp(cls): cls.test_root = os.path.expanduser(os.path.join('~', 'Desktop', 'sarpy_testing', 'sicd')) def test_nitf_header(self): tested = 0 for fil in [ 'sicd_example_RMA_RGZERO_RE16I_IM16I.nitf', 'sicd_example_RMA_RGZERO_RE32F_IM32F.nitf', 'sicd_example_RMA_RGZERO_RE32F_IM32F_cropped_multiple_image_segments_v1.2.nitf']: test_file = os.path.join(self.test_root, fil) if os.path.exists(test_file): tested += 1 generic_nitf_header_test(self, test_file) else: logging.info('No file {} found'.format(test_file)) self.assertTrue(tested > 0, msg="No files for testing found")
python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Presubmit tests for ProductionSupportedFlagList.java
"""

import os
import sys


def _SetupImportPath(input_api):
  android_webview_common_dir = input_api.PresubmitLocalPath()
  _CHROMIUM_SRC = os.path.join(android_webview_common_dir, os.pardir,
                               os.pardir, os.pardir, os.pardir, os.pardir,
                               os.pardir, os.pardir)
  sys.path.append(os.path.join(_CHROMIUM_SRC, 'android_webview', 'tools'))


def CheckChangeOnUpload(input_api, output_api):
  _SetupImportPath(input_api)
  import generate_flag_labels
  results = []
  results.extend(
      generate_flag_labels.CheckMissingWebViewEnums(input_api, output_api))
  return results
python
"""This is the stock insertion generator""" import numpy as np import mitty.lib import mitty.lib.util as mutil from mitty.plugins.variants import scale_probability_and_validate import logging logger = logging.getLogger(__name__) __example_param_text = """ { "p": 0.01, # Per-base probability of having an insertion "t_mat": [[ 0.32654629, 0.17292732, 0.24524503, 0.25528135], # Base transition matrix [ 0.3489394, 0.25942695, 0.04942584, 0.3422078], [ 0.28778188, 0.21087004, 0.25963262, 0.24171546], [ 0.21644706, 0.20588717, 0.24978216, 0.32788362]], "p_end": 0.1, # Probability of chain ending "max_len": 1000 # Maximum length of insertion } """ _description = """ Stock insertion model that generates sequences with same base transition matrix as the human genome and creates a power-law distribution of insertion lengths. A typical parameter set resembles """ + __example_param_text _example_params = eval(__example_param_text) class Model: def __init__(self, p=0.01, t_mat=None, p_end=0.1, max_len=1000, **kwargs): assert 0 <= p <= 1.0, "Probability out of range" assert 0 <= p_end <= 1.0, "Probability out of range" assert 0 < max_len, 'max_len needs to be 1 or more' if t_mat is None: t_mat = [[0.32654629, 0.17292732, 0.24524503, 0.25528135], [0.3489394, 0.25942695, 0.04942584, 0.3422078], [0.28778188, 0.21087004, 0.25963262, 0.24171546], [0.21644706, 0.20588717, 0.24978216, 0.32788362]] self.p, self.t_mat, self.p_end, self.max_len = p, t_mat, p_end, max_len def get_variants(self, ref, p=None, f=None, seed=1, **kwargs): """This function is called by the simulator to obtain variants. :param ref: reference sequence as a string :param chrom: chromosome number (1,2,3,4...) :param p: array/list of probability values :param f: array/list of frequency values :param seed: seed for the random number generators :return: 5 arrays/lists/iterables all of the same length pos - position of SNPs stop - stop locations, (pos + 1 for SNPs) ref - reference base, alt - alt base, p - probability value for this variant. 
These are uniformly distributed random values """ assert 0 < seed < mitty.lib.SEED_MAX logger.debug('Master seed: {:d}'.format(seed)) base_loc_rng, ins_markov_rng = mutil.initialize_rngs(seed, 2) pt_mat = mutil.add_p_end_to_t_mat(self.t_mat, self.p_end) p_eff = scale_probability_and_validate(self.p, p, f) ins_locs = mutil.place_poisson_seq(base_loc_rng, p_eff, 0, len(ref), ref) #np.array([x for x in mutil.place_poisson(base_loc_rng, p_eff, 0, len(ref)) if ref[x] != 'N'], dtype='i4') ins_list, len_list = mutil.markov_sequences(ref, ins_locs, self.max_len, pt_mat, ins_markov_rng) lengths = np.array(len_list, dtype='i4') return ins_locs, ins_locs + 1, [ins[0] for ins in ins_list], ins_list, (1.0 - lengths / float(lengths.max())) if lengths.shape[0] else [] def test0(): """Edge case - no variants generated""" ref_seq = 'ACTGACTGACTGACTGACTGACTGACTGACTGACTG' m = Model(p=0.00001) pos, stop, ref, alt, p = m.get_variants(ref_seq, seed=10) assert len(pos) == 0 # This should just run and not crash def test1(): """Basic test""" ref_seq = 'ACTGACTGACTGACTGACTGACTGACTGACTGACTG' m = Model(p=0.1) pos, stop, ref, alt, p = m.get_variants(ref_seq, seed=10) for p, r in zip(pos, alt): assert r[0] == ref_seq[p] def test2(): """Do we discard 'N's?""" ref_seq = 'ACTGACTGACTGACTGACTGACTGACTGACTGACTG' m = Model(p=0.1) pos, stop, ref, alt, p = m.get_variants(ref_seq, seed=10) assert 20 in pos, pos ref_seq = 'ACTGACTGACTGACTGACTGNCTGACTGACTGACTG' m = Model(p=0.1) pos, stop, ref, alt, p = m.get_variants(ref_seq, seed=10) assert 20 not in pos if __name__ == "__main__": print _description
python
#! /usr/bin/python #coding: utf-8 fields = {} fields["brand"] = ( [ #BrandId #BrandType #BE_ID #BE_CODE [380043552, 0, 103, '103'] ]) fields["BrandTypes"] = ( [ #name #offset ["pps", 0], ["lca", 1], ["mctu", 2], ["mvno", 3] ]) fields["prefix"] = ( [ #prefix squence eventid+cdrType data_store_id start number ["rec", "SEQ_FILE_SEQ_REC", [[1101, 1]], 1000000], ["sms", "SEQ_FILE_SEQ_SMS", [[1102, 1]], 1000010], ["mms", "SEQ_FILE_SEQ_MMS", [[1103, 1]], 1000020], ["data", "SEQ_FILE_SEQ_DATA", [[1104, 1]], 1000030], ["com", "CDRSERIALNO_NORMAL_COM_PPS", [[1206, 1]], 1000040], ["mgr", "SEQ_FILE_SEQ_MGR", [[1304, 4], [1329, 1]], 1000050], ["vou", "SEQ_FILE_SEQ_VOU", [[1302, 2], [1306, 1], [1350, 2]], 1000060], ["Dump", "SEQ_CDR_ID", [[1415, 1]], 1000070], ["clr", "SEQ_FILE_SEQ_CLR", [[1408, 1]], 1000080], ["mon", "SEQ_FILE_SEQ_MON", [[1401, 1]], 1000090], ["b_modif", "SEQ_CDR_ID", [[1, 6, 20000]], 1000100], ["b_del" ,"SEQ_CDR_ID", [[1, 6, 20001]], 1000110], ["b_create","SEQ_CDR_ID", [[1, 6, 20002]], 1000120] ]) def create_BP_DATA_STORE(): print "delete from BP_DATA_STORE where DATA_STORE_ID >= 1000000;" for prefix in fields["prefix"]: for brand in fields["BrandTypes"]: #dump话单的目录是在${CBP_CDRPATH}/output/{BrandID}/dump if(prefix[0] == "dump"): print "insert into BP_DATA_STORE values ('" + str(prefix[3] + brand[1]) + "', 'R5_" + prefix[0] + "', '.unl', '36700160', '100000', '600', '${CBP_CDRPATH}/output/" + brand[0] + "/dump', '${CBP_CDRPATH}/output/" + brand[0] + "/dump/temp', 'Y', 'Y');" #对于b_开头的BMP话单,是在${HOME}/cdr/output/{BrandID}/normal elif prefix[0][:2] == 'b_': print "insert into BP_DATA_STORE values ('" + str(prefix[3] + brand[1]) + "', 'R5_" + prefix[0] + "', 'unl', '36700160', '100000', '600', '${HOME}/cdr/output/" + brand[0] + "/normal', '${HOME}/cdr/output/" + brand[0] + "/normal/temp', 'N', 'Y');" else: print "insert into BP_DATA_STORE values ('" + str(prefix[3] + brand[1]) + "', 'R5_" + prefix[0] + "', '.unl', '36700160', '100000', '600', '${CBP_CDRPATH}/output/" + brand[0] + "/normal', '${CBP_CDRPATH}/output/" + brand[0] + "/normal/temp', 'Y', 'Y');" def modify_EF_CDR_OUCTPUT_CFG(): for prefix in fields["prefix"]: DATA_STORE_ID = prefix[3] for event in prefix[2]: STD_EVT_TYPE_ID = event[0] if STD_EVT_TYPE_ID == 1: CDR_FILE_OUT_ID = event[2] else: CDR_FILE_OUT_ID = STD_EVT_TYPE_ID NORMAL_DATA_STORE_ID = DATA_STORE_ID ERROR_DATA_STORE_ID = DATA_STORE_ID RERATING_DATA_STORE_ID = DATA_STORE_ID if STD_EVT_TYPE_ID != 1 : ERROR_ORI_DATA_STORE_ID = 103 ROLLBACK_DATA_STORE_ID = 108 else: ERROR_ORI_DATA_STORE_ID = DATA_STORE_ID ROLLBACK_DATA_STORE_ID = DATA_STORE_ID #修改现有的数据 print "update ef_cdr_output_cfg set NORMAL_DATA_STORE_ID = %d, ERROR_DATA_STORE_ID = %d, RERATING_DATA_STORE_ID = %d, ERROR_ORI_DATA_STORE_ID = %d, ROLLBACK_DATA_STORE_ID = %d where CDR_FILE_OUT_ID = %d;"\ %(NORMAL_DATA_STORE_ID, ERROR_DATA_STORE_ID, RERATING_DATA_STORE_ID, ERROR_ORI_DATA_STORE_ID, ROLLBACK_DATA_STORE_ID, CDR_FILE_OUT_ID) def create_EF_CDR_OUCTPUT_CFG(): REC_ID = 1000000 print "delete from ef_cdr_output_cfg where rec_id >= 1000000;" for brand in fields["brand"]: BRAND_ID = brand[0] BE_ID = brand[2] BE_CODE = brand[3] DATA_STORE_ID_offset = brand[1] for prefix in fields["prefix"]: SEQ_NORMAL_CDR_ID = prefix[1] DATA_STORE_ID = prefix[3] + DATA_STORE_ID_offset for event_cdrType in prefix[2]: STD_EVT_TYPE_ID = event_cdrType[0] if STD_EVT_TYPE_ID != 1: CDR_FILE_OUT_ID = STD_EVT_TYPE_ID else: CDR_FILE_OUT_ID = event_cdrType[2] CDR_FILE_OUT_TYPE = event_cdrType[1] NORMAL_DATA_STORE_ID = DATA_STORE_ID 
ERROR_DATA_STORE_ID = DATA_STORE_ID RERATING_DATA_STORE_ID = DATA_STORE_ID if STD_EVT_TYPE_ID != 1 : ERROR_ORI_DATA_STORE_ID = 103 ROLLBACK_DATA_STORE_ID = 108 else: ERROR_ORI_DATA_STORE_ID = DATA_STORE_ID ROLLBACK_DATA_STORE_ID = DATA_STORE_ID print "insert into ef_cdr_output_cfg values (%d, '%d', %d, '%s', '%s', %d, %d, %d, %d, '%s', %d, %d, '%s', '%s', %d, %d, '%s', '%s');" \ %(CDR_FILE_OUT_ID, CDR_FILE_OUT_TYPE, STD_EVT_TYPE_ID, '*', #PAYMENT_MODE, 'N', #TEST_CDR_FLAG, NORMAL_DATA_STORE_ID, ERROR_DATA_STORE_ID, ERROR_ORI_DATA_STORE_ID, BE_ID, BE_CODE, RERATING_DATA_STORE_ID, ROLLBACK_DATA_STORE_ID, '', #COND_EXPR_TEXT, '', #COND_EXPR_CODE, BRAND_ID, REC_ID, "SEQ_ERR_CDR_ID", #SEQ_ERR_CDR_ID, SEQ_NORMAL_CDR_ID) REC_ID += 1 if __name__ == '__main__': create_BP_DATA_STORE() modify_EF_CDR_OUCTPUT_CFG() create_EF_CDR_OUCTPUT_CFG()
python
import logging

from django.core.management import BaseCommand
from django.core.management import call_command


class Command(BaseCommand):
    help = 'This command invokes all the data import commands'

    def handle(self, *args, **options):
        logger = logging.getLogger(__name__)
        try:
            call_command('import_organization_data')
        except Exception as ex:
            logger.error(ex)
        try:
            call_command('import_affiliated_committers_data')
        except Exception as ex:
            logger.error(ex)
        try:
            call_command('import_outside_committers_data')
        except Exception as ex:
            logger.error(ex)
        try:
            call_command('import_outside_projects_data')
        except Exception as ex:
            logger.error(ex)
        try:
            call_command('import_portfolio_projects_data')
        except Exception as ex:
            logger.error(ex)
        return logger.info('All OpenHub data is imported')
python
"""Preprocess""" import numpy as np from scipy.sparse import ( csr_matrix, ) from sklearn.utils import sparsefuncs from skmisc.loess import loess def select_variable_genes(adata, layer='raw', span=0.3, n_top_genes=2000, ): """Select highly variable genes. This function implenments the method 'vst' in Seurat v3. Inspired by Scanpy. Parameters ---------- adata: AnnData Annotated data matrix. layer: `str`, optional (default: 'raw') The layer to use for calculating variable genes. span: `float`, optional (default: 0.3) Loess smoothing factor n_top_genes: `int`, optional (default: 2000) The number of genes to keep Returns ------- updates `adata` with the following fields. variances_norm: `float`, (`adata.var['variances_norm']`) Normalized variance per gene variances: `float`, (`adata.var['variances']`) Variance per gene. means: `float`, (`adata.var['means']`) Means per gene highly_variable: `bool` (`adata.var['highly_variable']`) Indicator of variable genes """ if layer is None: X = adata.X else: X = adata.layers[layer].astype(np.float64).copy() mean, variance = sparsefuncs.mean_variance_axis(X, axis=0) variance_expected = np.zeros(adata.shape[1], dtype=np.float64) not_const = variance > 0 model = loess(np.log10(mean[not_const]), np.log10(variance[not_const]), span=span, degree=2) model.fit() variance_expected[not_const] = 10**model.outputs.fitted_values N = adata.shape[0] clip_max = np.sqrt(N) clip_val = np.sqrt(variance_expected) * clip_max + mean X = csr_matrix(X) mask = X.data > clip_val[X.indices] X.data[mask] = clip_val[X.indices[mask]] squared_X_sum = np.array(X.power(2).sum(axis=0)) X_sum = np.array(X.sum(axis=0)) norm_gene_var = (1 / ((N - 1) * variance_expected)) \ * ((N * np.square(mean)) + squared_X_sum - 2 * X_sum * mean ) norm_gene_var = norm_gene_var.flatten() adata.var['variances_norm'] = norm_gene_var adata.var['variances'] = variance adata.var['means'] = mean ids_top = norm_gene_var.argsort()[-n_top_genes:][::-1] adata.var['highly_variable'] = np.isin(range(adata.shape[1]), ids_top) print(f'{n_top_genes} variable genes are selected.')
python
import asyncio

import pandas as pd  # type:ignore

from PoEQuery import account_name, league_id, realm
from PoEQuery.official_api_async import stash_tab
from PoEQuery.stash_tab_result import StashTabResult

STASH_URL = "https://www.pathofexile.com/character-window/get-stash-items"


def get_tab_overview():
    params = {
        "accountName": account_name,
        "realm": realm,
        "league": league_id,
        "tabIndex": 0,
        "tabs": 1,
    }
    response = asyncio.run(stash_tab(params=params))
    return response.json()


def get_tab_index(tab_index):
    params = {
        "accountName": account_name,
        "realm": realm,
        "league": league_id,
        "tabIndex": tab_index,
    }
    response = asyncio.run(stash_tab(params=params))
    return response.json()


df = pd.DataFrame()

stash_tab_results = StashTabResult(get_tab_overview())
print(stash_tab_results.tabs)
for tab in stash_tab_results.tabs:
    if tab.name in ["LOW LEVEL BREACH"]:
        df = pd.DataFrame()
        tab_results = StashTabResult(get_tab_index(tab_index=tab.index))
        for item in tab_results.items:
            df = df.append(
                {"type": item.type, "count": item.stack_size}, ignore_index=True
            )
        print(tab.name, df)
python
import aws_cdk as cdk

import constants
from deployment import UserManagementBackend
from toolchain import Toolchain

app = cdk.App()

# Development stage
UserManagementBackend(
    app,
    f"{constants.APP_NAME}-Dev",
    env=constants.DEV_ENV,
    api_lambda_reserved_concurrency=constants.DEV_API_LAMBDA_RESERVED_CONCURRENCY,
    database_dynamodb_billing_mode=constants.DEV_DATABASE_DYNAMODB_BILLING_MODE,
)

# Continuous deployment and pull request validation
Toolchain(
    app,
    f"{constants.APP_NAME}-Toolchain",
    env=constants.TOOLCHAIN_ENV,
)

app.synth()
python
import os

from psycopg2 import connect


def connect_to_db(config=None):
    db_name = os.getenv("DATABASE_URL")
    conn = connect(db_name)
    conn.set_session(autocommit=True)
    return conn


def create_users_table(cur):
    cur.execute(
        """CREATE TABLE IF NOT EXISTS politico.user (
        id SERIAL NOT NULL,
        national_id int NOT NULL PRIMARY KEY,
        firstname VARCHAR (100) NOT NULL,
        lastname VARCHAR (100) NOT NULL,
        othername VARCHAR (100),
        email VARCHAR (100) NOT NULL,
        phone VARCHAR (100) NOT NULL,
        isadmin BOOLEAN NOT NULL,
        password VARCHAR (250) NOT NULL,
        passporturl VARCHAR (100) NOT NULL,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP);""")


def init_db(config=None):
    conn = connect_to_db()
    cur = conn.cursor()
    cur.execute("""CREATE SCHEMA IF NOT EXISTS politico;""")
    create_users_table(cur)
    print('Database created successfully')


if __name__ == '__main__':
    init_db()
python
# -*- encoding: utf-8 -*-
# Copyright 2015 - Alcatel-Lucent
# Copyright © 2014-2015 eNovance
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

import cProfile


def recursive_keypairs(d, separator='.'):
    # taken from ceilometer and gnocchi
    for name, value in sorted(d.items()):
        if isinstance(value, dict):
            for subname, subvalue in recursive_keypairs(value, separator):
                yield ('%s%s%s' % (name, separator, subname), subvalue)
        else:
            yield name, value


def opt_exists(conf_parent, opt):
    try:
        return conf_parent[opt]
    except cfg.NoSuchOptError:
        return False


def do_cprofile(func):
    def profiled_func(*args, **kwargs):
        profile = cProfile.Profile()
        try:
            profile.enable()
            result = func(*args, **kwargs)
            profile.disable()
            return result
        finally:
            profile.print_stats('cumulative')
    return profiled_func
python
# Copyright 2016 Chr. Hansen A/S and The Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from cameo import fba
from cameo.core.strain_design import StrainDesign
from cameo.strain_design.heuristic.evolutionary.objective_functions import biomass_product_coupled_yield

from marsi.cobra.strain_design.evolutionary import OptMet, process_metabolite_knockout_solution

CURRENT_DIRECTORY = os.path.dirname(__file__)
FIXTURES = os.path.join(CURRENT_DIRECTORY, 'fixtures')


def test_design_processing_function(model):
    orignal_oxigen_uptake = model.reactions.EX_o2_e.lower_bound
    target = "EX_succ_e"
    substrate = "EX_glc__D_e"
    objective_function = biomass_product_coupled_yield(model.biomass, target, substrate)
    solution = ["mal__D"]
    try:
        model.reactions.EX_o2_e.lower_bound = 0
        result = process_metabolite_knockout_solution(model, solution, fba, {}, model.biomass,
                                                      target, substrate, objective_function)
    finally:
        model.reactions.EX_o2_e.lower_bound = orignal_oxigen_uptake

    design, size, fva_min, fva_max, target_flux, biomass_flux, _yield, fitness = result

    assert isinstance(design, StrainDesign)
    assert size == len(solution)
    assert size == 1
    assert fva_min > 0
    assert fva_max >= fva_min
    assert target_flux > 0
    assert biomass_flux > 0
    assert _yield > 0
    assert fitness > 0


def test_succinate(model):
    optimization = OptMet(model=model, plot=False)
    # optimization_kwargs = dict(max_evaluations=1000, max_knockouts=6, target="succ_e",
    #                            substrate="EX_glc__D_e", biomass=model.biomass)
    assert optimization.manipulation_type == "metabolites"
    # result = optimization.run(**optimization_kwargs)
    #
    # assert isinstance(result, OptMetResult)
    # assert len(result) > 0
python
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ''' """ Created on Sun Sep 13 15:45:26 2020 @author: samuel """ import numpy as np import pandas as pd df = pd.read_csv( '/home/samuel/Bureau/zip.train', sep=" ", header=None) digits = df.to_numpy() classes = digits[:, 0] digits = digits[:, 1:-1] # %% bdd = [] X = [] y = [] for i in range(10): bdd.append(digits[classes == i][:100]) X.append(digits[classes == i][:100][:128]) y.append(digits[classes == i][:100][128:]) # %% gamma = 0.01 kernel = ("gaussian", gamma) # %% from sklearn.utils.random import sample_without_replacement from sklearn.model_selection import train_test_split n_train = 800 bdd_train = [None] * 10 bdd_test = [None] * 10 for i in range(10): # bdd_train.append(bdd[i][sample_without_replacement(n_population=100, n_samples=n_train//10)]) bdd_train[i], bdd_test[i] = train_test_split(bdd[i], train_size=n_train // 10) bdd_train = np.concatenate(bdd_train) bdd_test = np.concatenate(bdd_test) np.random.shuffle(bdd_train) np.random.shuffle(bdd_test) X_train = bdd_train[:, :128] y_train = bdd_train[:, 128:] X_test = bdd_test[:, :128] y_test = bdd_test[:, 128:] # %% from stpredictions.models.OK3._classes import OK3Regressor, ExtraOK3Regressor from stpredictions.models.OK3._forest import RandomOKForestRegressor, ExtraOKTreesRegressor ok3 = OK3Regressor(kernel=kernel, max_leaf_nodes=50).fit(X_train, y_train) extraok3 = ExtraOK3Regressor(kernel=kernel, max_leaf_nodes=50).fit(X_train, y_train) okforest = RandomOKForestRegressor(kernel=kernel, max_leaf_nodes=50).fit(X_train, y_train) extraokforest = ExtraOKTreesRegressor(kernel=kernel, max_leaf_nodes=50).fit(X_train, y_train) # %% y_pred1 = ok3.predict(X_test) y_pred2 = extraok3.predict(X_test) y_pred3 = okforest.predict(X_test) y_pred4 = extraokforest.predict(X_test) # %% mse1 = np.mean( np.sum((y_test - y_pred1) ** 2, axis=1)) # gamma 0.01, maxleaf=50 ==> 70 ; gamma 0.01, maxleaf=10 ==> 77 mse2 = np.mean(np.sum((y_test - y_pred2) ** 2, axis=1)) mse3 = np.mean(np.sum((y_test - y_pred3) ** 2, axis=1)) mse4 = np.mean( np.sum((y_test - y_pred4) ** 2, axis=1)) # gamma 0.01, maxleaf=50 ==> 55 ; gamma 0.01, maxleaf=10 ==> 70 rbf_loss1 = 2 * (1 - np.exp(- gamma * mse1)) rbf_loss2 = 2 * (1 - np.exp(- gamma * mse2)) rbf_loss3 = 2 * (1 - np.exp(- gamma * mse3)) rbf_loss4 = 2 * (1 - np.exp(- gamma * mse4)) print("MSE 1 :", mse1) print("MSE 2 :", mse2) print("MSE 3 :", mse3) print("MSE 4 :", mse4) print("RBF loss 1 : ", rbf_loss1) print("RBF loss 2 : ", rbf_loss2) print("RBF loss 3 : ", rbf_loss3) print("RBF loss 4 : ", rbf_loss4) # %% # import matplotlib.pyplot as plt test_ex = 3 plt.imshow(X_test[test_ex].reshape(8, 16), cmap='gray') plt.title("Input upper image") plt.show() plt.imshow(y_test[test_ex].reshape(8, 16), cmap='gray') plt.title("True output lower image") plt.show() plt.imshow(y_pred[test_ex].reshape(8, 16), cmap='gray') plt.title("Predicted output lower image") plt.show() plt.imshow(np.vstack((X_test[test_ex].reshape(8, 16), y_test[test_ex].reshape(8, 16), -np.ones((1, 16)), X_test[test_ex].reshape(8, 16), y_pred[test_ex].reshape(8, 16))), cmap='gray') plt.title("Up : True image\nDown : Image with the predicted lower half") # plt.imsave('/home/samuel/Bureau/prediction_ex_'+str(test_ex)+'.png', np.vstack((X_test[test_ex].reshape(8,16), # y_test[test_ex].reshape(8,16), # -np.ones((1,16)), # X_test[test_ex].reshape(8,16), # y_pred[test_ex].reshape(8,16))), # cmap='gray') # %% pixels_importances = ok3.feature_importances_ plt.imshow(pixels_importances.reshape(8, 16), cmap='gray') 
plt.title("Image of pixels (features) importances") plt.show() '''
python
# -*- python -*- import os import crochet from twisted.application.internet import StreamServerEndpointService from twisted.application import service from twisted.internet import reactor, endpoints from twisted.web.wsgi import WSGIResource import weasyl.polecat import weasyl.wsgi import weasyl.define as d from libweasyl import cache threadPool = reactor.getThreadPool() threadPool.adjustPoolsize(minthreads=6, maxthreads=12) weasylResource = WSGIResource(reactor, threadPool, weasyl.wsgi.wsgi_app) if os.environ.get('WEASYL_SERVE_STATIC_FILES'): weasylResource = weasyl.polecat.TryChildrenBeforeLeaf(weasylResource) staticResource = weasyl.polecat.NoDirectoryListingFile( os.path.join(os.environ['WEASYL_APP_ROOT'], 'static')) cssResource = weasyl.polecat.NoDirectoryListingFile( os.path.join(os.environ['WEASYL_APP_ROOT'], 'build/css')) weasylResource.putChild('static', staticResource) weasylResource.putChild('css', cssResource) rewriters = [weasyl.polecat.rewriteSubmissionUploads] if os.environ.get('WEASYL_REVERSE_PROXY_STATIC'): from twisted.web import proxy weasylResource.putChild( '_weasyl_static', proxy.ReverseProxyResource('www.weasyl.com', 80, '/static')) rewriters.append(weasyl.polecat.rewriteNonlocalImages) from twisted.web.rewrite import RewriterResource weasylResource = RewriterResource(weasylResource, *rewriters) requestLogHost = d.config_read_setting('request_log_host', section='backend') if requestLogHost: requestLogHost, _, requestLogPort = requestLogHost.partition(':') requestLogPort = int(requestLogPort) requestLogHost = requestLogHost, requestLogPort site = weasyl.polecat.WeasylSite(weasylResource) siteStats = weasyl.polecat.WeasylSiteStatsFactory(site, threadPool, reactor, requestLogHost=requestLogHost) weasyl.define.statsFactory = siteStats application = service.Application('weasyl') def attachServerEndpoint(factory, endpointEnvironKey, defaultString=None): "Generates a server endpoint from an environment variable and attaches it to the application." 
description = os.environ.get(endpointEnvironKey, defaultString) if not description: return endpoint = endpoints.serverFromString(reactor, description) StreamServerEndpointService(endpoint, factory).setServiceParent(application) attachServerEndpoint(site, 'WEASYL_WEB_ENDPOINT', 'tcp:8080:interface=127.0.0.1') attachServerEndpoint(siteStats, 'WEASYL_WEB_STATS_ENDPOINT', 'tcp:8267:interface=127.0.0.1') if d.config_read_bool('run_periodic_tasks', section='backend'): from weasyl.cron import run_periodic_tasks weasyl.polecat.PeriodicTasksService(reactor, run_periodic_tasks).setServiceParent(application) if not d.config_read_bool('rough_shutdowns', section='backend'): reactor.addSystemEventTrigger('before', 'shutdown', site.gracefullyStopActiveClients) statsdServer = d.config_read_setting('server', section='statsd') if statsdServer: statsdHost, _, statsdPort = statsdServer.rpartition(':') statsdPort = int(statsdPort) import socket from txstatsd.client import TwistedStatsDClient, StatsDClientProtocol from txstatsd.metrics.metrics import Metrics from txstatsd.report import ReportingService namespace = d.config_read_setting('namespace', section='statsd') if namespace is None: namespace = os.environ.get('WEASYL_STATSD_NAMESPACE') if namespace is None: namespace = socket.gethostname().split('.')[0] statsdClient = TwistedStatsDClient.create(statsdHost, statsdPort) site.metrics = Metrics(connection=statsdClient, namespace=namespace) reporting = ReportingService() reporting.setServiceParent(application) siteStats.metricService().setServiceParent(application) protocol = StatsDClientProtocol(statsdClient) reactor.listenUDP(0, protocol) crochet.no_setup() cache.region.configure( 'txyam', arguments=dict( reactor=reactor, url=d.config_read_setting( 'servers', 'tcp:127.0.0.1:11211', 'memcached').split(), retryDelay=10, timeOut=0.4, ), wrap=[cache.ThreadCacheProxy, cache.JSONProxy], replace_existing_backend=True )
python
# -*- coding: utf8 -*- from datetime import date from nba.model.utils import oddsshark_team_id_lookup from sqlalchemy import Column, Date, Float, Integer, String, ForeignKey from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship, backref NOP_TO_NOH_DATE = date(2013, 10, 29) CHA_TO_CHO_DATE = date(2014, 10, 27) Base = declarative_base() class Team(Base): """ Represents an NBA team """ __tablename__ = 'team' __table_args__ = {'sqlite_autoincrement': True} id = Column(Integer, primary_key=True) name = Column(String) abbr = Column(String) city = Column(String) def get_odds_url(self, year): return "http://www.oddsshark.com/stats/gamelog/basketball/nba/{0}/{1}".format(oddsshark_team_id_lookup.get(self.name), year) class GameFeature(Base): """ Represents the statistics associated with a game or range of games. """ __tablename__ = 'game_feature' __table_args__ = {'sqlite_autoincrement': True} id = Column(Integer, primary_key=True) score = Column(Integer) # Final score of team fg = Column(Integer) # Field Goals made fga = Column(Integer) # Field Goals attempted fgp = Column(Float) # Field goal percentage threep = Column(Integer) # three pointers made threepa = Column(Integer) # three pointers attempted threepp = Column(Float) # three pointers percentage ft = Column(Integer) # Free Throws made fta = Column(Integer) # Free Throws attempted ftp = Column(Float) # Free throws % orb = Column(Integer) # Offensive Rebounds drb = Column(Integer) # Defensive Rebounds trb = Column(Integer) # Total Rebounds ast = Column(Integer) # Assists stl = Column(Integer) # Steals blk = Column(Integer) # Blocks tov = Column(Integer) # Turnovers pf = Column(Integer) # Personal Fouls tsp = Column(Float) # True Shooting Percentage efgp = Column(Float) # Effective Field Goal Percentage threepar = Column(Float) # three Point attempt rate ftr = Column(Float) # FT attempt rate orbp = Column(Float) # Offensive Rebound Percentage drbp = Column(Float) # Defensive Rebound Percentage trpb = Column(Float) # Total Rebound Percentage astp = Column(Float) # Assist rate percentage stlp = Column(Float) # Steal rate percentage blkp = Column(Float) # Block rate percentage tovp = Column(Float) # Turn over rate percentage ortg = Column(Float) # Offensive Rating drtg = Column(Float) # Defensive Rating ftfga = Column(Float) # Ft/FGA Rating pace = Column(Float) # PACE class Odds(Base): __tablename__ = 'odds' __table_args__ = {'sqlite_autoincrement': True} id = Column(Integer, primary_key=True) spread = Column(Float) overunder = Column(Float) class Game(Base): """ Represents a game with keys to the teams and features """ __tablename__ = 'game' __table_args__ = {'sqlite_autoincrement': True} id = Column(Integer, primary_key=True) home_id = Column(ForeignKey('team.id')) home = relationship("Team", backref=backref("game_home", order_by=id), foreign_keys=[home_id]) home_features_id = Column(ForeignKey('game_feature.id')) home_features = relationship("GameFeature", backref=backref("game_home_features", order_by=id), foreign_keys=[home_features_id]) away_id = Column(ForeignKey('team.id')) away = relationship("Team", backref=backref("game_away", order_by=id), foreign_keys=[away_id]) away_features_id = Column(ForeignKey('game_feature.id')) away_features = relationship("GameFeature", backref=backref("game_away_features", order_by=id), foreign_keys=[away_features_id]) date = Column(Date) odds_id = Column(ForeignKey('odds.id')) odds = relationship("Odds", backref=backref("game", order_by=id)) def get_br_url(self): 
"""Returns the URL for the basketball-reference.com box scores""" if self.home.abbr == 'NOP' and self.date < NOP_TO_NOH_DATE: abbr = 'NOH' elif self.home.abbr == "CHA" and self.date > CHA_TO_CHO_DATE: abbr = "CHO" else: abbr = self.home.abbr return "http://www.basketball-reference.com/boxscores/{0}{1}{2}0{3}.html".format(self.date.year, str(self.date.month).zfill(2), str(self.date.day).zfill(2), abbr) class Rollup(Base): """ Contains rollup data for a set of features betweeen an inclusive range of games. """ __tablename__ = "game_rollup" __table_args__ = {'sqlite_autoincrement': True} id = Column(Integer, primary_key=True) team_id = Column(ForeignKey('team.id')) team = relationship("Team", backref=backref("game_rollup", order_by=id)) start_id = Column(ForeignKey('game.id')) start = relationship("Game", backref=backref("game_rollup_start", order_by=id), foreign_keys=[start_id]) end_id = Column(ForeignKey('game.id')) end = relationship("Game", backref=backref("game_rollup_end", order_by=id), foreign_keys=[end_id]) features_id = Column(ForeignKey('game_feature.id')) features = relationship("GameFeature", backref=backref("game_rollup", order_by=id))
python
""" Quick and dirty MQTT door sensor """ import time import network import ubinascii import machine from umqttsimple import MQTTClient import esp import adcmode try: import secrets except: import secrets_sample as secrets try: ### Create wifi network sta_if = network.WLAN(network.STA_IF) sta_if.active(True) print("wifi: connecting") sta_if.connect(secrets.SSID, secrets.PASSWD) # Connect to an AP try: sta_if.ifconfig((secrets.IPADDR, secrets.MASK, secrets.GW, secrets.DNS)) except: print("using DHCP...") ### Setup ADC to measure VCC if not adcmode.set_adc_mode(adcmode.ADC_MODE_VCC): print("ADC mdode changed in flash - restart needed") machine.reset() vcc = machine.ADC(1).read()/1024.0 while not sta_if.isconnected(): time.sleep(0.5) print("wifi connected: ", sta_if.ifconfig()) ### connect to MQTT CLIENT_ID = ubinascii.hexlify(machine.unique_id()) client = MQTTClient(CLIENT_ID, secrets.MQTT_SVR, user=secrets.MQTT_USER, password=secrets.MQTT_PWD ) client.connect() print("mqtt: connected") payload = secrets.MQTT_PAYLOAD.format(vcc) client.publish(secrets.MQTT_TOPIC, payload) print("mqtt: published %s: %s"%(secrets.MQTT_TOPIC, payload)) client.disconnect() print("mqtt: disconnected") except Exception as e: print( "FATAL: ", type(e) ) print( " ", repr(e) ) time.sleep(0.1) # without this, deepsleep doesn't work well esp.deepsleep(0)
python
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import re def test_invoked_commands_still_work_even_though_they_are_no_customizable(lib, pythondir): # given a command that is calling another using ctx.invoke (pythondir / 'mygroup.py').write_text(""" import click from clk.decorators import group, flag @group() def mygroup(): pass @mygroup.command() @flag("--shout") def invokedcommand(shout): message = "invokedcommand" if shout: message = message.upper() print(message) @mygroup.command() def invokingcommand(): ctx = click.get_current_context() ctx.invoke(invokedcommand) """) # and I customize the invokedcommand lib.cmd('parameter set mygroup.invokedcommand --shout') # when I call the customized command alone output = lib.cmd('mygroup invokedcommand') # then I can see the customization in action assert output == 'INVOKEDCOMMAND' # when I call the invoking command output = lib.cmd('mygroup invokingcommand') # then I can see the output of the invokedcommand but without the # customization (because it was not called using a path, hence the notion of # path itself does not make sense in this context). assert output == 'invokedcommand' def test_broken_command_dont_make_clk_crash(lib, pythondir): # given a command that is poorly written (pythondir / 'a.py').write_text(""" raise Exception("test") """) # when I create an alias to that command output = lib.cmd('alias set b a', with_err=True) # then the output indicates the command could not be loaded assert 'error: Found the command a in the resolver customcommand but could not load it.' in output def test_param_config_default_value_callback_that_depends_on_another_param(pythondir, lib): # given a command to perform http request with a default url lazily computed # that depends on some other value (pythondir / 'http.py').write_text(""" from clk.config import config from clk.decorators import group, param_config def default(): if config.http.api: return f"http://{config.http.api}" @group() @param_config('http', '--api') @param_config('http', '--url', default=default) def http(): "" @http.command() def get(): print("Getting " + config.http.url) """) # when I use the command without providing the first value, then I get the # appropriate default value assert lib.cmd('http --api myapi get') == 'Getting http://myapi' def test_dynamic_option(pythondir, lib): # given a command to perform http request with a default url lazily computed # that depends on some other value (pythondir / 'http.py').write_text(""" from clk.config import config from clk.decorators import group, option class Http: def dump(self): print(self.url) def default(): if config.http.api: return f"http://{config.http.api}" @group() @option('--api', dynamic=Http) @option('--url', dynamic=Http, default=default) def http(api, url): "" @http.command() def get(): print("Getting " + config.http.url) @http.command() def dump(): config.http.dump() """) # when I use the command without providing the first value, then I get the # appropriate default value assert lib.cmd('http --api myapi get') == 'Getting http://myapi' assert lib.cmd('http --api myapi dump') == 'http://myapi' def test_param_config_default_value_callback(pythondir, lib): # given a command to perform http request with a default url lazily computed (pythondir / 'http.py').write_text(""" from clk.config import config from clk.decorators import group, param_config def default(): return 'http://myapi' @group() @param_config('http', '--url', default=default) def http(): "" @http.command() def get(): print("Getting " + config.http.url) """) # 
when I use the command without providing a value, then I get the default value assert lib.cmd('http get') == 'Getting http://myapi' def test_param_config_default_value(pythondir, lib): # given a command to perform http request with a default url (pythondir / 'http.py').write_text(""" from clk.config import config from clk.decorators import group, param_config @group() @param_config('http', '--url', default='http://myapi') def http(): "" @http.command() def get(): print("Getting " + config.http.url) """) # when I use the command without providing a value, then I get the default value assert lib.cmd('http get') == 'Getting http://myapi' def test_command(lib): output = lib.cmd('command display') assert re.search(r'flowdep\s+Manipulate command flow dependencies\.', output)
python
from unittest import TestCase

import pytest
import torch

import pyro
import pyro.infer
from pyro.distributions import Bernoulli, Normal
from pyro.infer import EmpiricalMarginal
from tests.common import assert_equal


class HMMSamplingTestCase(TestCase):

    def setUp(self):
        # simple Gaussian-emission HMM
        def model():
            p_latent = pyro.param("p1", torch.tensor([[0.7], [0.3]]))
            p_obs = pyro.param("p2", torch.tensor([[0.9], [0.1]]))

            latents = [torch.ones(1, 1)]
            observes = []
            for t in range(self.model_steps):
                latents.append(
                    pyro.sample("latent_{}".format(str(t)),
                                Bernoulli(torch.index_select(p_latent, 0, latents[-1].view(-1).long()))))

                observes.append(
                    pyro.sample("observe_{}".format(str(t)),
                                Bernoulli(torch.index_select(p_obs, 0, latents[-1].view(-1).long())),
                                obs=self.data[t]))
            return torch.sum(torch.cat(latents))

        self.model_steps = 3
        self.data = [torch.ones(1, 1) for _ in range(self.model_steps)]
        self.model = model


class NormalNormalSamplingTestCase(TestCase):

    def setUp(self):
        pyro.clear_param_store()

        def model():
            loc = pyro.sample("loc", Normal(torch.zeros(1), torch.ones(1)))
            xd = Normal(loc, torch.ones(1))
            pyro.sample("xs", xd, obs=self.data)
            return loc

        def guide():
            return pyro.sample("loc", Normal(torch.zeros(1), torch.ones(1)))

        # data
        self.data = torch.zeros(50, 1)
        self.loc_mean = torch.zeros(1)
        self.loc_stddev = torch.sqrt(torch.ones(1) / 51.0)

        # model and guide
        self.model = model
        self.guide = guide


class ImportanceTest(NormalNormalSamplingTestCase):

    @pytest.mark.init(rng_seed=0)
    def test_importance_guide(self):
        posterior = pyro.infer.Importance(self.model, guide=self.guide, num_samples=5000).run()
        marginal = EmpiricalMarginal(posterior)
        assert_equal(0, torch.norm(marginal.mean - self.loc_mean).item(), prec=0.01)
        assert_equal(0, torch.norm(marginal.variance.sqrt() - self.loc_stddev).item(), prec=0.1)

    @pytest.mark.init(rng_seed=0)
    def test_importance_prior(self):
        posterior = pyro.infer.Importance(self.model, guide=None, num_samples=10000).run()
        marginal = EmpiricalMarginal(posterior)
        assert_equal(0, torch.norm(marginal.mean - self.loc_mean).item(), prec=0.01)
        assert_equal(0, torch.norm(marginal.variance.sqrt() - self.loc_stddev).item(), prec=0.1)
python
#!/usr/bin/env python


"""
Setup script for fio-buffer
"""


import os

from setuptools import setup
from setuptools import find_packages


with open('README.rst') as f:
    readme = f.read().strip()


version = None
author = None
email = None
source = None
with open(os.path.join('fio_buffer', '__init__.py')) as f:
    for line in f:
        if line.strip().startswith('__version__'):
            version = line.split('=')[1].strip().replace('"', '').replace("'", '')
        elif line.strip().startswith('__author__'):
            author = line.split('=')[1].strip().replace('"', '').replace("'", '')
        elif line.strip().startswith('__email__'):
            email = line.split('=')[1].strip().replace('"', '').replace("'", '')
        elif line.strip().startswith('__source__'):
            source = line.split('=')[1].strip().replace('"', '').replace("'", '')
        elif None not in (version, author, email, source):
            break


setup(
    author=author,
    author_email=email,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Scientific/Engineering :: GIS'
    ],
    description="A Fiona CLI plugin for buffering geometries.",
    entry_points="""
        [fiona.fio_plugins]
        buffer=fio_buffer.core:buffer
    """,
    extras_require={
        'dev': ['pytest', 'pytest-cov']
    },
    include_package_data=True,
    install_requires=[
        'click>=0.3',
        'shapely',
        'fiona>=1.6'
    ],
    keywords='Fiona fio GIS vector buffer plugin',
    license="New BSD",
    long_description=readme,
    name='fio-buffer',
    packages=find_packages(),
    url=source,
    version=version,
    zip_safe=True
)
python
# from http://www.calazan.com/a-simple-python-script-for-backing-up-a-postgresql-database-and-uploading-it-to-amazon-s3/

import os
import sys
import subprocess
from optparse import OptionParser
from datetime import date, datetime, timedelta

import boto
from boto.s3.key import Key

# Amazon S3 settings.
AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")

DB_APP_NAME = "total-impact-core"  # should be in sync with AWS bucket name wrt staging/production


def get_database_cred_from_heroku_app():
    cmd_list = ['heroku', 'pg:credentials', 'DATABASE', '--app', DB_APP_NAME]
    ps = subprocess.Popen(cmd_list, stdout=subprocess.PIPE)
    output = ps.communicate()[0]
    cred_dict = dict([t.split("=") for t in output.splitlines()[1].replace('"', "").split(' ') if t])
    return cred_dict


def call_pg_dump(cred_dict, tablename, dumped_file):
    # -Fc is a compressed format
    cmd_list = ['PGPASSWORD=' + cred_dict["password"],
                'pg_dump',
                '-h', cred_dict["host"],
                '-p', cred_dict["port"],
                '-U', cred_dict["user"],
                '-Fc', cred_dict["dbname"],
                '-f', dumped_file,
                '--verbose',
                '--data-only']
    if tablename:
        cmd_list += ['-t', tablename]
    print cmd_list
    ps = subprocess.Popen(" ".join(cmd_list), stdout=subprocess.PIPE, shell=True)
    output = ps.communicate()[0]
    print output
    return output


def upload_to_s3(dumped_file, aws_filename, bucket_name=None):
    """
    Upload a file to an AWS S3 bucket.
    """
    if not bucket_name:
        bucket_name = os.getenv("AWS_BUCKET", "impactstory-uploads-local")
    conn = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
    bucket = conn.get_bucket(bucket_name)
    k = Key(bucket)
    k.key = aws_filename
    k.set_contents_from_filename(dumped_file)


def backup_table(cred_dict, tablename):
    dumped_file = tablename
    aws_filename = 'old-snaps/' + dumped_file + ".dump"
    output = call_pg_dump(cred_dict, tablename, dumped_file)
    try:
        print 'Uploading %s to Amazon S3...' % aws_filename
        upload_to_s3(dumped_file, aws_filename)
    except boto.exception.S3ResponseError:
        print 'Upload did not complete'


# from http://stackoverflow.com/questions/10688006/generate-a-list-of-datetimes-between-an-interval-in-python
def perdelta(start, end, delta):
    curr = start
    while curr < end:
        yield curr
        curr += delta


def main():
    parser = OptionParser()

    now = datetime.now()
    # four_months_ago = now + timedelta(days=-124)
    # two_months_ago = now + timedelta(days=-62)
    four_months_ago = now + timedelta(days=-4)
    two_months_ago = now + timedelta(days=-3)

    cred_dict = get_database_cred_from_heroku_app()
    for a_month in perdelta(four_months_ago, two_months_ago, timedelta(days=31)):
        tablename = a_month.strftime("snap_%Y%m")
        print tablename
        backup_table(cred_dict, tablename)


if __name__ == '__main__':
    main()

# restore the tables again with this
# heroku pgbackups:restore DATABASE 'https://s3.amazonaws.com/bucket_name/properties.dump'
python
""" Для поступления в вуз абитуриент должен предъявить результаты трех экзаменов в виде ЕГЭ, каждый из них оценивается целым числом от 0 до 100 баллов. При этом абитуриенты, набравшие менее 40 баллов (неудовлетворительную оценку) по любому экзамену из конкурса выбывают. Остальные абитуриенты участвуют в конкурсе по сумме баллов за три экзамена. В конкурсе участвует N человек, при этом количество мест равно K. Определите проходной балл, то есть такое количество баллов, что количество участников, набравших столько или больше баллов не превосходит K, а при добавлении к ним абитуриентов, набравших наибольшее количество баллов среди непринятых абитуриентов, общее число принятых абитуриентов станет больше K. Формат ввода Программа получает на вход количество мест K. Далее идут строки с информацией об абитуриентах, каждая из которых состоит из имени (текстовая строка содержащая произвольное число пробелов) и трех чисел от 0 до 100, разделенных пробелами. Используйте для ввода файл input.txt с указанием кодировки utf8 (для создания такого файла на своем компьютере в программе Notepad++ следует использовать кодировку UTF-8 без BOM). Формат вывода Программа должна вывести проходной балл в конкурсе. Выведенное значение должно быть минимальным баллом, который набрал абитуриент, прошедший по конкурсу. Также возможны две ситуации, когда проходной балл не определен. Если будут зачислены все абитуриенты, не имеющие неудовлетворительных оценок, программа должна вывести число 0. Если количество имеющих равный максимальный балл абитуриентов больше чем K, программа должна вывести число 1. Используйте для вывода файл output.txt с указанием кодировки utf8. Предупреждение Пожалуйста, тестируйте файловый ввод и вывод на своем компьютере. В этой задаче слушатели часто получают ошибки вроде RE на первом тесте, протестировав у себя с помощью консоли и просто заменив input() на чтение из файла перед сдачей. К сожалению, такую замену не всегда удается сделать без ошибок, и решение слушателей действительно перестает правильно работать даже на первом тесте. """ myFile = open("input.txt", "r", encoding="utf8") k = int(myFile.readline()) myList = [] for line in myFile: newLine = line.split() if int(newLine[-1]) >= 40 and int(newLine[-2]) >= 40 \ and int(newLine[-3]) >= 40: myList.append(newLine) myFile.close() myList.sort(key=lambda a: int(a[-1]) + int(a[-2]) + int(a[-3])) myList.reverse() konk = [] for i in myList: sum = int(i[-1]) + int(i[-2]) + int(i[-3]) konk.append(sum) n = len(konk) def konkurs(n, k, konk): if n <= k: return 0 elif konk[k] == konk[0]: return 1 for i in range(k, 0, -1): if konk[i] < konk[i - 1]: return konk[i - 1] print(konkurs(n, k, konk))
python
# -*- coding: utf-8 -*-
from discord.ext.commands import context

import settings


class GeneralContext(context.Context):
    """Expanded version of the Discord Context class.

    This class can be used outside of command functions, such as inside event
    handlers. It needs to be created manually.

    Attributes:
        channel(discord.Channel):
        server(discord.Server):
        user(discord.Member/User):
    """

    def __init__(self, **attrs):
        attrs["prefix"] = settings.BOT_PREFIX
        super().__init__(**attrs)
        self.channel = attrs.pop("channel", None)
        self.context = attrs.pop("context", None)
        self.server = attrs.pop("server", None)
        self.user = attrs.pop("user", None)

        self._extract_message()

    def _extract_message(self):
        """Assigns some of the message variables to this class's variables."""
        if self.context:
            self.message = self.context.message

        if self.message:
            self.channel = self.message.channel if not self.channel else self.channel
            self.server = self.message.server if not self.server else self.server
            self.user = self.message.author if not self.user else self.user
python
# coding=utf-8 # Copyright 2020 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Classes and functions to generate the OI Challenge 2019 dataset using Apache Beam.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import io import json import os from absl import logging import numpy as np import tensorflow.compat.v2 as tf import tensorflow_datasets.public_api as tfds beam = tfds.core.lazy_imports.apache_beam cv2 = tfds.core.lazy_imports.cv2 Metrics = beam.metrics.Metrics class ReadZipFn(beam.DoFn): """Iterates a zip file, yielding filenames and file contents.""" def process(self, zip_filepath): for filename, file in tfds.download.iter_archive( zip_filepath, tfds.download.ExtractMethod.ZIP): if filename.endswith(".jpg"): yield filename, file.read() class ProcessImageFn(beam.DoFn): """Resizes images, re-compresses them in JPEG and yields the result.""" def __init__(self, target_pixels, jpeg_quality=72): self._target_pixels = target_pixels self._jpeg_quality = [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality] self._images_failed = Metrics.counter(self.__class__, "images_failed") def __getstate__(self): return (self._target_pixels, self._jpeg_quality, self._images_failed) def __setstate__(self, state): self._target_pixels, self._jpeg_quality, self._images_failed = state def process(self, element): filename, content = element try: image = cv2.imdecode(np.fromstring(content, dtype=np.uint8), flags=3) except: logging.info("Exception raised while decoding image %s", filename) raise if image is None: self._images_failed.inc() logging.info("Image %s could not be decoded", filename) else: # GIF images contain a single frame. if len(image.shape) == 4: # rank=4 -> rank=3 image = image.reshape(image.shape[1:]) # Get image height and width. height, width, _ = image.shape actual_pixels = height * width # If necessary, resize the image to have at most self._target_pixels, # keeping the aspect ratio. if self._target_pixels and actual_pixels > self._target_pixels: factor = np.sqrt(self._target_pixels / actual_pixels) image = cv2.resize(image, dsize=None, fx=factor, fy=factor) # Encode the image with quality=72 and store it in a BytesIO object. 
_, buff = cv2.imencode(".jpg", image, self._jpeg_quality) yield filename, io.BytesIO(buff.tostring()) class CreateDetectionExampleFn(beam.DoFn): """Creates TFDS examples for the Detection track.""" def __init__(self, image_labels_filepath, box_labels_filepath, hierarchy_filepath, classes_filepath): self._image_labels_filepath = image_labels_filepath self._box_labels_filepath = box_labels_filepath self._hierarchy_filepath = hierarchy_filepath self._classes_filepath = classes_filepath self._load_info_from_files() def __getstate__(self): return (self._image_labels_filepath, self._box_labels_filepath, self._hierarchy_filepath, self._classes_filepath) def __setstate__(self, state): (self._image_labels_filepath, self._box_labels_filepath, self._hierarchy_filepath, self._classes_filepath) = state self._load_info_from_files() def _load_info_from_files(self): self._image2labels = None self._image2boxes = None self._hierarchy = None self._mid2int = None if self._image_labels_filepath: self._image2labels = load_image_level_labels(self._image_labels_filepath) if self._box_labels_filepath: self._image2boxes = load_box_level_labels(self._box_labels_filepath) if self._hierarchy_filepath: self._hierarchy = load_class_hierarchy(self._hierarchy_filepath) if self._classes_filepath: class_descriptions = load_class_descriptions(self._classes_filepath) self._mid2int = {mid: i for i, (mid, _) in enumerate(class_descriptions)} def process(self, element): filename, image_bytes = element image_id = os.path.basename(filename).split(".")[0] # Image-level annotations. objects = [] if self._image2labels: for label, source, confidence in self._image2labels[image_id]: objects.append({ "label": self._mid2int[label], "source": source, "confidence": confidence, }) # Bounding box-level annotations. bobjects = [] if self._image2boxes: for annotation in self._image2boxes[image_id]: label, xmin, xmax, ymin, ymax, is_group_of = annotation bbox = tfds.features.BBox(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax) bobjects.append({ "label": self._mid2int[label], "bbox": bbox, "is_group_of": is_group_of, }) yield image_id, { "id": image_id, "image": image_bytes, "objects": objects, "bobjects": bobjects, } def load_image_level_labels(filepath): """Returns a dictionary mapping image IDs to a list of image-level labels.""" image2labels = collections.defaultdict(list) with tf.io.gfile.GFile(filepath, "r") as csvfile: reader = csv.reader(csvfile) next(reader) # Skip header. for row in reader: if len(row) == 3: image_id, label, confidence = row source = "verification" elif len(row) == 4: image_id, source, label, confidence = row image2labels[image_id].append((label, source, float(confidence))) return image2labels def load_box_level_labels(filepath): """Returns a dictionary mapping image IDs to a list of bounding box annotations.""" image2boxes = collections.defaultdict(list) with tf.io.gfile.GFile(filepath, "r") as csvfile: reader = csv.reader(csvfile) next(reader) # Skip header. 
for row in reader: if len(row) == 7: image_id, label, xmin_s, xmax_s, ymin_s, ymax_s, is_group_of_s = row elif len(row) == 13: image_id, label = row[0], row[2] xmin_s, xmax_s, ymin_s, ymax_s = row[4:8] is_group_of_s = row[10] xmin, xmax, ymin, ymax = map(float, (xmin_s, xmax_s, ymin_s, ymax_s)) is_group_of = bool(int(is_group_of_s)) image2boxes[image_id].append((label, xmin, xmax, ymin, ymax, is_group_of)) return image2boxes def load_class_hierarchy(filepath): with tf.io.gfile.GFile(filepath, "r") as jsonfile: return json.load(jsonfile) def load_class_descriptions(filepath): with tf.io.gfile.GFile(filepath, "r") as csvfile: reader = csv.reader(csvfile) # Note: this file doesn't have any header. return [row for row in reader] def fill_class_names_in_tfds_info(classes_filepath, tfds_info_features): """Fills the class names in ClassLabel features.""" class_descriptions = load_class_descriptions(classes_filepath) mids = [mid for mid, _ in class_descriptions] tfds_info_features["objects"]["label"].names = mids tfds_info_features["bobjects"]["label"].names = mids
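# --- Pipeline sketch (illustrative; not from the TFDS source) ---------------
# The real OI Challenge 2019 builder wires these DoFns together inside a TFDS
# Beam-based builder. The helper and its arguments below are hypothetical and
# only show how the three stages above compose: read images from the zip,
# resize and re-encode them, then join them with the CSV annotations. "beam"
# is the lazily imported apache_beam module from the top of this file.
def build_detection_examples(root, zip_filepath, box_labels_filepath,
                             classes_filepath, target_pixels=1000000):
    return (
        root
        | "CreateInput" >> beam.Create([zip_filepath])
        | "ReadZip" >> beam.ParDo(ReadZipFn())
        | "ProcessImage" >> beam.ParDo(ProcessImageFn(target_pixels))
        | "CreateExamples" >> beam.ParDo(
            CreateDetectionExampleFn(
                image_labels_filepath=None,
                box_labels_filepath=box_labels_filepath,
                hierarchy_filepath=None,
                classes_filepath=classes_filepath))
    )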
python
import re


class CCY:
    BYN = "BYN"
    RUB = "RUB"
    USD = "USD"
    EUR = "EUR"

    @classmethod
    def from_string(cls, s):
        if s is None:
            return cls.BYN
        ccys = [
            (r'r[u,r][r,b]?', cls.RUB),
            (r'b[y,r]?n?', cls.BYN),
            (r'usd?', cls.USD),
            (r'eur?', cls.EUR),
        ]
        for ccy in ccys:
            m = re.match(ccy[0], s, re.IGNORECASE)
            if m is not None:
                return ccy[1]
        raise ValueError(f"Invalid currency string {s}, try rub, byn, usd, or eur")


class Tables:
    SPENDINGS = "spendings"
    MILEAGE = "mileage"
    REMINDERS = "reminders"


class Categories:
    GAS = "gas"
    MILEAGE = "mileage"
    CAR_GOODS = "car-goods"
    REPAIR = "repair"
    REMINDER_MILEAGE = "reminder-mileage"
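# --- Usage examples (illustrative; not part of the original module) ---------
# CCY.from_string is tolerant of common spellings and of case, and falls back
# to BYN when no string is given.
assert CCY.from_string(None) == CCY.BYN
assert CCY.from_string("rub") == CCY.RUB
assert CCY.from_string("BYN") == CCY.BYN
assert CCY.from_string("usd") == CCY.USD
assert CCY.from_string("EUR") == CCY.EUR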
python
""" ================ Compute p-values ================ For the visualization, we used a comodulogram. """ from tensorpac import Pac from tensorpac.signals import pac_signals_wavelet import matplotlib.pyplot as plt plt.style.use('seaborn-poster') # First, we generate a dataset of signals artificially coupled between 10hz # and 100hz. By default, this dataset is organized as (n_epochs, n_times) where # n_times is the number of time points. n_epochs = 1 # number of datasets sf = 512. # sampling frequency data, time = pac_signals_wavelet(f_pha=6, f_amp=90, noise=.8, n_epochs=n_epochs, n_times=4000, sf=sf) # First, let's use the MVL, without any further correction by surrogates : p = Pac(idpac=(1, 2, 0), f_pha=(2, 15, 2, .2), f_amp=(60, 120, 10, 1)) xpac = p.filterfit(sf, data, n_perm=200, p=.05) pval = p.pvalues p.comodulogram(xpac.mean(-1), title=str(p), cmap='Spectral_r', vmin=0., pvalues=pval, levels=.05) p.show()
python
import multiprocessing


def validate_chunks(n):
    if n == 0:
        raise AssertionError('The number of chunks cannot be 0')
    elif n <= -2:
        raise AssertionError('The number of chunks should be -1 or > 0')


def get_num_partitions(given_partitions, n):
    if given_partitions == -1:
        return multiprocessing.cpu_count()
    elif given_partitions > n:
        return n
    else:
        return given_partitions


def get_num_cores():
    return multiprocessing.cpu_count()


def wrap(object):
    return object
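# --- Usage examples (illustrative; not part of the original module) ---------
# get_num_partitions caps the partition count at the number of rows and
# expands -1 to the number of available cores.
assert get_num_partitions(4, n=100) == 4            # explicit value kept
assert get_num_partitions(16, n=10) == 10           # never more partitions than rows
assert get_num_partitions(-1, n=10**6) == get_num_cores()  # -1 means "use all cores"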
python
from typing import Union, List, Optional from pyspark.sql.types import ( StructType, StructField, StringType, ArrayType, DataType, TimestampType, ) # This file is auto-generated by generate_schema so do not edit it manually # noinspection PyPep8Naming class MedicationAdministrationSchema: """ Describes the event of a patient consuming or otherwise being administered a medication. This may be as simple as swallowing a tablet or it may be a long running infusion. Related resources tie this event to the authorizing prescription, and the specific encounter between patient and health care practitioner. """ # noinspection PyDefaultArgument @staticmethod def get_schema( max_nesting_depth: Optional[int] = 6, nesting_depth: int = 0, nesting_list: List[str] = [], max_recursion_limit: Optional[int] = 2, include_extension: Optional[bool] = False, extension_fields: Optional[List[str]] = None, extension_depth: int = 0, max_extension_depth: Optional[int] = 2, include_modifierExtension: Optional[bool] = False, use_date_for: Optional[List[str]] = None, parent_path: Optional[str] = "", ) -> Union[StructType, DataType]: """ Describes the event of a patient consuming or otherwise being administered a medication. This may be as simple as swallowing a tablet or it may be a long running infusion. Related resources tie this event to the authorizing prescription, and the specific encounter between patient and health care practitioner. resourceType: This is a MedicationAdministration resource id: The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes. meta: The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource. implicitRules: A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc. language: The base language in which the resource is written. text: A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety. contained: These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope. extension: May be used to represent additional information that is not part of the basic definition of the resource. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. modifierExtension: May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. 
To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions. Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). identifier: Identifiers associated with this Medication Administration that are defined by business processes and/or used to refer to it when a direct URL reference to the resource itself is not appropriate. They are business identifiers assigned to this resource by the performer or other systems and remain constant as the resource is updated and propagates from server to server. instantiates: A protocol, guideline, orderset, or other definition that was adhered to in whole or in part by this event. partOf: A larger event of which this particular event is a component or step. status: Will generally be set to show that the administration has been completed. For some long running administrations such as infusions, it is possible for an administration to be started but not completed or it may be paused while some other process is under way. statusReason: A code indicating why the administration was not performed. category: Indicates where the medication is expected to be consumed or administered. medicationCodeableConcept: Identifies the medication that was administered. This is either a link to a resource representing the details of the medication or a simple attribute carrying a code that identifies the medication from a known list of medications. medicationReference: Identifies the medication that was administered. This is either a link to a resource representing the details of the medication or a simple attribute carrying a code that identifies the medication from a known list of medications. subject: The person or animal or group receiving the medication. context: The visit, admission, or other contact between patient and health care provider during which the medication administration was performed. supportingInformation: Additional information (for example, patient height and weight) that supports the administration of the medication. effectiveDateTime: A specific date/time or interval of time during which the administration took place (or did not take place, when the 'notGiven' attribute is true). For many administrations, such as swallowing a tablet the use of dateTime is more appropriate. effectivePeriod: A specific date/time or interval of time during which the administration took place (or did not take place, when the 'notGiven' attribute is true). For many administrations, such as swallowing a tablet the use of dateTime is more appropriate. performer: Indicates who or what performed the medication administration and how they were involved. reasonCode: A code indicating why the medication was given. reasonReference: Condition or observation that supports why the medication was administered. request: The original request, instruction or authority to perform the administration. device: The device used in administering the medication to the patient. For example, a particular infusion pump. note: Extra information about the medication administration that is not conveyed by the other attributes. dosage: Describes the medication dosage information details e.g. 
dose, rate, site, route, etc. eventHistory: A summary of the events of interest that have occurred, such as when the administration was verified. """ if extension_fields is None: extension_fields = [ "valueBoolean", "valueCode", "valueDate", "valueDateTime", "valueDecimal", "valueId", "valueInteger", "valuePositiveInt", "valueString", "valueTime", "valueUnsignedInt", "valueUri", "valueUrl", "valueReference", "valueCodeableConcept", "valueAddress", ] from spark_fhir_schemas.r4.simple_types.id import idSchema from spark_fhir_schemas.r4.complex_types.meta import MetaSchema from spark_fhir_schemas.r4.simple_types.uri import uriSchema from spark_fhir_schemas.r4.simple_types.code import codeSchema from spark_fhir_schemas.r4.complex_types.narrative import NarrativeSchema from spark_fhir_schemas.r4.complex_types.resourcelist import ResourceListSchema from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema from spark_fhir_schemas.r4.complex_types.identifier import IdentifierSchema from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema from spark_fhir_schemas.r4.complex_types.codeableconcept import ( CodeableConceptSchema, ) from spark_fhir_schemas.r4.complex_types.period import PeriodSchema from spark_fhir_schemas.r4.complex_types.medicationadministration_performer import ( MedicationAdministration_PerformerSchema, ) from spark_fhir_schemas.r4.complex_types.annotation import AnnotationSchema from spark_fhir_schemas.r4.complex_types.medicationadministration_dosage import ( MedicationAdministration_DosageSchema, ) if ( max_recursion_limit and nesting_list.count("MedicationAdministration") >= max_recursion_limit ) or (max_nesting_depth and nesting_depth >= max_nesting_depth): return StructType([StructField("id", StringType(), True)]) # add my name to recursion list for later my_nesting_list: List[str] = nesting_list + ["MedicationAdministration"] my_parent_path = ( parent_path + ".medicationadministration" if parent_path else "medicationadministration" ) schema = StructType( [ # This is a MedicationAdministration resource StructField("resourceType", StringType(), True), # The logical id of the resource, as used in the URL for the resource. Once # assigned, this value never changes. StructField( "id", idSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path + ".id", ), True, ), # The metadata about the resource. This is content that is maintained by the # infrastructure. Changes to the content might not always be associated with # version changes to the resource. StructField( "meta", MetaSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ), True, ), # A reference to a set of rules that were followed when the resource was # constructed, and which must be understood when processing the content. 
Often, # this is a reference to an implementation guide that defines the special rules # along with other profiles etc. StructField( "implicitRules", uriSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path + ".implicitrules", ), True, ), # The base language in which the resource is written. StructField( "language", codeSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path + ".language", ), True, ), # A human-readable narrative that contains a summary of the resource and can be # used to represent the content of the resource to a human. The narrative need # not encode all the structured data, but is required to contain sufficient # detail to make it "clinically safe" for a human to just read the narrative. # Resource definitions may define what content should be represented in the # narrative to ensure clinical safety. StructField( "text", NarrativeSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ), True, ), # These resources do not have an independent existence apart from the resource # that contains them - they cannot be identified independently, and nor can they # have their own independent transaction scope. StructField( "contained", ArrayType( ResourceListSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ) ), True, ), # May be used to represent additional information that is not part of the basic # definition of the resource. To make the use of extensions safe and manageable, # there is a strict set of governance applied to the definition and use of # extensions. Though any implementer can define an extension, there is a set of # requirements that SHALL be met as part of the definition of the extension. 
StructField( "extension", ArrayType( ExtensionSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ) ), True, ), # May be used to represent additional information that is not part of the basic # definition of the resource and that modifies the understanding of the element # that contains it and/or the understanding of the containing element's # descendants. Usually modifier elements provide negation or qualification. To # make the use of extensions safe and manageable, there is a strict set of # governance applied to the definition and use of extensions. Though any # implementer is allowed to define an extension, there is a set of requirements # that SHALL be met as part of the definition of the extension. Applications # processing a resource are required to check for modifier extensions. # # Modifier extensions SHALL NOT change the meaning of any elements on Resource # or DomainResource (including cannot change the meaning of modifierExtension # itself). StructField( "modifierExtension", ArrayType( ExtensionSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ) ), True, ), # Identifiers associated with this Medication Administration that are defined by # business processes and/or used to refer to it when a direct URL reference to # the resource itself is not appropriate. They are business identifiers assigned # to this resource by the performer or other systems and remain constant as the # resource is updated and propagates from server to server. StructField( "identifier", ArrayType( IdentifierSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ) ), True, ), # A protocol, guideline, orderset, or other definition that was adhered to in # whole or in part by this event. StructField( "instantiates", ArrayType( uriSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ) ), True, ), # A larger event of which this particular event is a component or step. 
StructField( "partOf", ArrayType( ReferenceSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ) ), True, ), # Will generally be set to show that the administration has been completed. For # some long running administrations such as infusions, it is possible for an # administration to be started but not completed or it may be paused while some # other process is under way. StructField( "status", codeSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path + ".status", ), True, ), # A code indicating why the administration was not performed. StructField( "statusReason", ArrayType( CodeableConceptSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ) ), True, ), # Indicates where the medication is expected to be consumed or administered. StructField( "category", CodeableConceptSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ), True, ), # Identifies the medication that was administered. This is either a link to a # resource representing the details of the medication or a simple attribute # carrying a code that identifies the medication from a known list of # medications. StructField( "medicationCodeableConcept", CodeableConceptSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ), True, ), # Identifies the medication that was administered. This is either a link to a # resource representing the details of the medication or a simple attribute # carrying a code that identifies the medication from a known list of # medications. 
StructField( "medicationReference", ReferenceSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ), True, ), # The person or animal or group receiving the medication. StructField( "subject", ReferenceSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ), True, ), # The visit, admission, or other contact between patient and health care # provider during which the medication administration was performed. StructField( "context", ReferenceSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ), True, ), # Additional information (for example, patient height and weight) that supports # the administration of the medication. StructField( "supportingInformation", ArrayType( ReferenceSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ) ), True, ), # A specific date/time or interval of time during which the administration took # place (or did not take place, when the 'notGiven' attribute is true). For many # administrations, such as swallowing a tablet the use of dateTime is more # appropriate. StructField("effectiveDateTime", TimestampType(), True), # A specific date/time or interval of time during which the administration took # place (or did not take place, when the 'notGiven' attribute is true). For many # administrations, such as swallowing a tablet the use of dateTime is more # appropriate. StructField( "effectivePeriod", PeriodSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ), True, ), # Indicates who or what performed the medication administration and how they # were involved. 
StructField( "performer", ArrayType( MedicationAdministration_PerformerSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ) ), True, ), # A code indicating why the medication was given. StructField( "reasonCode", ArrayType( CodeableConceptSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ) ), True, ), # Condition or observation that supports why the medication was administered. StructField( "reasonReference", ArrayType( ReferenceSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ) ), True, ), # The original request, instruction or authority to perform the administration. StructField( "request", ReferenceSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ), True, ), # The device used in administering the medication to the patient. For example, # a particular infusion pump. StructField( "device", ArrayType( ReferenceSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ) ), True, ), # Extra information about the medication administration that is not conveyed by # the other attributes. StructField( "note", ArrayType( AnnotationSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ) ), True, ), # Describes the medication dosage information details e.g. dose, rate, site, # route, etc. 
StructField( "dosage", MedicationAdministration_DosageSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth + 1, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ), True, ), # A summary of the events of interest that have occurred, such as when the # administration was verified. StructField( "eventHistory", ArrayType( ReferenceSchema.get_schema( max_nesting_depth=max_nesting_depth, nesting_depth=nesting_depth + 1, nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension, use_date_for=use_date_for, parent_path=my_parent_path, ) ), True, ), ] ) if not include_extension: schema.fields = [ c if c.name != "extension" else StructField("extension", StringType(), True) for c in schema.fields ] if not include_modifierExtension: schema.fields = [ c if c.name != "modifierExtension" else StructField("modifierExtension", StringType(), True) for c in schema.fields ] return schema
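# --- Usage sketch (illustrative; not part of the generated module) ----------
# get_schema() returns an ordinary Spark StructType, so it can back an empty
# (or loaded) DataFrame of MedicationAdministration resources. The local
# SparkSession below is a placeholder for whatever session your job already
# has available.
if __name__ == "__main__":
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[1]").getOrCreate()
    schema = MedicationAdministrationSchema.get_schema(
        max_recursion_limit=2,
        include_extension=False,
    )
    df = spark.createDataFrame([], schema=schema)
    df.printSchema()  # id, status, subject, dosage, ... as typed columns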
python
from distutils.core import setup
from setuptools import find_packages

setup(
    name='pyesapi',
    version='0.2.1',
    description='Python interface to Eclipse Scripting API',
    author='Michael Folkerts, Varian Medical Systems',
    author_email='[email protected]',
    license='MIT',
    packages=find_packages(),
    install_requires=[
        'numpy',
        'scipy',
        'pythonnet==2.3.0',  # tested to work with python 3.6
    ],
)
python
from common import * import collections try: import cupy except: cupy = None # From http://pythonhosted.org/pythran/MANUAL.html def arc_distance(theta_1, phi_1, theta_2, phi_2): """ Calculates the pairwise arc distance between all points in vector a and b. """ temp = (np.sin((theta_2-theta_1)/2)**2 + np.cos(theta_1)*np.cos(theta_2) * np.sin((phi_2-phi_1)/2)**2) distance_matrix = 2 * np.arctan2(np.sqrt(temp), np.sqrt(1-temp)) return distance_matrix def test_numba(ds): ds_original = ds.copy() #ds.columns['x'] = (ds.columns['x']*1).copy() # convert non non-big endian for now expr = arc_distance(ds.y*1, ds.y*1, ds.y**2*ds.y, ds.x+ds.y) ds['arc_distance'] = expr #assert ds.arc_distance.expression == expr.expression ds['arc_distance_jit'] = ds['arc_distance'].jit_numba() np.testing.assert_array_almost_equal(ds.arc_distance.tolist(), ds.arc_distance_jit.tolist()) # TODO: make it such that they can be pickled ds_original.state_set(ds.state_get()) ds = ds_original np.testing.assert_array_almost_equal(ds.arc_distance.tolist(), ds.arc_distance_jit.tolist()) @pytest.mark.skipif(sys.version_info < (3,6) and sys.version_info[0] != 2, reason="no support for python3.5 (numba segfaults)") def test_jit_overwrite(ds_local): ds = ds_local # TODO: remote overwriting of functions does not work ds_original = ds.copy() expr = arc_distance(ds.y*1, ds.y*1, ds.y**2*ds.y, ds.x+ds.y) ds['arc_distance'] = expr ds['arc_distance_jit'] = ds['arc_distance'].jit_numba() ds['arc_distance_jit'] = ds['arc_distance * 2'].jit_numba() np.testing.assert_array_almost_equal((ds.arc_distance*2).tolist(), ds.arc_distance_jit.tolist()) @pytest.mark.skipif(cupy is None, reason="cuda support relies on cupy") def test_cuda(ds_local): ds = ds_local ds_original = ds.copy() #ds.columns['x'] = (ds.columns['x']*1).copy() # convert non non-big endian for now expr = arc_distance(ds.y*1, ds.y*1, ds.y**2*ds.y, ds.x+ds.y) ds['arc_distance'] = expr print(expr) #assert ds.arc_distance.expression == expr.expression ds['arc_distance_jit'] = ds['arc_distance'].jit_cuda() np.testing.assert_almost_equal(ds.arc_distance.values, ds.arc_distance_jit.values) # TODO: make it such that they can be pickled ds_original.state_set(ds.state_get()) ds = ds_original np.testing.assert_almost_equal(ds.arc_distance.values, ds.arc_distance_jit.values) def test_metal(df_local): pytest.importorskip("Metal") df = df_local df_original = df.copy() #df.columns['x'] = (df.columns['x']*1).copy() # convert non non-big endian for now expr = arc_distance(df.y*1, df.y*1, df.y**2*df.y, df.x+df.y) # expr = df.x + df.y df['arc_distance'] = expr #assert df.arc_distance.expression == expr.expression df['arc_distance_jit'] = df['arc_distance'].jit_metal() # assert df.arc_distance.tolist() == df.arc_distance_jit.tolist() np.testing.assert_almost_equal(df.arc_distance.values, df.arc_distance_jit.values, decimal=1) # TODO: make it such that they can be pickled df_original.state_set(df.state_get()) df = df_original np.testing.assert_almost_equal(df.arc_distance.values, df.arc_distance_jit.values, decimal=1) @pytest.mark.parametrize("type_name", vaex.array_types._type_names) def test_types_metal(type_name, df_factory_numpy): pytest.importorskip("Metal") df = df_factory_numpy(x=np.array([0, 1, 2], dtype=type_name), y=[2, 3, 4]) # df = df_factory_numpy(x=np.array([0, 1, 2], dtype=type_name), y=np.array([2, 3, 4], dtype=type_name)) # df['x'] = df['x'].astype(type_name) df['z'] = (df['x'] + df['y']).jit_metal() assert df['z'].tolist() == [2, 4, 6]
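# --- Added sanity check (illustrative; not part of the original suite) ------
# arc_distance also accepts plain numpy arrays, which gives a cheap reference
# value that is independent of vaex expressions and of any JIT backend.
def test_arc_distance_plain_numpy():
    import numpy as np
    theta_1 = np.array([0.0, 0.5])
    phi_1 = np.array([0.0, 1.0])
    theta_2 = np.array([0.1, 0.6])
    phi_2 = np.array([0.2, 1.1])
    result = arc_distance(theta_1, phi_1, theta_2, phi_2)
    assert result.shape == (2,)
    assert np.all((result >= 0) & (result <= np.pi))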
python
from unittest import mock import pytest from nesta.packages.geographies.uk_geography_lookup import get_gss_codes from nesta.packages.geographies.uk_geography_lookup import get_children from nesta.packages.geographies.uk_geography_lookup import _get_children SPARQL_QUERY = ''' PREFIX entity: <http://statistics.data.gov.uk/def/statistical-entity#> PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> SELECT DISTINCT ?area_code WHERE { ?area_entity entity:code ?area_code_entity; rdfs:label ?area_code . ?area_code_entity rdfs:label ?area_code_type; FILTER(SUBSTR(?area_code_type, 2, 2) > "01"). } ''' @pytest.fixture def pars_for_get_children(): return dict(base="dummy", geocodes="dummy", max_attempts=3) @pytest.fixture def side_effect_for_get_children(): return ([1, 2], [2, 3], ["A", 3], ["5", 4], []) @mock.patch("nesta.packages.geographies.uk_geography_lookup.find_filepath_from_pathstub", return_value=None) @mock.patch("builtins.open", new_callable=mock.mock_open, read_data=SPARQL_QUERY) def test_get_gss_codes(mocked_open, mocked_find_filepath_from_pathstub): codes = get_gss_codes(test=True) assert len(codes) > 100 # def test_get_children(): # x = _get_children("E04", "E08000001") # assert len(x) > 0 # @mock.patch("nesta.packages.geographies.uk_geography_lookup._get_children") # def test_get_children_max_out(mocked, pars_for_get_children): # mocked.side_effect = ([], [], [], [], []) # get_children(**pars_for_get_children) # assert mocked.call_count == pars_for_get_children["max_attempts"] # @mock.patch("nesta.packages.geographies.uk_geography_lookup._get_children") # def test_get_children_totals(mocked, pars_for_get_children, side_effect_for_get_children): # mocked.side_effect = side_effect_for_get_children # children = get_children(**pars_for_get_children) # assert len(children) == sum(len(x) for x in side_effect_for_get_children)
python
import unittest import hcl2 from checkov.terraform.checks.resource.gcp.GoogleCloudSqlServerContainedDBAuthentication import check from checkov.common.models.enums import CheckResult class TestCloudSQLServerContainedDBAuthentication(unittest.TestCase): def test_failure(self): hcl_res = hcl2.loads(""" resource "google_sql_database_instance" "tfer--general-002D-sqlserver12" { database_version = "SQLSERVER_2017_STANDARD" name = "general-sqlserver12" project = "gcp-bridgecrew-deployment" region = "us-central1" settings { activation_policy = "ALWAYS" availability_type = "ZONAL" backup_configuration { binary_log_enabled = "false" enabled = "true" location = "us" point_in_time_recovery_enabled = "false" start_time = "00:00" } crash_safe_replication = "false" database_flags =[{ name = "cross db ownership chaining" value = "on" }, { name = "contained database authentication" value = "on" }] disk_autoresize = "true" disk_size = "20" disk_type = "PD_SSD" ip_configuration { ipv4_enabled = "false" private_network = "projects/gcp-bridgecrew-deployment/global/networks/default" require_ssl = "false" } location_preference { zone = "us-central1-a" } maintenance_window { day = "0" hour = "0" } pricing_plan = "PER_USE" replication_type = "SYNCHRONOUS" tier = "db-custom-1-4096" } } """) resource_conf = hcl_res['resource'][0]['google_sql_database_instance']['tfer--general-002D-sqlserver12'] scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.FAILED, scan_result) def test_success(self): hcl_res = hcl2.loads(""" resource "google_sql_database_instance" "tfer--general-002D-sqlserver12" { database_version = "SQLSERVER_2017_STANDARD" name = "general-sqlserver12" project = "gcp-bridgecrew-deployment" region = "us-central1" settings { activation_policy = "ALWAYS" availability_type = "ZONAL" backup_configuration { binary_log_enabled = "false" enabled = "true" location = "us" point_in_time_recovery_enabled = "false" start_time = "00:00" } crash_safe_replication = "false" database_flags { name = "cross db ownership chaining" value = "off" } database_flags { name = "contained database authentication" value = "off" } disk_autoresize = "true" disk_size = "20" disk_type = "PD_SSD" ip_configuration { ipv4_enabled = "false" private_network = "projects/gcp-bridgecrew-deployment/global/networks/default" require_ssl = "false" } location_preference { zone = "us-central1-a" } maintenance_window { day = "0" hour = "0" } pricing_plan = "PER_USE" replication_type = "SYNCHRONOUS" tier = "db-custom-1-4096" } } """) resource_conf = hcl_res['resource'][0]['google_sql_database_instance']['tfer--general-002D-sqlserver12'] scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.PASSED, scan_result) def test_success_2(self): hcl_res = hcl2.loads(""" resource "google_sql_database_instance" "tfer--general-002D-sqlserver12" { database_version = "SQLSERVER_2017_STANDARD122" name = "general-sqlserver12" project = "gcp-bridgecrew-deployment" region = "us-central1" settings { activation_policy = "ALWAYS" availability_type = "ZONAL" backup_configuration { binary_log_enabled = "false" enabled = "true" location = "us" point_in_time_recovery_enabled = "false" start_time = "00:00" } crash_safe_replication = "false" database_flags { name = "cross db ownership chaining" value = "on" } database_flags { name = "contained database authentication" value = "off" } disk_autoresize = "true" disk_size = "20" disk_type = "PD_SSD" ip_configuration { ipv4_enabled = "false" private_network = 
"projects/gcp-bridgecrew-deployment/global/networks/default" require_ssl = "false" } location_preference { zone = "us-central1-a" } maintenance_window { day = "0" hour = "0" } pricing_plan = "PER_USE" replication_type = "SYNCHRONOUS" tier = "db-custom-1-4096" } } """) resource_conf = hcl_res['resource'][0]['google_sql_database_instance']['tfer--general-002D-sqlserver12'] scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.PASSED, scan_result) def test_success_3(self): hcl_res = hcl2.loads(""" resource "google_sql_database_instance" "tfer--general-002D-sqlserver12" { database_version = "SQLSERVER_2017_STANDARD" name = "general-sqlserver12" project = "gcp-bridgecrew-deployment" region = "us-central1" settings { activation_policy = "ALWAYS" availability_type = "ZONAL" backup_configuration { binary_log_enabled = "false" enabled = "true" location = "us" point_in_time_recovery_enabled = "false" start_time = "00:00" } crash_safe_replication = "false" disk_autoresize = "true" disk_size = "20" disk_type = "PD_SSD" ip_configuration { ipv4_enabled = "false" private_network = "projects/gcp-bridgecrew-deployment/global/networks/default" require_ssl = "false" } location_preference { zone = "us-central1-a" } maintenance_window { day = "0" hour = "0" } pricing_plan = "PER_USE" replication_type = "SYNCHRONOUS" tier = "db-custom-1-4096" } } """) resource_conf = hcl_res['resource'][0]['google_sql_database_instance']['tfer--general-002D-sqlserver12'] scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.PASSED, scan_result) def test_success_4(self): hcl_res = hcl2.loads(""" resource "google_sql_database_instance" "tfer--general-002D-sqlserver12" { database_version = "SQLSERVER_2017_STANDARD" name = "general-sqlserver12" project = "gcp-bridgecrew-deployment" region = "us-central1" } """) resource_conf = hcl_res['resource'][0]['google_sql_database_instance']['tfer--general-002D-sqlserver12'] scan_result = check.scan_resource_conf(conf=resource_conf) self.assertEqual(CheckResult.PASSED, scan_result) if __name__ == '__main__': unittest.main()
python
import numpy as np import pandas as pd import pytest from scipy import stats from locan import LocData from locan.analysis import BlinkStatistics from locan.analysis.blinking import _blink_statistics, _DistributionFits def test__blink_statistics_0(): # frame with on and off periods up to three frames and starting with one-frame on-period. frames = np.array([0, 4, 6, 7, 8, 12, 13]) results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [1, 1, 3, 2]) assert np.array_equal(results["off_periods"], [3, 1, 3]) assert np.array_equal(results["on_periods_frame"], [0, 4, 6, 12]) assert np.array_equal(results["off_periods_frame"], [1, 5, 9]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0], [1], [2, 3, 4], [5, 6]] ) ] ) results = _blink_statistics(frames, memory=1, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [1, 5, 2]) assert np.array_equal(results["off_periods"], [3, 3]) assert np.array_equal(results["on_periods_frame"], [0, 4, 12]) assert np.array_equal(results["off_periods_frame"], [1, 9]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0], [1, 2, 3, 4], [5, 6]] ) ] ) results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [14]) assert np.array_equal(results["off_periods"], []) assert np.array_equal(results["on_periods_frame"], [0]) assert np.array_equal(results["off_periods_frame"], []) assert all( [ np.array_equal(one, two) for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6]]) ] ) def test__blink_statistics_1(): # frame with on and off periods up to three frames and starting with two-frame on-period. 
frames = np.array([0, 1, 3, 6, 7, 8, 12, 13]) results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [2, 1, 3, 2]) assert np.array_equal(results["off_periods"], [1, 2, 3]) assert np.array_equal(results["on_periods_frame"], [0, 3, 6, 12]) assert np.array_equal(results["off_periods_frame"], [2, 4, 9]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=1, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [4, 3, 2]) assert np.array_equal(results["off_periods"], [2, 3]) assert np.array_equal(results["on_periods_frame"], [0, 6, 12]) assert np.array_equal(results["off_periods_frame"], [4, 9]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1, 2], [3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [14]) assert np.array_equal(results["off_periods"], []) assert np.array_equal(results["on_periods_frame"], [0]) assert np.array_equal(results["off_periods_frame"], []) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6, 7]] ) ] ) def test__blink_statistics_2(): # frame with on and off periods up to three frames and starting with two-frame on-period. 
frames = np.array([0, 1, 3, 6, 7, 8, 12, 13]) + 1 results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [2, 1, 3, 2]) assert np.array_equal(results["off_periods"], [1, 1, 2, 3]) assert np.array_equal(results["on_periods_frame"], [1, 4, 7, 13]) assert np.array_equal(results["off_periods_frame"], [0, 3, 5, 10]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=1, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [5, 3, 2]) assert np.array_equal(results["off_periods"], [2, 3]) assert np.array_equal(results["on_periods_frame"], [0, 7, 13]) assert np.array_equal(results["off_periods_frame"], [5, 10]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1, 2], [3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [15]) assert np.array_equal(results["off_periods"], []) assert np.array_equal(results["on_periods_frame"], [0]) assert np.array_equal(results["off_periods_frame"], []) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6, 7]] ) ] ) def test__blink_statistics_3(): # frame with on and off periods up to three frames and starting with off-period. 
frames = np.array([0, 1, 4, 6, 7, 8, 12, 13]) + 4 results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [2, 1, 3, 2]) assert np.array_equal(results["off_periods"], [4, 2, 1, 3]) assert np.array_equal(results["on_periods_frame"], [4, 8, 10, 16]) assert np.array_equal(results["off_periods_frame"], [0, 6, 9, 13]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=0, remove_heading_off_periods=True) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [2, 1, 3, 2]) assert np.array_equal(results["off_periods"], [2, 1, 3]) assert np.array_equal(results["on_periods_frame"], [4, 8, 10, 16]) assert np.array_equal(results["off_periods_frame"], [6, 9, 13]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=2, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [9, 2]) assert np.array_equal(results["off_periods"], [4, 3]) assert np.array_equal(results["on_periods_frame"], [4, 16]) assert np.array_equal(results["off_periods_frame"], [0, 13]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1, 2, 3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=2, remove_heading_off_periods=True) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [9, 2]) assert np.array_equal(results["off_periods"], [3]) assert np.array_equal(results["on_periods_frame"], [4, 16]) assert np.array_equal(results["off_periods_frame"], [13]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1, 2, 3, 4, 5], [6, 7]] ) ] ) results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [18]) assert np.array_equal(results["off_periods"], []) assert np.array_equal(results["on_periods_frame"], [0]) assert np.array_equal(results["off_periods_frame"], []) assert np.array_equal(results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6, 7]]) def test__blink_statistics_4(): # frame with on and off periods up to three frames and starting with off-period. 
frames = np.array([0, 1, 4, 6, 12, 13]) + 2 results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [2, 1, 1, 2]) assert np.array_equal(results["off_periods"], [2, 2, 1, 5]) assert np.array_equal(results["on_periods_frame"], [2, 6, 8, 14]) assert np.array_equal(results["off_periods_frame"], [0, 4, 7, 9]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1], [2], [3], [4, 5]] ) ] ) results = _blink_statistics(frames, memory=0, remove_heading_off_periods=True) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [2, 1, 1, 2]) assert np.array_equal(results["off_periods"], [2, 1, 5]) assert np.array_equal(results["on_periods_frame"], [2, 6, 8, 14]) assert np.array_equal(results["off_periods_frame"], [4, 7, 9]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1], [2], [3], [4, 5]] ) ] ) results = _blink_statistics(frames, memory=3, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [9, 2]) assert np.array_equal(results["off_periods"], [5]) assert np.array_equal(results["on_periods_frame"], [0, 14]) assert np.array_equal(results["off_periods_frame"], [9]) assert all( [ np.array_equal(one, two) for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3], [4, 5]]) ] ) results = _blink_statistics(frames, memory=3, remove_heading_off_periods=True) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [7, 2]) assert np.array_equal(results["off_periods"], [5]) assert np.array_equal(results["on_periods_frame"], [2, 14]) assert np.array_equal(results["off_periods_frame"], [9]) assert all( [ np.array_equal(one, two) for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3], [4, 5]]) ] ) results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [16]) assert np.array_equal(results["off_periods"], []) assert np.array_equal(results["on_periods_frame"], [0]) assert np.array_equal(results["off_periods_frame"], []) assert all( [ np.array_equal(one, two) for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3, 4, 5]]) ] ) def test__blink_statistics_5(caplog): # frame with on and off periods including repeated frames. 
frames = np.array([0, 1, 4, 4, 6, 7, 8, 12, 12, 13]) + 4 results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [2, 1, 3, 2]) assert np.array_equal(results["off_periods"], [4, 2, 1, 3]) assert np.array_equal(results["on_periods_frame"], [4, 8, 10, 16]) assert np.array_equal(results["off_periods_frame"], [0, 6, 9, 13]) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1], [2], [3, 4, 5], [6, 7]] ) ] ) assert caplog.record_tuples == [ ( "locan.analysis.blinking", 30, "There are 2 duplicated frames found that will be ignored.", ) ] results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [18]) assert np.array_equal(results["off_periods"], []) assert np.array_equal(results["on_periods_frame"], [0]) assert np.array_equal(results["off_periods_frame"], []) assert all( [ np.array_equal(one, two) for one, two in zip( results["on_periods_indices"], [[0, 1, 2, 3, 4, 5, 6, 7]] ) ] ) def test__blink_statistics_6(): # frame with on and off periods up to three frames and starting with one-frame on-period. frames = np.array([0, 2, 3, 9]) results = _blink_statistics(frames, memory=0, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [1, 2, 1]) assert np.array_equal(results["off_periods"], [1, 5]) assert np.array_equal(results["on_periods_frame"], [0, 2, 9]) assert np.array_equal(results["off_periods_frame"], [1, 4]) assert all( [ np.array_equal(one, two) for one, two in zip(results["on_periods_indices"], [[0], [1, 2], [3]]) ] ) results = _blink_statistics(frames, memory=10, remove_heading_off_periods=False) assert len(results["on_periods"]) == len(results["on_periods_frame"]) assert len(results["off_periods"]) == len(results["off_periods_frame"]) assert np.array_equal(results["on_periods"], [10]) assert np.array_equal(results["off_periods"], []) assert np.array_equal(results["on_periods_frame"], [0]) assert np.array_equal(results["off_periods_frame"], []) assert all( [ np.array_equal(one, two) for one, two in zip(results["on_periods_indices"], [[0, 1, 2, 3]]) ] ) @pytest.fixture() def locdata_simple(): locdata_dict = { "position_x": [0, 0, 1, 4, 5], "position_y": [0, 1, 3, 4, 1], "intensity": [0, 1, 3, 4, 1], "psf_sigma_x": [100, 100, 100, 100, 100], } return LocData(dataframe=pd.DataFrame.from_dict(locdata_dict)) @pytest.fixture() def locdata_with_zero_frame(): locdata_dict = {"frame": [0, 1, 2, 4, 10, 11, 14]} return LocData(dataframe=pd.DataFrame.from_dict(locdata_dict)) @pytest.fixture() def locdata_without_zero_frame(): locdata_dict = {"frame": [1, 2, 4, 10, 11, 14]} return LocData(dataframe=pd.DataFrame.from_dict(locdata_dict)) @pytest.fixture() def locdata_with_repetitions(): locdata_dict = {"frame": [2, 2, 2, 4, 4, 14]} return LocData(dataframe=pd.DataFrame.from_dict(locdata_dict)) def test_blink_statistics(locdata_with_zero_frame, locdata_without_zero_frame): bs = _blink_statistics( locdata_with_zero_frame, memory=0, remove_heading_off_periods=False ) assert 
all(bs["on_periods"] == [3, 1, 2, 1]) assert all(bs["off_periods"] == [1, 5, 2]) bs = _blink_statistics( locdata_with_zero_frame.data.frame.values, memory=0, remove_heading_off_periods=False, ) assert all(bs["on_periods"] == [3, 1, 2, 1]) assert all(bs["off_periods"] == [1, 5, 2]) bs = _blink_statistics( locdata_without_zero_frame, memory=0, remove_heading_off_periods=False ) assert all(bs["on_periods"] == [2, 1, 2, 1]) assert all(bs["off_periods"] == [1, 1, 5, 2]) bs = _blink_statistics( locdata_with_zero_frame, memory=0, remove_heading_off_periods=True ) assert all(bs["on_periods"] == [3, 1, 2, 1]) assert all(bs["off_periods"] == [1, 5, 2]) bs = _blink_statistics( locdata_without_zero_frame, memory=0, remove_heading_off_periods=True ) assert all(bs["on_periods"] == [2, 1, 2, 1]) assert all(bs["off_periods"] == [1, 5, 2]) bs = _blink_statistics( locdata_with_zero_frame, memory=1, remove_heading_off_periods=False ) assert all(bs["on_periods"] == [5, 2, 1]) assert all(bs["off_periods"] == [5, 2]) bs = _blink_statistics( locdata_without_zero_frame, memory=1, remove_heading_off_periods=False ) assert all(bs["on_periods"] == [5, 2, 1]) assert all(bs["off_periods"] == [5, 2]) bs = _blink_statistics( locdata_with_zero_frame, memory=2, remove_heading_off_periods=False ) assert all(bs["on_periods"] == [5, 5]) assert all(bs["off_periods"] == [5]) bs = _blink_statistics( locdata_without_zero_frame, memory=2, remove_heading_off_periods=False ) assert all(bs["on_periods"] == [5, 5]) assert all(bs["off_periods"] == [5]) def test_blink_statistics__with_repetitions(locdata_with_repetitions): _blink_statistics( locdata_with_repetitions, memory=0, remove_heading_off_periods=False ) def test_BlinkStatistics_empty(caplog): bs = BlinkStatistics().compute(LocData()) bs.fit_distributions() bs.hist() assert caplog.record_tuples == [ ("locan.analysis.blinking", 30, "Locdata is empty."), ("locan.analysis.blinking", 30, "No results available to fit."), ] def test_BlinkStatistics(locdata_with_zero_frame): bs = BlinkStatistics().compute(locdata_with_zero_frame) assert repr(bs) == "BlinkStatistics(memory=0, remove_heading_off_periods=True)" assert all(bs.results["on_periods"] == [3, 1, 2, 1]) assert all(bs.results["off_periods"] == [1, 5, 2]) assert bs.distribution_statistics == {} bs.hist(data_identifier="on_periods", ax=None, bins="auto", log=True, fit=False) bs.hist(data_identifier="off_periods", ax=None, bins="auto", log=True, fit=False) bs.hist(data_identifier="on_periods", ax=None, bins="auto", log=True, fit=True) def test_DistributionFits(locdata_with_zero_frame): bs = BlinkStatistics().compute(locdata_with_zero_frame) df = _DistributionFits(bs, distribution=stats.expon, data_identifier="on_periods") # print(df.analysis_class.results) assert len(df.analysis_class.results) == 5 assert df.data_identifier == "on_periods" assert ( repr(df) == "_DistributionFits(analysis_class=BlinkStatistics, " "distribution=expon_gen, data_identifier=on_periods)" ) assert df.parameter_dict() == {} df.fit() assert list(df.parameter_dict().keys()) == ["on_periods_loc", "on_periods_scale"] df = _DistributionFits(bs, distribution=stats.expon, data_identifier="off_periods") df.fit() assert list(df.parameter_dict().keys()) == ["off_periods_loc", "off_periods_scale"] df.plot() # print(df.analysis_class.results[df.data_identifier]) def test_fit_distributions(locdata_with_zero_frame): bs = BlinkStatistics().compute(locdata_with_zero_frame) bs.fit_distributions() assert bs.distribution_statistics["on_periods"].parameter_dict() == { 
"on_periods_loc": 1.0, "on_periods_scale": 0.75, } assert bs.distribution_statistics["off_periods"].parameter_dict() == { "off_periods_loc": 1.0, "off_periods_scale": 1.6666666666666665, } bs.hist() bs.hist(data_identifier="off_periods") del bs bs = BlinkStatistics().compute(locdata_with_zero_frame) bs.fit_distributions(with_constraints=False) assert ( bs.distribution_statistics["on_periods"].parameter_dict()["on_periods_loc"] == 1 ) assert ( bs.distribution_statistics["off_periods"].parameter_dict()["off_periods_loc"] == 1 ) del bs bs = BlinkStatistics().compute(locdata_with_zero_frame) bs.fit_distributions(data_identifier="on_periods") assert bs.distribution_statistics["on_periods"].parameter_dict() == { "on_periods_loc": 1.0, "on_periods_scale": 0.75, }
python
import unittest import sys from math import pi sys.path.insert(0, "..") from sections.sections import Wedge import test_sections_generic as generic class TestPhysicalProperties(generic.TestPhysicalProperties, unittest.TestCase): @classmethod def setUpClass(cls): cls.sectclass = Wedge cls.dimensions = dict(r=3.0, phi=pi) cls.angular = ["phi"] cls.rp = 5.0, 4.0 cls._cog = 1.2732395447351625, 0.0 cls.A = 14.137166941154069 cls._I0 = 31.808625617596654, 8.890313812363729, 0.0 cls._I = 31.808625617596654, 31.808625617596654, 0.0 def test_check_dimensions(self): self.assertRaises(ValueError, self.section.set_dimensions, r=-1) self.assertRaises(ValueError, self.section.set_dimensions, r=0) self.assertRaises(ValueError, self.section.set_dimensions, phi=-1) self.assertRaises(ValueError, self.section.set_dimensions, phi=0) self.assertRaises(ValueError, self.section.set_dimensions, phi=2.1*pi) if __name__ == "__main__": unittest.main()
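# Illustrative cross-check of the expected values above (not part of the test
# suite), assuming Wedge is a circular sector of radius r and opening angle
# phi; the formulas are standard geometry, not taken from the sections package.
from math import isclose, pi, sin

_r, _phi = 3.0, pi
assert isclose(_r ** 2 * _phi / 2, 14.137166941154069)            # area A
assert isclose(4 * _r * sin(_phi / 2) / (3 * _phi),               # centroid offset, cls._cog[0]
               1.2732395447351625)
assert isclose(pi * _r ** 4 / 8                                   # parallel-axis shift of pi*r**4/8,
               - (_r ** 2 * _phi / 2)                             # matches the second entry of cls._I0
               * (4 * _r * sin(_phi / 2) / (3 * _phi)) ** 2,
               8.890313812363729)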
python
#!/usr/bin/env python3 # Usage raw_harness.py Y/N repTimes sourceFile arguments # finally, will append a full function file ''' original R file #if has input, gen args=c(args, argd, ...) dataset = setup ''' import sys,os raw_haress_str = ''' rnorm <- runif if(exists('setup')) { if(length(bench_args) == 0) { bench_args <- setup() TRUE } else { bench_args <- setup(bench_args) FALSE } } if(length(bench_args) == 0) { for(bench_i in 1:bench_reps) { run() } } else { for(bench_i in 1:bench_reps) { run(bench_args) } } ''' if __name__ == "__main__": argv = sys.argv argc = int(argv[1]) #this is how many fixed for the rvm rvm_path = argv[2] rvm_cmd = argv[3:(argc+1)] #with all args use_system_time = argv[argc+1] if(use_system_time == 'TRUE'): print('[rbench]Cannot use system.time() for these experiment R VMs. Fall back to meter=time.') rep = argv[argc+2] print(rep) src = argv[argc+3] #the file print(src) #construct the file's full current full path src = os.path.join(os.getcwd(), src) #now generate the source file #use the benchmark file to src_dir = os.path.dirname(src) src_basename = os.path.basename(src) tmpsrc = os.path.join(src_dir, 'rbench_'+src_basename) #then decide whether there are additional args if(len(argv) > argc+4): bench_args = argv[argc+4:] bench_args_str = "bench_args <- c('" + "','".join(bench_args)+ "')\n" else: bench_args_str = "bench_args <- character(0)\n" bench_reps_str = 'bench_reps <- ' + rep +'\n' # now generate the file with open(tmpsrc, 'w') as f: f.write('harness_argc<-1\n') f.write(bench_args_str) f.write(bench_reps_str) with open(src, 'r') as srcf: f.write(srcf.read()) f.write(raw_haress_str) #now start running #need change to the directory os.chdir(rvm_path) rvm_cmd.append(tmpsrc) exit_code = os.system(' '.join(rvm_cmd)) os.remove(tmpsrc) sys.exit(exit_code)
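# For illustration only (the paths and values below are hypothetical): for a
# benchmark bench/foo.R run with bench_reps <- 10 and one extra argument
# '1000', the generated wrapper bench/rbench_foo.R is the concatenation of
#
#   harness_argc<-1
#   bench_args <- c('1000')
#   bench_reps <- 10
#   ...the original contents of bench/foo.R...
#   ...raw_haress_str shown above (the setup()/run() driver loop)...
#
# The wrapper is then run by the R VM command from inside rvm_path and
# deleted once the run finishes.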
python
# Import libraries import numpy as np import matplotlib import matplotlib.pyplot as plt import os import scipy # from scipy.sparse.construct import random import scipy.stats from scipy.stats import arcsine from scipy.interpolate import interp1d from astropy.io import fits import astropy.units as u # WebbPSF import webbpsf from webbpsf.opds import OTE_Linear_Model_WSS from webbpsf.utils import get_webbpsf_data_path # Logging from . import conf from .logging_utils import setup_logging import logging _log = logging.getLogger('webbpsf_ext') # Progress bar from tqdm.auto import trange, tqdm __epsilon = np.finfo(float).eps def OPDFile_to_HDUList(file, slice=0): """ Make a picklable HDUList for ingesting into multiproccessor WebbPSF helper function. """ try: hdul = fits.open(file) except FileNotFoundError: # Check WebbPSF instrument OPD directory if 'NIRCam' in file: inst = 'NIRCam' elif 'MIRI' in file: inst = 'MIRI' elif 'NIRSpec' in file: inst = 'NIRSpec' elif 'NIRISS' in file: inst = 'NIRISS' elif 'FGS' in file: inst = 'FGS' opd_dir = os.path.join(get_webbpsf_data_path(),inst,'OPD') hdul = fits.open(os.path.join(opd_dir, file)) ndim = len(hdul[0].data.shape) if ndim==3: opd_im = hdul[0].data[slice,:,:] else: opd_im = hdul[0].data hdu_new = fits.PrimaryHDU(opd_im) hdu_new.header = hdul[0].header.copy() opd_hdul = fits.HDUList([hdu_new]) hdul.close() return opd_hdul class OTE_WFE_Drift_Model(OTE_Linear_Model_WSS): """ OPD subclass for calculating OPD drift values over time. """ def __init__(self, **kwargs): """ Parameters ---------- opdfile : str or fits.HDUList FITS file to load an OPD from. The OPD must be specified in microns. opd_index : int, optional FITS extension to load OPD from transmission : str or None FITS file for pupil mask, with throughput from 0-1. If not explicitly provided, will be inferred from wherever is nonzero in the OPD file. slice : int, optional Slice of a datacube to load OPD from, if the selected extension contains a datacube. segment_mask_file : str FITS file for pupil mask, with throughput from 0-1. If not explicitly provided, will use JWpupil_segments.fits zero : bool Set an OPD to precisely zero everywhere. rm_ptt : bool Remove piston, tip, and tilt? This is mostly for visualizing the higher order parts of the LOM. Default: False. """ # Initialize OTE_Linear_Model_WSS OTE_Linear_Model_WSS.__init__(self, **kwargs) # Initialize delta OPD normalized images self.dopd_thermal = None self.dopd_frill = None self.dopd_iec = None # Initialize normalized delta OPD images log_prev = conf.logging_level if 'WARN' not in log_prev: setup_logging('WARN', verbose=False) self._calc_delta_opds() if 'WARN' not in log_prev: setup_logging(log_prev, verbose=False) def reset(self, verbose=True): """ Reset an OPD to the state it was loaded from disk. i.e. undo all segment moves. """ self._frill_wfe_amplitude = 0 self._iec_wfe_amplitude = 0 self.opd = self._opd_original.copy() self.segment_state *= 0 if verbose: _log.info("Reset to unperturbed OPD") def _calc_delta_opds(self, thermal=True, frill=True, iec=True): """ Calculate delta OPDs for the three components and save to class properties. Each delta OPD image will be normalized such that the nm RMS WFE is equal to 1. 
""" # Set everything to initial state self.reset(verbose=False) # Calculate thermal dOPD if thermal: self.thermal_slew(1*u.day) # self.opd has now been updated to drifted OPD # Calculate delta OPD and save into self.opd attribute # This is because self.rms() uses the image in self.opd self.opd -= self._opd_original # scale by RMS of delta OPD, and save self.dopd_thermal = self.opd / self.rms() # Calculate frill dOPD if frill: # Explicitly set thermal component to 0 self.thermal_slew(0*u.min, scaling=0, delay_update=True) self.apply_frill_drift(amplitude=1) # self.opd has now been updated to drifted OPD # Temporarily calculate delta and calc rms self.opd -= self._opd_original # scale by RMS of delta OPD, and save self.dopd_frill = self.opd / self.rms() # Calculate IEC dOPD if iec: # Explicitly set thermal and frill components to 0 self.thermal_slew(0*u.min, scaling=0, delay_update=True) self.apply_frill_drift(amplitude=0, delay_update=True) self.apply_iec_drift(amplitude=1) # self.opd has now been updated to drifted OPD # Temporarily calculate delta and calc rms self.opd -= self._opd_original # scale by RMS of delta OPD, and save self.dopd_iec = self.opd / self.rms() # Back to initial state self.reset(verbose=False) def calc_rms(self, arr, segname=None): """Calculate RMS of input images""" # RMS for a single image def rms_im(im): """ Find RMS of an image by excluding pixels with 0s, NaNs, or Infs""" ind = (im != 0) & (np.isfinite(im)) res = 0 if len(im[ind]) == 0 else np.sqrt(np.mean(im[ind] ** 2)) res = 0 if np.isnan(res) else res return res # Reshape into a 3-dimension cube for consistency if len(arr.shape) == 3: nz,ny,nx = arr.shape else: ny,nx = arr.shape nz = 1 arr = arr.reshape([nz,ny,nx]) if segname is None: # RMS of whole aperture rms = np.asarray([rms_im(im) for im in arr]) else: # RMS of specified segment assert (segname in self.segnames) iseg = np.where(self.segnames == segname)[0][0] + 1 # segment index from 1 - 18 seg_mask = self._segment_masks == iseg arr_seg = arr[:,seg_mask] rms = np.asarray([rms_im(im) for im in arr_seg]) # If single image, remove first dimension if nz==1: rms = rms[0] return rms def slew_scaling(self, start_angle, end_angle): """ WFE scaling due to slew angle Scale the WSS Hexike components based on slew pitch angles. Parameters ---------- start_angle : float The starting sun pitch angle, in degrees between -5 and +45 end_angle : float The ending sun pitch angle, in degrees between -5 and +45 """ num = np.sin(np.radians(end_angle)) - np.sin(np.radians(start_angle)) den = np.sin(np.radians(45.)) - np.sin(np.radians(-5.)) return num / den def gen_frill_drift(self, delta_time, start_angle=-5, end_angle=45, case='BOL'): """ Frill WFE drift scaling Function to determine the factor to scale the delta OPD associated with frill tensioning. Returns the RMS WFE (nm) depending on time and slew angles. Parameters ---------- delta_time : astropy.units quantity object The time since a slew occurred. start_angle : float The starting sun pitch angle, in degrees between -5 and +45 end_angle : float The ending sun pitch angle, in degrees between -5 and +45 case : string either "BOL" for current best estimate at beginning of life, or "EOL" for more conservative prediction at end of life. The amplitude of the frill drift is roughly 2x lower for BOL (8.6 nm after 2 days) versus EOL (18.4 nm after 2 days). 
""" frill_hours = np.array([ 0.00, 0.55, 1.00, 1.60, 2.23, 2.85, 3.47, 4.09, 4.71, 5.33, 5.94, 6.56, 7.78, 9.00, 9.60, 11.41, 12.92, 15.02, 18.00, 21.57, 23.94, 26.90, 32.22, 35.76, 41.07, 45.20, 50.50, 100.58 ]) # Normalized frill drift amplitude frill_wfe_drift_norm = np.array([ 0.000, 0.069, 0.120, 0.176, 0.232, 0.277, 0.320, 0.362, 0.404, 0.444, 0.480, 0.514, 0.570, 0.623, 0.648, 0.709, 0.758, 0.807, 0.862, 0.906, 0.930, 0.948, 0.972, 0.981, 0.991, 0.995, 0.998, 1.000 ]) # Create interpolation function finterp = interp1d(frill_hours, frill_wfe_drift_norm, kind='cubic', fill_value=(0, 1), bounds_error=False) # Convert input time to hours and get normalized amplitude time_hour = delta_time.to(u.hour).value amp_norm = finterp(time_hour) # Scale height from either EOL or BOL (nm RMS) # Assuming slew angles from -5 to +45 deg if case=='EOL': wfe_drift_rms = 18.4 * amp_norm elif case=='BOL': wfe_drift_rms = 8.6 * amp_norm else: print(f'case={case} is not recognized') # Get scale factor based on start and end angle solar elongation angles scaling = self.slew_scaling(start_angle, end_angle) wfe_drift_rms *= scaling return wfe_drift_rms def gen_thermal_drift(self, delta_time, start_angle=-5, end_angle=45, case='BOL'): """ Thermal WFE drift scaling Function to determine the factor to scale the delta OPD associated with OTE backplane thermal distortion. Returns the RMS WFE (nm) depending on time and slew angles. Parameters ---------- delta_time : astropy.units quantity object The time since a slew occurred. start_angle : float The starting sun pitch angle, in degrees between -5 and +45 end_angle : float The ending sun pitch angle, in degrees between -5 and +45 case : string either "BOL" for current best estimate at beginning of life, or "EOL" for more conservative prediction at end of life. The amplitude of the frill drift is roughly 3x lower for BOL (13 nm after 14 days) versus EOL (43 nm after 14 days). """ thermal_hours = np.array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22., 23., 24., 48., 72., 96., 120., 144., 168., 192., 216., 240., 264., 288., 312., 336., 360., 384., 408., 432., 456., 480., 800. 
]) thermal_wfe_drift_norm = np.array([ 0.0000, 0.0134, 0.0259, 0.0375, 0.0484, 0.0587, 0.0685, 0.0777, 0.0865, 0.0950, 0.1031, 0.1109, 0.1185, 0.1259, 0.1330, 0.1400, 0.1468, 0.1534, 0.1600, 0.1664, 0.1727, 0.1789, 0.1850, 0.1910, 0.1970, 0.3243, 0.4315, 0.5227, 0.5999, 0.6650, 0.7197, 0.7655, 0.8038, 0.8358, 0.8625, 0.8849, 0.9035, 0.9191, 0.9322, 0.9431, 0.9522, 0.9598, 0.9662, 0.9716, 1.0000 ]) # Create interpolation function finterp = interp1d(thermal_hours, thermal_wfe_drift_norm, kind='cubic', fill_value=(0, 1), bounds_error=False) # Convert input time to hours and get normalized amplitude time_hour = delta_time.to(u.hour).value amp_norm = finterp(time_hour) # Normalize to 14 days (336 hours) amp_norm /= finterp(336) # Scale height from either EOL or BOL (nm RMS) # Assuming full slew angle from -5 to +45 deg if case=='EOL': wfe_drift_rms = 45.0 * amp_norm elif case=='BOL': wfe_drift_rms = 13.0 * amp_norm else: print(f'case={case} is not recognized') # Get scale factor based on start and end angle solar elongation angles scaling = self.slew_scaling(start_angle, end_angle) wfe_drift_rms *= scaling return wfe_drift_rms def gen_iec_series(self, delta_time, amplitude=3.5, period=5.0, interp_kind='linear', random_seed=None): """Create a series of IEC WFE scale factors Create a series of random IEC heater state changes based on arcsine distribution. Parameters ---------- delta_time : astropy.units quantity object array Time series of atropy units to interpolate IEC amplitudes Keyword Args ------------ amplitude : float Full amplitude of arcsine distribution. Values will range from -0.5*amplitude to +0.5*amplitude. period : float Period in minutes of IEC oscillations. Usually 3-5 minutes. random_seed : int Provide a random seed value between 0 and (2**32)-1 to generate reproducible random values. interp_kind : str or int Specifies the kind of interpolation as a string ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'previous', 'next', where 'zero', 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of zeroth, first, second or third order; 'previous' and 'next' simply return the previous or next value of the point) or as an integer specifying the order of the spline interpolator to use. Default is 'linear'. """ # Convert time array to minutes and get values if isinstance(delta_time, (u.Quantity)): time_arr_minutes = np.array(delta_time.to(u.min).value) else: time_arr_minutes = delta_time # Create a series of random IEC heater state changes based on arcsin distribution dt = period nsamp = int(np.max(time_arr_minutes)/dt) + 2 tvals = np.arange(nsamp) * dt # Random values between 0 and 1 arcsine_rand = arcsine.rvs(size=nsamp, random_state=random_seed) # Scale by amplitude wfe_iec_all = arcsine_rand * amplitude - amplitude / 2 # res = np.interp(time_arr_minutes, tvals, wfe_iec_all) finterp = interp1d(tvals, wfe_iec_all, kind=interp_kind, fill_value=0, bounds_error=False) res = finterp(time_arr_minutes) return res def gen_delta_opds(self, delta_time, start_angle=-5, end_angle=45, do_thermal=True, do_frill=True, do_iec=True, case='BOL', return_wfe_amps=True, return_dopd_fin=True, random_seed=None, **kwargs): """Create series of delta OPDs Generate a series of delta OPDS, the result of which is a combination of thermal, frill, and IEC effects. The thermal and frill values are dependent on time, start/end slew angles, and case ('BOL' or 'EOL'). Delta OPD contributions from the IEC heater switching are treated as random state switches assuming an arcsine distribution. 
Parameters ---------- delta_time : astropy.units quantity object An array of times assuming astropy units. start_angle : float The starting sun pitch angle, in degrees between -5 and +45. end_angle : float The ending sun pitch angle, in degrees between -5 and +45. case : string Either "BOL" for current best estimate at beginning of life, or "EOL" for more conservative prediction at end of life. do_thermal : bool Include thermal slew component? Mostly for debugging purposes. do_frill : bool Include frill component? Mostly for debugging purposes. do_iec : bool Include IEC component? Good to exclude if calling this function repeatedly for evolution of multiple slews, then add IEC later. return_wfe_amps : bool Return a dictionary that provides the RMS WFE (nm) of each component at each time step. return_dopd_fin : bool Option to exclude calculating final delta OPD in case we only want the final RMS WFE dictionary. random_seed : int Random seed to pass to IEC generation. """ if (not return_wfe_amps) and (not return_dopd_fin): _log.warning('Must specify `return_wfe_amps` and/or `return_dopd_fin`') return try: nz = len(delta_time) except TypeError: nz = 1 ny,nx = self.opd.shape # Thermal drift amplitudes if do_thermal: amp_thermal = self.gen_thermal_drift(delta_time, case=case, start_angle=start_angle, end_angle=end_angle) else: amp_thermal = np.zeros(nz) if nz>1 else 0 # Frill drift amplitudes if do_frill: amp_frill = self.gen_frill_drift(delta_time, case=case, start_angle=start_angle, end_angle=end_angle) else: amp_frill = np.zeros(nz) if nz>1 else 0 # Random IEC amplitudes if do_iec: amp_iec = self.gen_iec_series(delta_time, random_seed=random_seed, **kwargs) if nz>1: amp_iec[0] = 0 else: amp_iec = np.zeros(nz) if nz>1 else 0 # Add OPD deltas delta_opd_fin = np.zeros([nz,ny,nx]) if do_thermal: amp = np.reshape(amp_thermal, [-1,1,1]) delta_opd_fin += self.dopd_thermal.reshape([1,ny,nx]) * amp if do_frill: amp = np.reshape(amp_frill, [-1,1,1]) delta_opd_fin += self.dopd_frill.reshape([1,ny,nx]) * amp if do_iec: amp = np.reshape(amp_iec, [-1,1,1]) delta_opd_fin += self.dopd_iec.reshape([1,ny,nx]) * amp if nz==1: delta_opd_fin = delta_opd_fin[0] # Get final RMS in nm rms_tot = np.array(self.calc_rms(delta_opd_fin)) * 1e9 wfe_amps = { 'thermal': amp_thermal, 'frill' : amp_frill, 'iec' : amp_iec, 'total' : rms_tot } if return_wfe_amps and return_dopd_fin: return delta_opd_fin, wfe_amps elif return_wfe_amps: return wfe_amps elif return_dopd_fin: return delta_opd_fin else: _log.warning('Must specify `return_wfe_amps` and/or `return_dopd_fin`') def evolve_dopd(self, delta_time, slew_angles, case='BOL', return_wfe_amps=True, return_dopd_fin=True, do_thermal=True, do_frill=True, do_iec=True, **kwargs): """ Evolve the delta OPD with multiple slews Input an array of `delta_time` and `slew_angles` to return the evolution of a delta_OPD image. Option to return the various WFE components, including OTE backplane (thermal), frill tensioning, and IEC heater switching. Parameters ---------- delta_time : astropy.units quantity object An array of times assuming astropy units. slew_angles : ndarray The sun pitch angles, in degrees between -5 and +45. case : string Either "BOL" for current best estimate at beginning of life, or "EOL" for more conservative prediction at end of life. do_thermal : bool Include thermal slew component? Mostly for debugging purposes. do_frill : bool Include frill component? Mostly for debugging purposes. do_iec : bool Include IEC component? 
Good to exclude if calling this function repeatedly for evolution of multiple slews, then add IEC later. return_wfe_amps : bool Return a dictionary that provides the RMS WFE (nm) of each component at each time step. return_dopd_fin : bool Option to exclude calculating final delta OPD in case we only want the final RMS WFE dictionary. Keyword Args ------------ amplitude : float Full amplitude of IEC arcsine distribution. Values will range from -0.5*amplitude to +0.5*amplitude. period : float Period in minutes of IEC oscillations. Usually 3-5 minutes. """ if (not return_wfe_amps) and (not return_dopd_fin): _log.warning('Must specify `return_wfe_amps` and/or `return_dopd_fin`') return log_prev = conf.logging_level if 'WARN' not in log_prev: setup_logging('WARN', verbose=False) # Indices where slews occur islew = np.where(slew_angles[1:] - slew_angles[:-1] != 0)[0] + 1 islew = np.concatenate(([0], islew)) # Build delta OPDs for each slew angle kwargs['case'] = case kwargs['return_wfe_amps'] = return_wfe_amps kwargs['return_dopd_fin'] = True kwargs['do_thermal'] = do_thermal kwargs['do_frill'] = do_frill kwargs['do_iec'] = False for i in tqdm(islew, desc='Slews'): ang1 = slew_angles[0] if i==0 else ang2 ang2 = slew_angles[i] tvals = delta_time[i:] tvals = tvals - tvals[0] res = self.gen_delta_opds(tvals, start_angle=ang1, end_angle=ang2, **kwargs) if return_wfe_amps: dopds, wfe_dict = res else: dopds = res # Accumulate delta OPD images if i==0: dopds_fin = dopds + 0.0 else: dopds_fin[i:] += dopds # Add in drift amplitudes for thermal and frill components if return_wfe_amps: if i==0: wfe_dict_fin = wfe_dict else: for k in wfe_dict.keys(): wfe_dict_fin[k][i:] += wfe_dict[k] del dopds # Get IEC values if do_iec: kwargs['do_thermal'] = False kwargs['do_frill'] = False kwargs['do_iec'] = True res = self.gen_delta_opds(delta_time-delta_time[0], **kwargs) if return_wfe_amps: dopds, wfe_dict = res wfe_dict_fin['iec'] = wfe_dict['iec'] else: dopds = res # Add IEC OPDs dopds_fin += dopds del dopds if 'WARN' not in log_prev: setup_logging(log_prev, verbose=False) # Calculate RMS values on final delta OPDs if return_wfe_amps: wfe_dict_fin['total'] = self.calc_rms(dopds_fin)*1e9 if return_wfe_amps and return_dopd_fin: return dopds_fin, wfe_dict_fin elif return_dopd_fin: return dopds_fin elif return_wfe_amps: return wfe_dict_fin def interp_dopds(self, delta_time, dopds, dt_new, wfe_dict=None, interp_kind='linear', **kwargs): """ Interpolate an array of delta OPDs Perform a linear interpolation on a series of delta OPDS. Parameters ---------- delta_time : astropy.units quantity object An array of times assuming astropy units corresponding to each `dopd`. dopds : ndarray Array of delta OPD images associated with `delta_time`. dt_new : astropy.units quantity object New array to interpolate onto. Keyword Args ------------ wfe_dict : dict or None If specified, then must provide a dictionary where the values for each keywords are the WFE drift components associated with each `delta_time`. Will then return a dictionary interp_kind : str or int Specifies the kind of interpolation as a string ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'previous', 'next', where 'zero', 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of zeroth, first, second or third order; 'previous' and 'next' simply return the previous or next value of the point) or as an integer specifying the order of the spline interpolator to use. Default is 'linear'. 
""" dt_new_vals = dt_new.to('hour') # Create interpolation function dt_vals = delta_time.to('hour') func = interp1d(dt_vals, dopds, axis=0, kind=interp_kind, bounds_error=True) opds_new = func(dt_new_vals) if wfe_dict is not None: wfe_dict_new = {} for k in wfe_dict.keys(): vals = wfe_dict[k] func = interp1d(dt_vals, vals, kind=interp_kind, bounds_error=True) wfe_dict_new[k] = func(dt_new_vals) return opds_new, wfe_dict_new else: return opds_new def slew_pos_averages(self, delta_time, slew_angles, opds=None, wfe_dict=None, mn_func=np.mean, interpolate=False, **kwargs): """ Get averages at each slew position Given a series of times and slew angles, calculate the average OPD and WFE RMS error within each slew angle position. Returns a tuple with new arrays of (dt_new, opds_new, wfe_dict_new). If input both `opds` and `wfe_dict` are not specified, then we call the `evolve_dopd` function and return . Parameters ---------- delta_time : astropy.units quantity object An array of times assuming astropy units. slew_angles : ndarray The sun pitch angles at each `delta_time`, in degrees between -5 and +45. opds : ndarray or None Cube of OPD images (or delta OPDs) associated with each `delta_time`. If set to None, then a new set of OPDs are not calculated. wfe_dict : dict or None If specified, then must provide a dictionary where the values for each keywords are the WFE drift components associated with each `delta_time`. New set of WFE dictionary is not calculated if set to None. mn_func : function Function to use for taking averages. Default: np.mean() interpolate : bool Instead of taking average, use the interpolation function `self.interp_dopds()`. Keyword Args ------------ case : string Either "BOL" for current best estimate at beginning of life, or "EOL" for more conservative prediction at end of life. do_thermal : bool Include thermal slew component? Mostly for debugging purposes. do_frill : bool Include frill component? Mostly for debugging purposes. do_iec : bool Include IEC component? Good to exclude if calling this function repeatedly for evolution of multiple slews, then add IEC later. amplitude : float Full amplitude of IEC arcsine distribution. Values will range from -0.5*amplitude to +0.5*amplitude. period : float Period in minutes of IEC oscillations. Usually 3-5 minutes. kind : str or int Specifies the kind of interpolation (if specified) as a string. Default: 'linear'. 
""" if (opds is None) and (wfe_dict is None): kwargs['return_wfe_amps'] = True kwargs['return_dopd_fin'] = True opds, wfe_dict = self.evolve_dopd(delta_time, slew_angles, **kwargs) # Indices where slews occur islew = np.where(slew_angles[1:] - slew_angles[:-1] != 0)[0] + 1 # Start and stop indices for each slew position i1_arr = np.concatenate(([0], islew)) i2_arr = np.concatenate((islew, [len(slew_angles)])) # Get average time at each position dt_new = np.array([mn_func(delta_time[i1:i2].value) for i1, i2 in zip(i1_arr, i2_arr)]) dt_new = dt_new * delta_time.unit if interpolate: res = self.interp_dopds(delta_time, opds, dt_new, wfe_dict=wfe_dict, **kwargs) if wfe_dict is None: opds_new = res wfe_dict_new = None else: opds_new, wfe_dict_new = res return dt_new, opds_new, wfe_dict_new # Averages of OPD at each position if opds is not None: opds_new = np.array([mn_func(opds[i1:i2], axis=0) for i1, i2 in zip(i1_arr, i2_arr)]) else: opds_new = None # Get average of each WFE drift component if wfe_dict is not None: wfe_dict_new = {} for k in wfe_dict.keys(): wfe_dict_new[k] = np.array([mn_func(wfe_dict[k][i1:i2]) for i1, i2 in zip(i1_arr, i2_arr)]) if opds_new is not None: wfe_dict_new['total'] = self.calc_rms(opds_new)*1e9 else: wfe_dict = None return dt_new, opds_new, wfe_dict_new def opds_as_hdul(self, delta_time, slew_angles, delta_opds=None, wfe_dict=None, case=None, add_main_opd=True, slew_averages=False, return_ind=None, **kwargs): """Convert series of delta OPDS to HDUList""" if delta_opds is None: case = 'BOL' if case is None else case kwargs['case'] = case kwargs['return_wfe_amps'] = True kwargs['return_dopd_fin'] = True delta_opds, wfe_dict = self.evolve_dopd(delta_time, slew_angles, **kwargs) if slew_averages: res = self.slew_pos_averages(delta_time, slew_angles, opds=delta_opds, wfe_dict=wfe_dict, **kwargs) delta_time, delta_opds, wfe_dict = res # Indices where slews occur islew = np.where(slew_angles[1:] - slew_angles[:-1] != 0)[0] + 1 islew = np.concatenate(([0], islew)) slew_angles = slew_angles[islew] nz, ny, nx = delta_opds.shape # Indices where slews occur islew = np.where(slew_angles[1:] - slew_angles[:-1] != 0)[0] + 1 islew = np.concatenate(([0], islew)) hdul = fits.HDUList() for i in range(nz): if i<islew[1]: ang1 = ang2 = slew_angles[i] else: if i in islew: ang1 = slew_angles[i-1] ang2 = slew_angles[i] # Skip if only returning a single OPD if (return_ind is not None) and (i != return_ind): continue # Update header dt = delta_time[i].to(u.day).to_string() opd_im = self._opd_original + delta_opds[i] if add_main_opd else delta_opds[i] hdu = fits.ImageHDU(data=opd_im, header=self.opd_header, name=f'OPD{i}') hdr = hdu.header hdr['BUNIT'] = 'meter' hdr['DELTA_T'] = (dt, "Delta time after initial slew [d]") hdr['STARTANG'] = (ang1, "Starting sun pitch angle [deg]") hdr['ENDANG'] = (ang2, "Ending sun pitch angle [deg]") hdr['THRMCASE'] = (case, "Thermal model case, beginning or end of life") # if add_main_opd: # hdr['OPDSLICE'] = (self.opd_slice, 'OPD slice index') hdr['WFE_RMS'] = (self.calc_rms(hdu.data)*1e9, "RMS WFE [nm]") # Include the WFE RMS inputs from each component if wfe_dict is not None: for k in wfe_dict.keys(): hdr[k] = (wfe_dict[k][i], f"{k} RMS delta WFE [nm]") hdul.append(hdu) return hdul def plot_im(im, fig, ax, vlim=None, add_cbar=True, return_ax=False, extent=None, cmap='RdBu_r'): """ Plot single image on some axes """ if vlim is None: vlim = np.max(np.abs(im)) img = ax.imshow(im, cmap=cmap, vmin=-1*vlim, vmax=+1*vlim, extent=extent) # Add colorbar if 
add_cbar: cbar = fig.colorbar(img, ax=ax) cbar.set_label('Amplitude [nm]') if return_ax and add_cbar: return ax, cbar elif return_ax: return ax def plot_opd(hdul, index=1, opd0=None, vlim1=None, vlim2=None): """ Plot OPDs images (full or delta) """ def calc_rms_nm(im): ind = (im != 0) & (np.isfinite(im)) rms = np.sqrt((im[ind] ** 2).mean()) * 1e9 return rms m_to_nm = 1e9 # Define OPD to compare delta OPD image opd0 = hdul[0].data if opd0 is None else opd0 # Header and data for current image header = hdul[index].header opd = hdul[index].data opd_diff = (opd - opd0) rms_opd = calc_rms_nm(opd) rms_diff = calc_rms_nm(opd_diff) # Time since slew delta_time = header['DELTA_T'] try: pupilscale = header['PUPLSCAL'] s = opd.shape extent = [a * pupilscale for a in [-s[0] / 2, s[0] / 2, -s[1] / 2, s[1] / 2]] except KeyError: extent = None # Create figure fig, axes = plt.subplots(1,2, figsize=(12,5)) ax = axes[0] vlim = 3*rms_opd if vlim1 is None else vlim1 plot_im(opd * m_to_nm, fig, ax, vlim=vlim, extent=extent) data_val, data_units = str.split(delta_time) data_val = np.float(data_val) if 'h' in data_units: dt = data_val * u.hr elif 'm' in data_units: dt = data_val * u.min elif 'd' in data_units: dt = data_val * u.day # Convert to hours dt = dt.to('hr') ax.set_title("Delta Time = {:.1f} (RMS = {:.2f} nm)".format(dt, rms_opd)) ax = axes[1] vlim = 3*rms_diff if vlim2 is None else vlim2 plot_im(opd_diff * m_to_nm, fig, ax, vlim=vlim, extent=extent) ax.set_title("Delta OPD = {:.2f} nm RMS".format(rms_diff)) fig.tight_layout() plt.draw() def slew_time(dist_asec): """ Given a slew distance (arcsec), calculate telescope slew time. Output is sec. Data comes from JDOX website: https://jwst-docs.stsci.edu/jppom/visit-overheads-timing-model/slew-times. """ # Slew value in arcsec slew_arr = np.array([ 0, 0.06, 0.0600001, 15, 20, 20.0000001, 30, 50, 100, 150, 300, 1000, 3600, 4000, 10000, 10800, 10800, 14400, 18000, 21600, 25200, 28800, 32400, 36000, 39600, 43200, 46800, 50400, 54000, 57600, 61200, 64800, 68400, 72000, 108000, 144000, 180000, 216000, 252000, 288000, 324000, 360000, 396000, 432000, 468000, 504000, 540000, 576000, 612000, 648000 ]) # Slew times tsec_arr = np.array([ 0, 0, 20.48, 20.48, 23.296, 101.632, 116.224, 137.728, 173.568, 198.656, 250.112, 373.504, 572.416, 592.896, 804.864, 825.6, 521.216, 578.048, 628.608, 674.56, 716.928, 756.608, 793.856, 829.184, 862.848, 894.976, 925.824, 955.648, 984.32, 1012.224, 1039.104, 1065.344, 1090.816, 1115.648, 1336.448, 1537.408,1744, 1939.328, 2112.192, 2278.272, 2440.32, 2599.936, 2757.632, 2914.24, 3069.888, 3224.832, 3379.328, 3533.376, 3687.104, 3840.512 ]) return np.interp(dist_asec, slew_arr, tsec_arr)
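# Quick illustration (not part of the original module): slew_time() linearly
# interpolates the JDOX table above, so tabulated knots come back exactly and
# intermediate distances fall between their neighbouring entries, e.g.
#
#   >>> slew_time(100)                       # tabulated knot
#   173.568
#   >>> slew_time(3600)                      # 1 degree slew
#   572.416
#   >>> 137.728 < slew_time(75) < 173.568    # between the 50" and 100" knots
#   True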
python
import argparse import contextlib import collections import grp import hashlib import logging import io import json import os import os.path import platform import pwd import re import shlex import signal import socket import stat import subprocess import sys import textwrap import threading import time import uuid from binascii import hexlify from collections import namedtuple, deque, abc, Counter from datetime import datetime, timezone, timedelta from functools import partial, lru_cache from itertools import islice from operator import attrgetter from string import Formatter from shutil import get_terminal_size import msgpack import msgpack.fallback from .logger import create_logger logger = create_logger() import borg.crypto.low_level from . import __version__ as borg_version from . import __version_tuple__ as borg_version_tuple from . import chunker from . import hashindex from .constants import * # NOQA ''' The global exit_code variable is used so that modules other than archiver can increase the program exit code if a warning or error occurred during their operation. This is different from archiver.exit_code, which is only accessible from the archiver object. ''' exit_code = EXIT_SUCCESS def set_ec(ec): ''' Sets the exit code of the program, if an exit code higher or equal than this is set, this does nothing. This makes EXIT_ERROR override EXIT_WARNING, etc.. ec: exit code to set ''' global exit_code exit_code = max(exit_code, ec) return exit_code class Error(Exception): """Error base class""" # if we raise such an Error and it is only catched by the uppermost # exception handler (that exits short after with the given exit_code), # it is always a (fatal and abrupt) EXIT_ERROR, never just a warning. exit_code = EXIT_ERROR # show a traceback? traceback = False def __init__(self, *args): super().__init__(*args) self.args = args def get_message(self): return type(self).__doc__.format(*self.args) __str__ = get_message class ErrorWithTraceback(Error): """like Error, but show a traceback also""" traceback = True class IntegrityError(ErrorWithTraceback): """Data integrity error: {}""" class DecompressionError(IntegrityError): """Decompression error: {}""" class ExtensionModuleError(Error): """The Borg binary extension modules do not seem to be properly installed""" class NoManifestError(Error): """Repository has no manifest.""" class PlaceholderError(Error): """Formatting Error: "{}".format({}): {}({})""" class InvalidPlaceholder(PlaceholderError): """Invalid placeholder "{}" in string: {}""" class PythonLibcTooOld(Error): """FATAL: this Python was compiled for a too old (g)libc and misses required functionality.""" def check_python(): required_funcs = {os.stat, os.utime, os.chown} if not os.supports_follow_symlinks.issuperset(required_funcs): raise PythonLibcTooOld def check_extension_modules(): from . 
import platform, compress, item if hashindex.API_VERSION != '1.1_01': raise ExtensionModuleError if chunker.API_VERSION != '1.1_01': raise ExtensionModuleError if compress.API_VERSION != '1.1_03': raise ExtensionModuleError if borg.crypto.low_level.API_VERSION != '1.1_01': raise ExtensionModuleError if platform.API_VERSION != platform.OS_API_VERSION != '1.1_01': raise ExtensionModuleError if item.API_VERSION != '1.1_02': raise ExtensionModuleError ArchiveInfo = namedtuple('ArchiveInfo', 'name id ts') class Archives(abc.MutableMapping): """ Nice wrapper around the archives dict, making sure only valid types/values get in and we can deal with str keys (and it internally encodes to byte keys) and either str timestamps or datetime timestamps. """ def __init__(self): # key: encoded archive name, value: dict(b'id': bytes_id, b'time': bytes_iso_ts) self._archives = {} def __len__(self): return len(self._archives) def __iter__(self): return iter(safe_decode(name) for name in self._archives) def __getitem__(self, name): assert isinstance(name, str) _name = safe_encode(name) values = self._archives.get(_name) if values is None: raise KeyError ts = parse_timestamp(values[b'time'].decode('utf-8')) return ArchiveInfo(name=name, id=values[b'id'], ts=ts) def __setitem__(self, name, info): assert isinstance(name, str) name = safe_encode(name) assert isinstance(info, tuple) id, ts = info assert isinstance(id, bytes) if isinstance(ts, datetime): ts = ts.replace(tzinfo=None).isoformat() assert isinstance(ts, str) ts = ts.encode() self._archives[name] = {b'id': id, b'time': ts} def __delitem__(self, name): assert isinstance(name, str) name = safe_encode(name) del self._archives[name] def list(self, sort_by=(), reverse=False, prefix='', first=None, last=None): """ Inexpensive Archive.list_archives replacement if we just need .name, .id, .ts Returns list of borg.helpers.ArchiveInfo instances. sort_by can be a list of sort keys, they are applied in reverse order. 
""" if isinstance(sort_by, (str, bytes)): raise TypeError('sort_by must be a sequence of str') archives = [x for x in self.values() if x.name.startswith(prefix)] for sortkey in reversed(sort_by): archives.sort(key=attrgetter(sortkey)) if reverse or last: archives.reverse() n = first or last or len(archives) return archives[:n] def list_considering(self, args): """ get a list of archives, considering --first/last/prefix/sort cmdline args """ if args.location.archive: raise Error('The options --first, --last and --prefix can only be used on repository targets.') return self.list(sort_by=args.sort_by.split(','), prefix=args.prefix, first=args.first, last=args.last) def set_raw_dict(self, d): """set the dict we get from the msgpack unpacker""" for k, v in d.items(): assert isinstance(k, bytes) assert isinstance(v, dict) and b'id' in v and b'time' in v self._archives[k] = v def get_raw_dict(self): """get the dict we can give to the msgpack packer""" return self._archives class Manifest: MANIFEST_ID = b'\0' * 32 def __init__(self, key, repository, item_keys=None): self.archives = Archives() self.config = {} self.key = key self.repository = repository self.item_keys = frozenset(item_keys) if item_keys is not None else ITEM_KEYS self.tam_verified = False self.timestamp = None @property def id_str(self): return bin_to_hex(self.id) @property def last_timestamp(self): return datetime.strptime(self.timestamp, "%Y-%m-%dT%H:%M:%S.%f") @classmethod def load(cls, repository, key=None, force_tam_not_required=False): from .item import ManifestItem from .crypto.key import key_factory, tam_required_file, tam_required from .repository import Repository try: cdata = repository.get(cls.MANIFEST_ID) except Repository.ObjectNotFound: raise NoManifestError if not key: key = key_factory(repository, cdata) manifest = cls(key, repository) data = key.decrypt(None, cdata) manifest_dict, manifest.tam_verified = key.unpack_and_verify_manifest(data, force_tam_not_required=force_tam_not_required) m = ManifestItem(internal_dict=manifest_dict) manifest.id = key.id_hash(data) if m.get('version') != 1: raise ValueError('Invalid manifest version') manifest.archives.set_raw_dict(m.archives) manifest.timestamp = m.get('timestamp') manifest.config = m.config # valid item keys are whatever is known in the repo or every key we know manifest.item_keys = ITEM_KEYS | frozenset(key.decode() for key in m.get('item_keys', [])) if manifest.tam_verified: manifest_required = manifest.config.get(b'tam_required', False) security_required = tam_required(repository) if manifest_required and not security_required: logger.debug('Manifest is TAM verified and says TAM is required, updating security database...') file = tam_required_file(repository) open(file, 'w').close() if not manifest_required and security_required: logger.debug('Manifest is TAM verified and says TAM is *not* required, updating security database...') os.unlink(tam_required_file(repository)) return manifest, key def write(self): from .item import ManifestItem if self.key.tam_required: self.config[b'tam_required'] = True # self.timestamp needs to be strictly monotonically increasing. 
Clocks often are not set correctly if self.timestamp is None: self.timestamp = datetime.utcnow().isoformat() else: prev_ts = self.last_timestamp incremented = (prev_ts + timedelta(microseconds=1)).isoformat() self.timestamp = max(incremented, datetime.utcnow().isoformat()) manifest = ManifestItem( version=1, archives=StableDict(self.archives.get_raw_dict()), timestamp=self.timestamp, config=StableDict(self.config), item_keys=tuple(sorted(self.item_keys)), ) self.tam_verified = True data = self.key.pack_and_authenticate_metadata(manifest.as_dict()) self.id = self.key.id_hash(data) self.repository.put(self.MANIFEST_ID, self.key.encrypt(data)) def prune_within(archives, within): multiplier = {'H': 1, 'd': 24, 'w': 24 * 7, 'm': 24 * 31, 'y': 24 * 365} try: hours = int(within[:-1]) * multiplier[within[-1]] except (KeyError, ValueError): # I don't like how this displays the original exception too: raise argparse.ArgumentTypeError('Unable to parse --keep-within option: "%s"' % within) if hours <= 0: raise argparse.ArgumentTypeError('Number specified using --keep-within option must be positive') target = datetime.now(timezone.utc) - timedelta(seconds=hours * 3600) return [a for a in archives if a.ts > target] def prune_split(archives, pattern, n, skip=[]): last = None keep = [] if n == 0: return keep for a in sorted(archives, key=attrgetter('ts'), reverse=True): period = to_localtime(a.ts).strftime(pattern) if period != last: last = period if a not in skip: keep.append(a) if len(keep) == n: break return keep def get_home_dir(): """Get user's home directory while preferring a possibly set HOME environment variable """ # os.path.expanduser() behaves differently for '~' and '~someuser' as # parameters: when called with an explicit username, the possibly set # environment variable HOME is no longer respected. So we have to check if # it is set and only expand the user's home directory if HOME is unset. if os.environ.get('HOME', ''): return os.environ.get('HOME') else: return os.path.expanduser('~%s' % os.environ.get('USER', '')) def get_keys_dir(): """Determine where to repository keys and cache""" xdg_config = os.environ.get('XDG_CONFIG_HOME', os.path.join(get_home_dir(), '.config')) keys_dir = os.environ.get('BORG_KEYS_DIR', os.path.join(xdg_config, 'borg', 'keys')) if not os.path.exists(keys_dir): os.makedirs(keys_dir) os.chmod(keys_dir, stat.S_IRWXU) return keys_dir def get_security_dir(repository_id=None): """Determine where to store local security information.""" xdg_config = os.environ.get('XDG_CONFIG_HOME', os.path.join(get_home_dir(), '.config')) security_dir = os.environ.get('BORG_SECURITY_DIR', os.path.join(xdg_config, 'borg', 'security')) if repository_id: security_dir = os.path.join(security_dir, repository_id) if not os.path.exists(security_dir): os.makedirs(security_dir) os.chmod(security_dir, stat.S_IRWXU) return security_dir def get_cache_dir(): """Determine where to repository keys and cache""" xdg_cache = os.environ.get('XDG_CACHE_HOME', os.path.join(get_home_dir(), '.cache')) cache_dir = os.environ.get('BORG_CACHE_DIR', os.path.join(xdg_cache, 'borg')) if not os.path.exists(cache_dir): os.makedirs(cache_dir) os.chmod(cache_dir, stat.S_IRWXU) with open(os.path.join(cache_dir, CACHE_TAG_NAME), 'wb') as fd: fd.write(CACHE_TAG_CONTENTS) fd.write(textwrap.dedent(""" # This file is a cache directory tag created by Borg. 
# For information about cache directory tags, see: # http://www.brynosaurus.com/cachedir/ """).encode('ascii')) return cache_dir def to_localtime(ts): """Convert datetime object from UTC to local time zone""" return datetime(*time.localtime((ts - datetime(1970, 1, 1, tzinfo=timezone.utc)).total_seconds())[:6]) def parse_timestamp(timestamp): """Parse a ISO 8601 timestamp string""" if '.' in timestamp: # microseconds might not be present return datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f').replace(tzinfo=timezone.utc) else: return datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) def timestamp(s): """Convert a --timestamp=s argument to a datetime object""" try: # is it pointing to a file / directory? ts = safe_s(os.stat(s).st_mtime) return datetime.utcfromtimestamp(ts) except OSError: # didn't work, try parsing as timestamp. UTC, no TZ, no microsecs support. for format in ('%Y-%m-%dT%H:%M:%SZ', '%Y-%m-%dT%H:%M:%S+00:00', '%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S', '%Y-%m-%dT%H:%M', '%Y-%m-%d %H:%M', '%Y-%m-%d', '%Y-%j', ): try: return datetime.strptime(s, format) except ValueError: continue raise ValueError def ChunkerParams(s): if s.strip().lower() == "default": return CHUNKER_PARAMS chunk_min, chunk_max, chunk_mask, window_size = s.split(',') if int(chunk_max) > 23: raise ValueError('max. chunk size exponent must not be more than 23 (2^23 = 8MiB max. chunk size)') return int(chunk_min), int(chunk_max), int(chunk_mask), int(window_size) def dir_is_cachedir(path): """Determines whether the specified path is a cache directory (and therefore should potentially be excluded from the backup) according to the CACHEDIR.TAG protocol (http://www.brynosaurus.com/cachedir/spec.html). """ tag_path = os.path.join(path, CACHE_TAG_NAME) try: if os.path.exists(tag_path): with open(tag_path, 'rb') as tag_file: tag_data = tag_file.read(len(CACHE_TAG_CONTENTS)) if tag_data == CACHE_TAG_CONTENTS: return True except OSError: pass return False def dir_is_tagged(path, exclude_caches, exclude_if_present): """Determines whether the specified path is excluded by being a cache directory or containing user-specified tag files/directories. Returns a list of the paths of the tag files/directories (either CACHEDIR.TAG or the matching user-specified files/directories). 
""" tag_paths = [] if exclude_caches and dir_is_cachedir(path): tag_paths.append(os.path.join(path, CACHE_TAG_NAME)) if exclude_if_present is not None: for tag in exclude_if_present: tag_path = os.path.join(path, tag) if os.path.exists(tag_path): tag_paths.append(tag_path) return tag_paths def partial_format(format, mapping): """ Apply format.format_map(mapping) while preserving unknown keys Does not support attribute access, indexing and ![rsa] conversions """ for key, value in mapping.items(): key = re.escape(key) format = re.sub(r'(?<!\{)((\{%s\})|(\{%s:[^\}]*\}))' % (key, key), lambda match: match.group(1).format_map(mapping), format) return format class DatetimeWrapper: def __init__(self, dt): self.dt = dt def __format__(self, format_spec): if format_spec == '': format_spec = '%Y-%m-%dT%H:%M:%S' return self.dt.__format__(format_spec) def format_line(format, data): for _, key, _, conversion in Formatter().parse(format): if not key: continue if conversion or key not in data: raise InvalidPlaceholder(key, format) try: return format.format_map(data) except Exception as e: raise PlaceholderError(format, data, e.__class__.__name__, str(e)) def replace_placeholders(text): """Replace placeholders in text with their values.""" current_time = datetime.now() data = { 'pid': os.getpid(), 'fqdn': socket.getfqdn(), 'hostname': socket.gethostname(), 'now': DatetimeWrapper(current_time.now()), 'utcnow': DatetimeWrapper(current_time.utcnow()), 'user': uid2user(os.getuid(), os.getuid()), 'uuid4': str(uuid.uuid4()), 'borgversion': borg_version, 'borgmajor': '%d' % borg_version_tuple[:1], 'borgminor': '%d.%d' % borg_version_tuple[:2], 'borgpatch': '%d.%d.%d' % borg_version_tuple[:3], } return format_line(text, data) PrefixSpec = replace_placeholders HUMAN_SORT_KEYS = ['timestamp'] + list(ArchiveInfo._fields) HUMAN_SORT_KEYS.remove('ts') def SortBySpec(text): for token in text.split(','): if token not in HUMAN_SORT_KEYS: raise ValueError('Invalid sort key: %s' % token) return text.replace('timestamp', 'ts') # Not too rarely, we get crappy timestamps from the fs, that overflow some computations. # As they are crap anyway (valid filesystem timestamps always refer to the past up to # the present, but never to the future), nothing is lost if we just clamp them to the # maximum value we can support. # As long as people are using borg on 32bit platforms to access borg archives, we must # keep this value True. But we can expect that we can stop supporting 32bit platforms # well before coming close to the year 2038, so this will never be a practical problem. SUPPORT_32BIT_PLATFORMS = True # set this to False before y2038. if SUPPORT_32BIT_PLATFORMS: # second timestamps will fit into a signed int32 (platform time_t limit). # nanosecond timestamps thus will naturally fit into a signed int64. # subtract last 48h to avoid any issues that could be caused by tz calculations. # this is in the year 2038, so it is also less than y9999 (which is a datetime internal limit). # msgpack can pack up to uint64. MAX_S = 2**31-1 - 48*3600 MAX_NS = MAX_S * 1000000000 else: # nanosecond timestamps will fit into a signed int64. # subtract last 48h to avoid any issues that could be caused by tz calculations. # this is in the year 2262, so it is also less than y9999 (which is a datetime internal limit). # round down to 1e9 multiple, so MAX_NS corresponds precisely to a integer MAX_S. # msgpack can pack up to uint64. 
MAX_NS = (2**63-1 - 48*3600*1000000000) // 1000000000 * 1000000000 MAX_S = MAX_NS // 1000000000 def safe_s(ts): if 0 <= ts <= MAX_S: return ts elif ts < 0: return 0 else: return MAX_S def safe_ns(ts): if 0 <= ts <= MAX_NS: return ts elif ts < 0: return 0 else: return MAX_NS def safe_timestamp(item_timestamp_ns): t_ns = safe_ns(item_timestamp_ns) return datetime.fromtimestamp(t_ns / 1e9) def format_time(t): """use ISO-8601 date and time format """ return t.strftime('%a, %Y-%m-%d %H:%M:%S') def format_timedelta(td): """Format timedelta in a human friendly format """ ts = td.total_seconds() s = ts % 60 m = int(ts / 60) % 60 h = int(ts / 3600) % 24 txt = '%.2f seconds' % s if m: txt = '%d minutes %s' % (m, txt) if h: txt = '%d hours %s' % (h, txt) if td.days: txt = '%d days %s' % (td.days, txt) return txt def format_file_size(v, precision=2, sign=False): """Format file size into a human friendly format """ return sizeof_fmt_decimal(v, suffix='B', sep=' ', precision=precision, sign=sign) class FileSize(int): def __format__(self, format_spec): return format_file_size(int(self)).__format__(format_spec) def parse_file_size(s): """Return int from file size (1234, 55G, 1.7T).""" if not s: return int(s) # will raise suffix = s[-1] power = 1000 try: factor = { 'K': power, 'M': power**2, 'G': power**3, 'T': power**4, 'P': power**5, }[suffix] s = s[:-1] except KeyError: factor = 1 return int(float(s) * factor) def sizeof_fmt(num, suffix='B', units=None, power=None, sep='', precision=2, sign=False): prefix = '+' if sign and num > 0 else '' for unit in units[:-1]: if abs(round(num, precision)) < power: if isinstance(num, int): return "{}{}{}{}{}".format(prefix, num, sep, unit, suffix) else: return "{}{:3.{}f}{}{}{}".format(prefix, num, precision, sep, unit, suffix) num /= float(power) return "{}{:.{}f}{}{}{}".format(prefix, num, precision, sep, units[-1], suffix) def sizeof_fmt_iec(num, suffix='B', sep='', precision=2, sign=False): return sizeof_fmt(num, suffix=suffix, sep=sep, precision=precision, sign=sign, units=['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi'], power=1024) def sizeof_fmt_decimal(num, suffix='B', sep='', precision=2, sign=False): return sizeof_fmt(num, suffix=suffix, sep=sep, precision=precision, sign=sign, units=['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y'], power=1000) def format_archive(archive): return '%-36s %s [%s]' % ( archive.name, format_time(to_localtime(archive.ts)), bin_to_hex(archive.id), ) class Buffer: """ provide a thread-local buffer """ class MemoryLimitExceeded(Error, OSError): """Requested buffer size {} is above the limit of {}.""" def __init__(self, allocator, size=4096, limit=None): """ Initialize the buffer: use allocator(size) call to allocate a buffer. Optionally, set the upper <limit> for the buffer size. """ assert callable(allocator), 'must give alloc(size) function as first param' assert limit is None or size <= limit, 'initial size must be <= limit' self._thread_local = threading.local() self.allocator = allocator self.limit = limit self.resize(size, init=True) def __len__(self): return len(self._thread_local.buffer) def resize(self, size, init=False): """ resize the buffer - to avoid frequent reallocation, we usually always grow (if needed). giving init=True it is possible to first-time initialize or shrink the buffer. if a buffer size beyond the limit is requested, raise Buffer.MemoryLimitExceeded (OSError). 
""" size = int(size) if self.limit is not None and size > self.limit: raise Buffer.MemoryLimitExceeded(size, self.limit) if init or len(self) < size: self._thread_local.buffer = self.allocator(size) def get(self, size=None, init=False): """ return a buffer of at least the requested size (None: any current size). init=True can be given to trigger shrinking of the buffer to the given size. """ if size is not None: self.resize(size, init) return self._thread_local.buffer @lru_cache(maxsize=None) def uid2user(uid, default=None): try: return pwd.getpwuid(uid).pw_name except KeyError: return default @lru_cache(maxsize=None) def user2uid(user, default=None): try: return user and pwd.getpwnam(user).pw_uid except KeyError: return default @lru_cache(maxsize=None) def gid2group(gid, default=None): try: return grp.getgrgid(gid).gr_name except KeyError: return default @lru_cache(maxsize=None) def group2gid(group, default=None): try: return group and grp.getgrnam(group).gr_gid except KeyError: return default def posix_acl_use_stored_uid_gid(acl): """Replace the user/group field with the stored uid/gid """ entries = [] for entry in safe_decode(acl).split('\n'): if entry: fields = entry.split(':') if len(fields) == 4: entries.append(':'.join([fields[0], fields[3], fields[2]])) else: entries.append(entry) return safe_encode('\n'.join(entries)) def safe_decode(s, coding='utf-8', errors='surrogateescape'): """decode bytes to str, with round-tripping "invalid" bytes""" if s is None: return None return s.decode(coding, errors) def safe_encode(s, coding='utf-8', errors='surrogateescape'): """encode str to bytes, with round-tripping "invalid" bytes""" if s is None: return None return s.encode(coding, errors) def bin_to_hex(binary): return hexlify(binary).decode('ascii') class Location: """Object representing a repository / archive location """ proto = user = _host = port = path = archive = None # user must not contain "@", ":" or "/". # Quoting adduser error message: # "To avoid problems, the username should consist only of letters, digits, # underscores, periods, at signs and dashes, and not start with a dash # (as defined by IEEE Std 1003.1-2001)." # We use "@" as separator between username and hostname, so we must # disallow it within the pure username part. optional_user_re = r""" (?:(?P<user>[^@:/]+)@)? """ # path must not contain :: (it ends at :: or string end), but may contain single colons. # to avoid ambiguities with other regexes, it must also not start with ":" nor with "//" nor with "ssh://". scp_path_re = r""" (?!(:|//|ssh://)) # not starting with ":" or // or ssh:// (?P<path>([^:]|(:(?!:)))+) # any chars, but no "::" """ # file_path must not contain :: (it ends at :: or string end), but may contain single colons. # it must start with a / and that slash is part of the path. file_path_re = r""" (?P<path>(([^/]*)/([^:]|(:(?!:)))+)) # start opt. servername, then /, then any chars, but no "::" """ # abs_path must not contain :: (it ends at :: or string end), but may contain single colons. # it must start with a / and that slash is part of the path. abs_path_re = r""" (?P<path>(/([^:]|(:(?!:)))+)) # start with /, then any chars, but no "::" """ # optional ::archive_name at the end, archive name must not contain "/". # borg mount's FUSE filesystem creates one level of directories from # the archive names and of course "/" is not valid in a directory name. 
optional_archive_re = r""" (?: :: # "::" as separator (?P<archive>[^/]+) # archive name must not contain "/" )?$""" # must match until the end # regexes for misc. kinds of supported location specifiers: ssh_re = re.compile(r""" (?P<proto>ssh):// # ssh:// """ + optional_user_re + r""" # user@ (optional) (?P<host>([^:/]+|\[[0-9a-fA-F:.]+\]))(?::(?P<port>\d+))? # host or host:port or [ipv6] or [ipv6]:port """ + abs_path_re + optional_archive_re, re.VERBOSE) # path or path::archive file_re = re.compile(r""" (?P<proto>file):// # file:// """ + file_path_re + optional_archive_re, re.VERBOSE) # servername/path, path or path::archive # note: scp_re is also use for local paths scp_re = re.compile(r""" ( """ + optional_user_re + r""" # user@ (optional) (?P<host>([^:/]+|\[[0-9a-fA-F:.]+\])): # host: (don't match / or [ipv6] in host to disambiguate from file:) )? # user@host: part is optional """ + scp_path_re + optional_archive_re, re.VERBOSE) # path with optional archive # get the repo from BORG_REPO env and the optional archive from param. # if the syntax requires giving REPOSITORY (see "borg mount"), # use "::" to let it use the env var. # if REPOSITORY argument is optional, it'll automatically use the env. env_re = re.compile(r""" # the repo part is fetched from BORG_REPO (?:::$) # just "::" is ok (when a pos. arg is required, no archive) | # or """ + optional_archive_re, re.VERBOSE) # archive name (optional, may be empty) def __init__(self, text=''): self.orig = text if not self.parse(self.orig): raise ValueError('Location: parse failed: %s' % self.orig) def parse(self, text): text = replace_placeholders(text) valid = self._parse(text) if valid: return True m = self.env_re.match(text) if not m: return False repo = os.environ.get('BORG_REPO') if repo is None: return False valid = self._parse(repo) if not valid: return False self.archive = m.group('archive') return True def _parse(self, text): def normpath_special(p): # avoid that normpath strips away our relative path hack and even makes p absolute relative = p.startswith('/./') p = os.path.normpath(p) return ('/.' + p) if relative else p m = self.ssh_re.match(text) if m: self.proto = m.group('proto') self.user = m.group('user') self._host = m.group('host') self.port = m.group('port') and int(m.group('port')) or None self.path = normpath_special(m.group('path')) self.archive = m.group('archive') return True m = self.file_re.match(text) if m: self.proto = m.group('proto') self.path = normpath_special(m.group('path')) self.archive = m.group('archive') return True m = self.scp_re.match(text) if m: self.user = m.group('user') self._host = m.group('host') self.path = normpath_special(m.group('path')) self.archive = m.group('archive') self.proto = self._host and 'ssh' or 'file' return True return False def __str__(self): items = [ 'proto=%r' % self.proto, 'user=%r' % self.user, 'host=%r' % self.host, 'port=%r' % self.port, 'path=%r' % self.path, 'archive=%r' % self.archive, ] return ', '.join(items) def to_key_filename(self): name = re.sub('[^\w]', '_', self.path).strip('_') if self.proto != 'file': name = re.sub('[^\w]', '_', self.host) + '__' + name if len(name) > 100: # Limit file names to some reasonable length. Most file systems # limit them to 255 [unit of choice]; due to variations in unicode # handling we truncate to 100 *characters*. 
name = name[:100] return os.path.join(get_keys_dir(), name) def __repr__(self): return "Location(%s)" % self @property def host(self): # strip square brackets used for IPv6 addrs if self._host is not None: return self._host.lstrip('[').rstrip(']') def canonical_path(self): if self.proto == 'file': return self.path else: if self.path and self.path.startswith('~'): path = '/' + self.path # /~/x = path x relative to home dir elif self.path and not self.path.startswith('/'): path = '/./' + self.path # /./x = path x relative to cwd else: path = self.path return 'ssh://{}{}{}{}'.format('{}@'.format(self.user) if self.user else '', self._host, # needed for ipv6 addrs ':{}'.format(self.port) if self.port else '', path) def location_validator(archive=None): def validator(text): try: loc = Location(text) except ValueError: raise argparse.ArgumentTypeError('Invalid location format: "%s"' % text) from None if archive is True and not loc.archive: raise argparse.ArgumentTypeError('"%s": No archive specified' % text) elif archive is False and loc.archive: raise argparse.ArgumentTypeError('"%s" No archive can be specified' % text) return loc return validator def archivename_validator(): def validator(text): if '/' in text or '::' in text or not text: raise argparse.ArgumentTypeError('Invalid repository name: "%s"' % text) return text return validator def decode_dict(d, keys, encoding='utf-8', errors='surrogateescape'): for key in keys: if isinstance(d.get(key), bytes): d[key] = d[key].decode(encoding, errors) return d def prepare_dump_dict(d): def decode_bytes(value): # this should somehow be reversible later, but usual strings should # look nice and chunk ids should mostly show in hex. Use a special # inband signaling character (ASCII DEL) to distinguish between # decoded and hex mode. if not value.startswith(b'\x7f'): try: value = value.decode() return value except UnicodeDecodeError: pass return '\u007f' + bin_to_hex(value) def decode_tuple(t): res = [] for value in t: if isinstance(value, dict): value = decode(value) elif isinstance(value, tuple) or isinstance(value, list): value = decode_tuple(value) elif isinstance(value, bytes): value = decode_bytes(value) res.append(value) return res def decode(d): res = collections.OrderedDict() for key, value in d.items(): if isinstance(value, dict): value = decode(value) elif isinstance(value, (tuple, list)): value = decode_tuple(value) elif isinstance(value, bytes): value = decode_bytes(value) if isinstance(key, bytes): key = key.decode() res[key] = value return res return decode(d) def remove_surrogates(s, errors='replace'): """Replace surrogates generated by fsdecode with '?' """ return s.encode('utf-8', errors).decode('utf-8') _safe_re = re.compile(r'^((\.\.)?/+)+') def make_path_safe(path): """Make path safe by making it relative and local """ return _safe_re.sub('', path) or '.' 
def daemonize(): """Detach process from controlling terminal and run in background """ pid = os.fork() if pid: os._exit(0) os.setsid() pid = os.fork() if pid: os._exit(0) os.chdir('/') os.close(0) os.close(1) os.close(2) fd = os.open(os.devnull, os.O_RDWR) os.dup2(fd, 0) os.dup2(fd, 1) os.dup2(fd, 2) class StableDict(dict): """A dict subclass with stable items() ordering""" def items(self): return sorted(super().items()) def bigint_to_int(mtime): """Convert bytearray to int """ if isinstance(mtime, bytes): return int.from_bytes(mtime, 'little', signed=True) return mtime def int_to_bigint(value): """Convert integers larger than 64 bits to bytearray Smaller integers are left alone """ if value.bit_length() > 63: return value.to_bytes((value.bit_length() + 9) // 8, 'little', signed=True) return value def is_slow_msgpack(): return msgpack.Packer is msgpack.fallback.Packer FALSISH = ('No', 'NO', 'no', 'N', 'n', '0', ) TRUISH = ('Yes', 'YES', 'yes', 'Y', 'y', '1', ) DEFAULTISH = ('Default', 'DEFAULT', 'default', 'D', 'd', '', ) def yes(msg=None, false_msg=None, true_msg=None, default_msg=None, retry_msg=None, invalid_msg=None, env_msg='{} (from {})', falsish=FALSISH, truish=TRUISH, defaultish=DEFAULTISH, default=False, retry=True, env_var_override=None, ofile=None, input=input, prompt=True, msgid=None): """Output <msg> (usually a question) and let user input an answer. Qualifies the answer according to falsish, truish and defaultish as True, False or <default>. If it didn't qualify and retry is False (no retries wanted), return the default [which defaults to False]. If retry is True let user retry answering until answer is qualified. If env_var_override is given and this var is present in the environment, do not ask the user, but just use the env var contents as answer as if it was typed in. Otherwise read input from stdin and proceed as normal. If EOF is received instead an input or an invalid input without retry possibility, return default. :param msg: introducing message to output on ofile, no \n is added [None] :param retry_msg: retry message to output on ofile, no \n is added [None] :param false_msg: message to output before returning False [None] :param true_msg: message to output before returning True [None] :param default_msg: message to output before returning a <default> [None] :param invalid_msg: message to output after a invalid answer was given [None] :param env_msg: message to output when using input from env_var_override ['{} (from {})'], needs to have 2 placeholders for answer and env var name :param falsish: sequence of answers qualifying as False :param truish: sequence of answers qualifying as True :param defaultish: sequence of answers qualifying as <default> :param default: default return value (defaultish answer was given or no-answer condition) [False] :param retry: if True and input is incorrect, retry. Otherwise return default. 
[True] :param env_var_override: environment variable name [None] :param ofile: output stream [sys.stderr] :param input: input function [input from builtins] :return: boolean answer value, True or False """ def output(msg, msg_type, is_prompt=False, **kwargs): json_output = getattr(logging.getLogger('borg'), 'json', False) if json_output: kwargs.update(dict( type='question_%s' % msg_type, msgid=msgid, message=msg, )) print(json.dumps(kwargs), file=sys.stderr) else: if is_prompt: print(msg, file=ofile, end='', flush=True) else: print(msg, file=ofile) msgid = msgid or env_var_override # note: we do not assign sys.stderr as default above, so it is # really evaluated NOW, not at function definition time. if ofile is None: ofile = sys.stderr if default not in (True, False): raise ValueError("invalid default value, must be True or False") if msg: output(msg, 'prompt', is_prompt=True) while True: answer = None if env_var_override: answer = os.environ.get(env_var_override) if answer is not None and env_msg: output(env_msg.format(answer, env_var_override), 'env_answer', env_var=env_var_override) if answer is None: if not prompt: return default try: answer = input() except EOFError: # avoid defaultish[0], defaultish could be empty answer = truish[0] if default else falsish[0] if answer in defaultish: if default_msg: output(default_msg, 'accepted_default') return default if answer in truish: if true_msg: output(true_msg, 'accepted_true') return True if answer in falsish: if false_msg: output(false_msg, 'accepted_false') return False # if we get here, the answer was invalid if invalid_msg: output(invalid_msg, 'invalid_answer') if not retry: return default if retry_msg: output(retry_msg, 'prompt_retry', is_prompt=True) # in case we used an environment variable and it gave an invalid answer, do not use it again: env_var_override = None def hostname_is_unique(): return yes(env_var_override='BORG_HOSTNAME_IS_UNIQUE', prompt=False, env_msg=None, default=True) def ellipsis_truncate(msg, space): """ shorten a long string by adding ellipsis between it and return it, example: this_is_a_very_long_string -------> this_is..._string """ from .platform import swidth ellipsis_width = swidth('...') msg_width = swidth(msg) if space < 8: # if there is very little space, just show ... return '...' + ' ' * (space - ellipsis_width) if space < ellipsis_width + msg_width: return '%s...%s' % (swidth_slice(msg, space // 2 - ellipsis_width), swidth_slice(msg, -space // 2)) return msg + ' ' * (space - msg_width) class ProgressIndicatorBase: LOGGER = 'borg.output.progress' JSON_TYPE = None json = False operation_id_counter = 0 @classmethod def operation_id(cls): """Unique number, can be used by receiving applications to distinguish different operations.""" cls.operation_id_counter += 1 return cls.operation_id_counter def __init__(self, msgid=None): self.handler = None self.logger = logging.getLogger(self.LOGGER) self.id = self.operation_id() self.msgid = msgid # If there are no handlers, set one up explicitly because the # terminator and propagation needs to be set. If there are, # they must have been set up by BORG_LOGGING_CONF: skip setup. 
if not self.logger.handlers: self.handler = logging.StreamHandler(stream=sys.stderr) self.handler.setLevel(logging.INFO) logger = logging.getLogger('borg') # Some special attributes on the borg logger, created by setup_logging # But also be able to work without that try: formatter = logger.formatter terminator = '\n' if logger.json else '\r' self.json = logger.json except AttributeError: terminator = '\r' else: self.handler.setFormatter(formatter) self.handler.terminator = terminator self.logger.addHandler(self.handler) if self.logger.level == logging.NOTSET: self.logger.setLevel(logging.WARN) self.logger.propagate = False # If --progress is not set then the progress logger level will be WARN # due to setup_implied_logging (it may be NOTSET with a logging config file, # but the interactions there are generally unclear), so self.emit becomes # False, which is correct. # If --progress is set then the level will be INFO as per setup_implied_logging; # note that this is always the case for serve processes due to a "args.progress |= is_serve". # In this case self.emit is True. self.emit = self.logger.getEffectiveLevel() == logging.INFO def __del__(self): if self.handler is not None: self.logger.removeHandler(self.handler) self.handler.close() def output_json(self, *, finished=False, **kwargs): assert self.json if not self.emit: return kwargs.update(dict( operation=self.id, msgid=self.msgid, type=self.JSON_TYPE, finished=finished, time=time.time(), )) print(json.dumps(kwargs), file=sys.stderr) def finish(self): if self.json: self.output_json(finished=True) else: self.output('') def justify_to_terminal_size(message): terminal_space = get_terminal_size(fallback=(-1, -1))[0] # justify only if we are outputting to a terminal if terminal_space != -1: return message.ljust(terminal_space) return message class ProgressIndicatorMessage(ProgressIndicatorBase): JSON_TYPE = 'progress_message' def output(self, msg): if self.json: self.output_json(message=msg) else: self.logger.info(justify_to_terminal_size(msg)) class ProgressIndicatorPercent(ProgressIndicatorBase): JSON_TYPE = 'progress_percent' def __init__(self, total=0, step=5, start=0, msg="%3.0f%%", msgid=None): """ Percentage-based progress indicator :param total: total amount of items :param step: step size in percent :param start: at which percent value to start :param msg: output message, must contain one %f placeholder for the percentage """ self.counter = 0 # 0 .. 
(total-1) self.total = total self.trigger_at = start # output next percentage value when reaching (at least) this self.step = step self.msg = msg super().__init__(msgid=msgid) def progress(self, current=None, increase=1): if current is not None: self.counter = current pct = self.counter * 100 / self.total self.counter += increase if pct >= self.trigger_at: self.trigger_at += self.step return pct def show(self, current=None, increase=1, info=None): """ Show and output the progress message :param current: set the current percentage [None] :param increase: increase the current percentage [None] :param info: array of strings to be formatted with msg [None] """ pct = self.progress(current, increase) if pct is not None: # truncate the last argument, if no space is available if info is not None: if not self.json: # no need to truncate if we're not outputing to a terminal terminal_space = get_terminal_size(fallback=(-1, -1))[0] if terminal_space != -1: space = terminal_space - len(self.msg % tuple([pct] + info[:-1] + [''])) info[-1] = ellipsis_truncate(info[-1], space) return self.output(self.msg % tuple([pct] + info), justify=False, info=info) return self.output(self.msg % pct) def output(self, message, justify=True, info=None): if self.json: self.output_json(message=message, current=self.counter, total=self.total, info=info) else: if justify: message = justify_to_terminal_size(message) self.logger.info(message) class ProgressIndicatorEndless: def __init__(self, step=10, file=None): """ Progress indicator (long row of dots) :param step: every Nth call, call the func :param file: output file, default: sys.stderr """ self.counter = 0 # call counter self.triggered = 0 # increases 1 per trigger event self.step = step # trigger every <step> calls if file is None: file = sys.stderr self.file = file def progress(self): self.counter += 1 trigger = self.counter % self.step == 0 if trigger: self.triggered += 1 return trigger def show(self): trigger = self.progress() if trigger: return self.output(self.triggered) def output(self, triggered): print('.', end='', file=self.file, flush=True) def finish(self): print(file=self.file) def sysinfo(): info = [] info.append('Platform: %s' % (' '.join(platform.uname()), )) if sys.platform.startswith('linux'): info.append('Linux: %s %s %s' % platform.linux_distribution()) info.append('Borg: %s Python: %s %s' % (borg_version, platform.python_implementation(), platform.python_version())) info.append('PID: %d CWD: %s' % (os.getpid(), os.getcwd())) info.append('sys.argv: %r' % sys.argv) info.append('SSH_ORIGINAL_COMMAND: %r' % os.environ.get('SSH_ORIGINAL_COMMAND')) info.append('') return '\n'.join(info) def log_multi(*msgs, level=logging.INFO, logger=logger): """ log multiple lines of text, each line by a separate logging call for cosmetic reasons each positional argument may be a single or multiple lines (separated by newlines) of text. 
""" lines = [] for msg in msgs: lines.extend(msg.splitlines()) for line in lines: logger.log(level, line) class BaseFormatter: FIXED_KEYS = { # Formatting aids 'LF': '\n', 'SPACE': ' ', 'TAB': '\t', 'CR': '\r', 'NUL': '\0', 'NEWLINE': os.linesep, 'NL': os.linesep, } def get_item_data(self, item): raise NotImplementedError def format_item(self, item): return self.format.format_map(self.get_item_data(item)) @staticmethod def keys_help(): return " - NEWLINE: OS dependent line separator\n" \ " - NL: alias of NEWLINE\n" \ " - NUL: NUL character for creating print0 / xargs -0 like output, see barchive/bpath\n" \ " - SPACE\n" \ " - TAB\n" \ " - CR\n" \ " - LF" class ArchiveFormatter(BaseFormatter): def __init__(self, format): self.format = partial_format(format, self.FIXED_KEYS) def get_item_data(self, archive): return { # *name* is the key used by borg-info for the archive name, this makes the formats more compatible 'name': remove_surrogates(archive.name), 'barchive': archive.name, 'archive': remove_surrogates(archive.name), 'id': bin_to_hex(archive.id), 'time': format_time(to_localtime(archive.ts)), # *start* is the key used by borg-info for this timestamp, this makes the formats more compatible 'start': format_time(to_localtime(archive.ts)), } @staticmethod def keys_help(): return " - archive, name: archive name interpreted as text (might be missing non-text characters, see barchive)\n" \ " - barchive: verbatim archive name, can contain any character except NUL\n" \ " - time: time of creation of the archive\n" \ " - id: internal ID of the archive" class ItemFormatter(BaseFormatter): KEY_DESCRIPTIONS = { 'bpath': 'verbatim POSIX path, can contain any character except NUL', 'path': 'path interpreted as text (might be missing non-text characters, see bpath)', 'source': 'link target for links (identical to linktarget)', 'extra': 'prepends {source} with " -> " for soft links and " link to " for hard links', 'csize': 'compressed size', 'dsize': 'deduplicated size', 'dcsize': 'deduplicated compressed size', 'num_chunks': 'number of chunks in this file', 'unique_chunks': 'number of unique chunks in this file', 'health': 'either "healthy" (file ok) or "broken" (if file has all-zero replacement chunks)', } KEY_GROUPS = ( ('type', 'mode', 'uid', 'gid', 'user', 'group', 'path', 'bpath', 'source', 'linktarget', 'flags'), ('size', 'csize', 'dsize', 'dcsize', 'num_chunks', 'unique_chunks'), ('mtime', 'ctime', 'atime', 'isomtime', 'isoctime', 'isoatime'), tuple(sorted(hashlib.algorithms_guaranteed)), ('archiveid', 'archivename', 'extra'), ('health', ) ) KEYS_REQUIRING_CACHE = ( 'dsize', 'dcsize', 'unique_chunks', ) @classmethod def available_keys(cls): class FakeArchive: fpr = name = "" from .item import Item fake_item = Item(mode=0, path='', user='', group='', mtime=0, uid=0, gid=0) formatter = cls(FakeArchive, "") keys = [] keys.extend(formatter.call_keys.keys()) keys.extend(formatter.get_item_data(fake_item).keys()) return keys @classmethod def keys_help(cls): help = [] keys = cls.available_keys() for key in cls.FIXED_KEYS: keys.remove(key) for group in cls.KEY_GROUPS: for key in group: keys.remove(key) text = " - " + key if key in cls.KEY_DESCRIPTIONS: text += ": " + cls.KEY_DESCRIPTIONS[key] help.append(text) help.append("") assert not keys, str(keys) return "\n".join(help) @classmethod def format_needs_cache(cls, format): format_keys = {f[1] for f in Formatter().parse(format)} return any(key in cls.KEYS_REQUIRING_CACHE for key in format_keys) def __init__(self, archive, format, *, json_lines=False): 
self.archive = archive self.json_lines = json_lines static_keys = { 'archivename': archive.name, 'archiveid': archive.fpr, } static_keys.update(self.FIXED_KEYS) self.format = partial_format(format, static_keys) self.format_keys = {f[1] for f in Formatter().parse(format)} self.call_keys = { 'size': self.calculate_size, 'csize': self.calculate_csize, 'dsize': partial(self.sum_unique_chunks_metadata, lambda chunk: chunk.size), 'dcsize': partial(self.sum_unique_chunks_metadata, lambda chunk: chunk.csize), 'num_chunks': self.calculate_num_chunks, 'unique_chunks': partial(self.sum_unique_chunks_metadata, lambda chunk: 1), 'isomtime': partial(self.format_time, 'mtime'), 'isoctime': partial(self.format_time, 'ctime'), 'isoatime': partial(self.format_time, 'atime'), 'mtime': partial(self.time, 'mtime'), 'ctime': partial(self.time, 'ctime'), 'atime': partial(self.time, 'atime'), } for hash_function in hashlib.algorithms_guaranteed: self.add_key(hash_function, partial(self.hash_item, hash_function)) self.used_call_keys = set(self.call_keys) & self.format_keys if self.json_lines: self.item_data = {} self.format_item = self.format_item_json else: self.item_data = static_keys def format_item_json(self, item): return json.dumps(self.get_item_data(item)) + '\n' def add_key(self, key, callable_with_item): self.call_keys[key] = callable_with_item self.used_call_keys = set(self.call_keys) & self.format_keys def get_item_data(self, item): mode = stat.filemode(item.mode) item_type = mode[0] item_data = self.item_data source = item.get('source', '') extra = '' if source: source = remove_surrogates(source) if item_type == 'l': extra = ' -> %s' % source else: mode = 'h' + mode[1:] extra = ' link to %s' % source item_data['type'] = item_type item_data['mode'] = mode item_data['user'] = item.user or item.uid item_data['group'] = item.group or item.gid item_data['uid'] = item.uid item_data['gid'] = item.gid item_data['path'] = remove_surrogates(item.path) if self.json_lines: item_data['healthy'] = 'chunks_healthy' not in item else: item_data['bpath'] = item.path item_data['extra'] = extra item_data['health'] = 'broken' if 'chunks_healthy' in item else 'healthy' item_data['source'] = source item_data['linktarget'] = source item_data['flags'] = item.get('bsdflags') for key in self.used_call_keys: item_data[key] = self.call_keys[key](item) return item_data def sum_unique_chunks_metadata(self, metadata_func, item): """ sum unique chunks metadata, a unique chunk is a chunk which is referenced globally as often as it is in the item item: The item to sum its unique chunks' metadata metadata_func: A function that takes a parameter of type ChunkIndexEntry and returns a number, used to return the metadata needed from the chunk """ chunk_index = self.archive.cache.chunks chunks = item.get('chunks', []) chunks_counter = Counter(c.id for c in chunks) return sum(metadata_func(c) for c in chunks if chunk_index[c.id].refcount == chunks_counter[c.id]) def calculate_num_chunks(self, item): return len(item.get('chunks', [])) def calculate_size(self, item): # note: does not support hardlink slaves, they will be size 0 return item.get_size(compressed=False) def calculate_csize(self, item): # note: does not support hardlink slaves, they will be csize 0 return item.get_size(compressed=True) def hash_item(self, hash_function, item): if 'chunks' not in item: return "" hash = hashlib.new(hash_function) for data in self.archive.pipeline.fetch_many([c.id for c in item.chunks]): hash.update(data) return hash.hexdigest() def format_time(self, 
key, item): return format_time(safe_timestamp(item.get(key) or item.mtime)) def time(self, key, item): return safe_timestamp(item.get(key) or item.mtime) class ChunkIteratorFileWrapper: """File-like wrapper for chunk iterators""" def __init__(self, chunk_iterator, read_callback=None): """ *chunk_iterator* should be an iterator yielding bytes. These will be buffered internally as necessary to satisfy .read() calls. *read_callback* will be called with one argument, some byte string that has just been read and will be subsequently returned to a caller of .read(). It can be used to update a progress display. """ self.chunk_iterator = chunk_iterator self.chunk_offset = 0 self.chunk = b'' self.exhausted = False self.read_callback = read_callback def _refill(self): remaining = len(self.chunk) - self.chunk_offset if not remaining: try: chunk = next(self.chunk_iterator) self.chunk = memoryview(chunk) except StopIteration: self.exhausted = True return 0 # EOF self.chunk_offset = 0 remaining = len(self.chunk) return remaining def _read(self, nbytes): if not nbytes: return b'' remaining = self._refill() will_read = min(remaining, nbytes) self.chunk_offset += will_read return self.chunk[self.chunk_offset - will_read:self.chunk_offset] def read(self, nbytes): parts = [] while nbytes and not self.exhausted: read_data = self._read(nbytes) nbytes -= len(read_data) parts.append(read_data) if self.read_callback: self.read_callback(read_data) return b''.join(parts) def open_item(archive, item): """Return file-like object for archived item (with chunks).""" chunk_iterator = archive.pipeline.fetch_many([c.id for c in item.chunks]) return ChunkIteratorFileWrapper(chunk_iterator) def file_status(mode): if stat.S_ISREG(mode): return 'A' elif stat.S_ISDIR(mode): return 'd' elif stat.S_ISBLK(mode): return 'b' elif stat.S_ISCHR(mode): return 'c' elif stat.S_ISLNK(mode): return 's' elif stat.S_ISFIFO(mode): return 'f' return '?' def hardlinkable(mode): """return True if we support hardlinked items of this type""" return stat.S_ISREG(mode) or stat.S_ISBLK(mode) or stat.S_ISCHR(mode) or stat.S_ISFIFO(mode) def chunkit(it, size): """ Chunk an iterator <it> into pieces of <size>. >>> list(chunker('ABCDEFG', 3)) [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']] """ iterable = iter(it) return iter(lambda: list(islice(iterable, size)), []) def consume(iterator, n=None): """Advance the iterator n-steps ahead. If n is none, consume entirely.""" # Use functions that consume iterators at C speed. 
if n is None: # feed the entire iterator into a zero-length deque deque(iterator, maxlen=0) else: # advance to the empty slice starting at position n next(islice(iterator, n, n), None) # GenericDirEntry, scandir_generic (c) 2012 Ben Hoyt # from the python-scandir package (3-clause BSD license, just like us, so no troubles here) # note: simplified version class GenericDirEntry: __slots__ = ('name', '_scandir_path', '_path') def __init__(self, scandir_path, name): self._scandir_path = scandir_path self.name = name self._path = None @property def path(self): if self._path is None: self._path = os.path.join(self._scandir_path, self.name) return self._path def stat(self, follow_symlinks=True): assert not follow_symlinks return os.stat(self.path, follow_symlinks=follow_symlinks) def _check_type(self, type): st = self.stat(False) return stat.S_IFMT(st.st_mode) == type def is_dir(self, follow_symlinks=True): assert not follow_symlinks return self._check_type(stat.S_IFDIR) def is_file(self, follow_symlinks=True): assert not follow_symlinks return self._check_type(stat.S_IFREG) def is_symlink(self): return self._check_type(stat.S_IFLNK) def inode(self): st = self.stat(False) return st.st_ino def __repr__(self): return '<{0}: {1!r}>'.format(self.__class__.__name__, self.path) def scandir_generic(path='.'): """Like os.listdir(), but yield DirEntry objects instead of returning a list of names.""" for name in sorted(os.listdir(path)): yield GenericDirEntry(path, name) try: from os import scandir except ImportError: try: # Try python-scandir on Python 3.4 from scandir import scandir except ImportError: # If python-scandir is not installed, then use a version that is just as slow as listdir. scandir = scandir_generic def scandir_inorder(path='.'): return sorted(scandir(path), key=lambda dirent: dirent.inode()) def clean_lines(lines, lstrip=None, rstrip=None, remove_empty=True, remove_comments=True): """ clean lines (usually read from a config file): 1. strip whitespace (left and right), 2. remove empty lines, 3. remove comments. note: only "pure comment lines" are supported, no support for "trailing comments". :param lines: input line iterator (e.g. list or open text file) that gives unclean input lines :param lstrip: lstrip call arguments or False, if lstripping is not desired :param rstrip: rstrip call arguments or False, if rstripping is not desired :param remove_comments: remove comment lines (lines starting with "#") :param remove_empty: remove empty lines :return: yields processed lines """ for line in lines: if lstrip is not False: line = line.lstrip(lstrip) if rstrip is not False: line = line.rstrip(rstrip) if remove_empty and not line: continue if remove_comments and line.startswith('#'): continue yield line class ErrorIgnoringTextIOWrapper(io.TextIOWrapper): def read(self, n): if not self.closed: try: return super().read(n) except BrokenPipeError: try: super().close() except OSError: pass return '' def write(self, s): if not self.closed: try: return super().write(s) except BrokenPipeError: try: super().close() except OSError: pass return len(s) class SignalException(BaseException): """base class for all signal-based exceptions""" class SigHup(SignalException): """raised on SIGHUP signal""" class SigTerm(SignalException): """raised on SIGTERM signal""" @contextlib.contextmanager def signal_handler(sig, handler): """ when entering context, set up signal handler <handler> for signal <sig>. when leaving context, restore original signal handler. 
<sig> can bei either a str when giving a signal.SIGXXX attribute name (it won't crash if the attribute name does not exist as some names are platform specific) or a int, when giving a signal number. <handler> is any handler value as accepted by the signal.signal(sig, handler). """ if isinstance(sig, str): sig = getattr(signal, sig, None) if sig is not None: orig_handler = signal.signal(sig, handler) try: yield finally: if sig is not None: signal.signal(sig, orig_handler) def raising_signal_handler(exc_cls): def handler(sig_no, frame): # setting SIG_IGN avoids that an incoming second signal of this # kind would raise a 2nd exception while we still process the # exception handler for exc_cls for the 1st signal. signal.signal(sig_no, signal.SIG_IGN) raise exc_cls return handler def swidth_slice(string, max_width): """ Return a slice of *max_width* cells from *string*. Negative *max_width* means from the end of string. *max_width* is in units of character cells (or "columns"). Latin characters are usually one cell wide, many CJK characters are two cells wide. """ from .platform import swidth reverse = max_width < 0 max_width = abs(max_width) if reverse: string = reversed(string) current_swidth = 0 result = [] for character in string: current_swidth += swidth(character) if current_swidth > max_width: break result.append(character) if reverse: result.reverse() return ''.join(result) class BorgJsonEncoder(json.JSONEncoder): def default(self, o): from .repository import Repository from .remote import RemoteRepository from .archive import Archive from .cache import Cache if isinstance(o, Repository) or isinstance(o, RemoteRepository): return { 'id': bin_to_hex(o.id), 'location': o._location.canonical_path(), } if isinstance(o, Archive): return o.info() if isinstance(o, Cache): return { 'path': o.path, 'stats': o.stats(), } return super().default(o) def basic_json_data(manifest, *, cache=None, extra=None): key = manifest.key data = extra or {} data.update({ 'repository': BorgJsonEncoder().default(manifest.repository), 'encryption': { 'mode': key.ARG_NAME, }, }) data['repository']['last_modified'] = format_time(to_localtime(manifest.last_timestamp.replace(tzinfo=timezone.utc))) if key.NAME.startswith('key file'): data['encryption']['keyfile'] = key.find_key() if cache: data['cache'] = cache return data def json_dump(obj): """Dump using BorgJSONEncoder.""" return json.dumps(obj, sort_keys=True, indent=4, cls=BorgJsonEncoder) def json_print(obj): print(json_dump(obj)) def secure_erase(path): """Attempt to securely erase a file by writing random data over it before deleting it.""" with open(path, 'r+b') as fd: length = os.stat(fd.fileno()).st_size fd.write(os.urandom(length)) fd.flush() os.fsync(fd.fileno()) os.unlink(path) def truncate_and_unlink(path): """ Truncate and then unlink *path*. Do not create *path* if it does not exist. Open *path* for truncation in r+b mode (=O_RDWR|O_BINARY). Use this when deleting potentially large files when recovering from a VFS error such as ENOSPC. It can help a full file system recover. Refer to the "File system interaction" section in repository.py for further explanations. """ with open(path, 'r+b') as fd: fd.truncate() os.unlink(path) def popen_with_error_handling(cmd_line: str, log_prefix='', **kwargs): """ Handle typical errors raised by subprocess.Popen. Return None if an error occurred, otherwise return the Popen object. *cmd_line* is split using shlex (e.g. 'gzip -9' => ['gzip', '-9']). 
Log messages will be prefixed with *log_prefix*; if set, it should end with a space (e.g. log_prefix='--some-option: '). Does not change the exit code. """ assert not kwargs.get('shell'), 'Sorry pal, shell mode is a no-no' try: command = shlex.split(cmd_line) if not command: raise ValueError('an empty command line is not permitted') except ValueError as ve: logger.error('%s%s', log_prefix, ve) return logger.debug('%scommand line: %s', log_prefix, command) try: return subprocess.Popen(command, **kwargs) except FileNotFoundError: logger.error('%sexecutable not found: %s', log_prefix, command[0]) return except PermissionError: logger.error('%spermission denied: %s', log_prefix, command[0]) return
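The helper functions above are easiest to understand from a few call examples. The following is a minimal usage sketch, assuming the module is importable as borg.helpers (the upstream Borg layout; adjust the import otherwise); the expected outputs follow directly from the function bodies shown above.
from borg.helpers import ChunkerParams, format_file_size, partial_format

# partial_format() fills in the keys it knows about and leaves unknown placeholders intact.
print(partial_format('{known} and {unknown}', {'known': 'filled'}))  # -> filled and {unknown}

# format_file_size() uses decimal (powers of 1000) units with a space separator.
print(format_file_size(1234567))        # -> 1.23 MB

# ChunkerParams() parses a --chunker-params style value into a tuple of four ints.
print(ChunkerParams('19,23,21,4095'))   # -> (19, 23, 21, 4095)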
python
#!/usr/bin/env python ############################################################################### # $Id$ # # Project: GDAL/OGR Test Suite # Purpose: Test read/write functionality for USGSDEM driver. # Author: Even Rouault <even dot rouault at mines dash paris dot org> # ############################################################################### # Copyright (c) 2008-2011, Even Rouault <even dot rouault at mines-paris dot org> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ############################################################################### import os import sys from osgeo import gdal from osgeo import osr sys.path.append('../pymod') import gdaltest ############################################################################### # Test truncated version of http://download.osgeo.org/gdal/data/usgsdem/022gdeme def usgsdem_1(): tst = gdaltest.GDALTest('USGSDEM', '022gdeme_truncated', 1, 1583) srs = osr.SpatialReference() srs.SetWellKnownGeogCS('NAD27') return tst.testOpen(check_prj=srs.ExportToWkt(), check_gt=(-67.00041667, 0.00083333, 0.0, 50.000416667, 0.0, -0.00083333)) ############################################################################### # Test truncated version of http://download.osgeo.org/gdal/data/usgsdem/114p01_0100_deme.dem def usgsdem_2(): tst = gdaltest.GDALTest('USGSDEM', '114p01_0100_deme_truncated.dem', 1, 53864) srs = osr.SpatialReference() srs.SetWellKnownGeogCS('NAD27') return tst.testOpen(check_prj=srs.ExportToWkt(), check_gt=(-136.25010416667, 0.000208333, 0.0, 59.25010416667, 0.0, -0.000208333)) ############################################################################### # Test truncated version of file that triggered bug #2348 def usgsdem_3(): tst = gdaltest.GDALTest('USGSDEM', '39079G6_truncated.dem', 1, 61424) srs = osr.SpatialReference() srs.SetWellKnownGeogCS('WGS72') srs.SetUTM(17) return tst.testOpen(check_prj=srs.ExportToWkt(), check_gt=(606855.0, 30.0, 0.0, 4414605.0, 0.0, -30.0)) ############################################################################### # Test CreateCopy() def usgsdem_4(): tst = gdaltest.GDALTest('USGSDEM', '39079G6_truncated.dem', 1, 61424, options=['RESAMPLE=Nearest']) return tst.testCreateCopy(check_gt=1, check_srs=1, vsimem=1) ############################################################################### # Test CreateCopy() without any creation options def usgsdem_5(): ds = gdal.Open('data/n43.dt0') ds2 = gdal.GetDriverByName('USGSDEM').CreateCopy('tmp/n43.dem', ds, options=['RESAMPLE=Nearest']) if 
ds.GetRasterBand(1).Checksum() != ds2.GetRasterBand(1).Checksum(): gdaltest.post_reason('Bad checksum.') print(ds2.GetRasterBand(1).Checksum()) print(ds.GetRasterBand(1).Checksum()) ds2 = None print(open('tmp/n43.dem', 'rb').read()) return 'fail' gt1 = ds.GetGeoTransform() gt2 = ds2.GetGeoTransform() for i in range(6): if abs(gt1[i] - gt2[i]) > 1e-5: print('') print('old = ', gt1) print('new = ', gt2) gdaltest.post_reason('Geotransform differs.') return 'fail' srs = osr.SpatialReference() srs.SetWellKnownGeogCS('WGS84') if ds2.GetProjectionRef() != srs.ExportToWkt(): gdaltest.post_reason('Bad SRS.') return 'fail' ds2 = None return 'success' ############################################################################### # Test CreateCopy() without a few creation options. Then create a new copy with TEMPLATE # creation option and check that both files are binary identical. def usgsdem_6(): ds = gdal.Open('data/n43.dt0') ds2 = gdal.GetDriverByName('USGSDEM').CreateCopy('tmp/file_1.dem', ds, options=['PRODUCER=GDAL', 'OriginCode=GDAL', 'ProcessCode=A', 'RESAMPLE=Nearest']) ds3 = gdal.GetDriverByName('USGSDEM').CreateCopy('tmp/file_2.dem', ds2, options=['TEMPLATE=tmp/file_1.dem', 'RESAMPLE=Nearest']) del ds2 del ds3 f1 = open('tmp/file_1.dem', 'rb') f2 = open('tmp/file_2.dem', 'rb') # Skip the 40 first bytes because the dataset name will differ f1.seek(40, 0) f2.seek(40, 0) data1 = f1.read() data2 = f2.read() if data1 != data2: return 'fail' f1.close() f2.close() return 'success' ############################################################################### # Test CreateCopy() with CDED50K profile def usgsdem_7(): ds = gdal.Open('data/n43.dt0') # To avoid warning about 'Unable to find NTS mapsheet lookup file: NTS-50kindex.csv' gdal.PushErrorHandler('CPLQuietErrorHandler') ds2 = gdal.GetDriverByName('USGSDEM').CreateCopy('tmp/000a00DEMz', ds, options=['PRODUCT=CDED50K', 'TOPLEFT=80w,44n', 'RESAMPLE=Nearest', 'ZRESOLUTION=1.1', 'INTERNALNAME=GDAL']) gdal.PopErrorHandler() if ds2.RasterXSize != 1201 or ds2.RasterYSize != 1201: gdaltest.post_reason('Bad image dimensions.') print(ds2.RasterXSize) print(ds2.RasterYSize) return 'fail' expected_gt = (-80.000104166666674, 0.000208333333333, 0, 44.000104166666667, 0, -0.000208333333333) got_gt = ds2.GetGeoTransform() for i in range(6): if abs(expected_gt[i] - got_gt[i]) > 1e-5: print('') print('expected = ', expected_gt) print('got = ', got_gt) gdaltest.post_reason('Geotransform differs.') return 'fail' srs = osr.SpatialReference() srs.SetWellKnownGeogCS('NAD83') if ds2.GetProjectionRef() != srs.ExportToWkt(): gdaltest.post_reason('Bad SRS.') return 'fail' ds2 = None return 'success' ############################################################################### # Test truncated version of http://download.osgeo.org/gdal/data/usgsdem/various.zip/39109h1.dem # Undocumented format def usgsdem_8(): tst = gdaltest.GDALTest('USGSDEM', '39109h1_truncated.dem', 1, 39443) srs = osr.SpatialReference() srs.SetWellKnownGeogCS('NAD27') srs.SetUTM(12) return tst.testOpen(check_prj=srs.ExportToWkt(), check_gt=(660055.0, 10.0, 0.0, 4429465.0, 0.0, -10.0)) ############################################################################### # Test truncated version of http://download.osgeo.org/gdal/data/usgsdem/various.zip/4619old.dem # Old format def usgsdem_9(): tst = gdaltest.GDALTest('USGSDEM', '4619old_truncated.dem', 1, 10659) srs = osr.SpatialReference() srs.SetWellKnownGeogCS('NAD27') return tst.testOpen(check_prj=srs.ExportToWkt(), check_gt=(18.99958333, 0.0008333, 
0.0, 47.000416667, 0.0, -0.0008333)) ############################################################################### # https://github.com/OSGeo/gdal/issues/583 def usgsdem_with_extra_values_at_end_of_profile(): tst = gdaltest.GDALTest('USGSDEM', 'usgsdem_with_extra_values_at_end_of_profile.dem', 1, 56679) return tst.testOpen() ############################################################################### # Like Novato.dem of https://trac.osgeo.org/gdal/ticket/4901 def usgsdem_with_spaces_after_byte_864(): tst = gdaltest.GDALTest('USGSDEM', 'usgsdem_with_spaces_after_byte_864.dem', 1, 61078) return tst.testOpen() ############################################################################### # Cleanup def usgsdem_cleanup(): try: os.remove('tmp/n43.dem') os.remove('tmp/n43.dem.aux.xml') os.remove('tmp/file_1.dem') os.remove('tmp/file_1.dem.aux.xml') os.remove('tmp/file_2.dem') os.remove('tmp/file_2.dem.aux.xml') os.remove('tmp/000a00DEMz') os.remove('tmp/000a00DEMz.aux.xml') except OSError: pass return 'success' gdaltest_list = [ usgsdem_1, usgsdem_2, usgsdem_3, usgsdem_4, usgsdem_5, usgsdem_6, usgsdem_7, usgsdem_8, usgsdem_9, usgsdem_with_extra_values_at_end_of_profile, usgsdem_with_spaces_after_byte_864, usgsdem_cleanup] if __name__ == '__main__': gdaltest.setup_run('usgsdem') gdaltest.run_tests(gdaltest_list) gdaltest.summarize()
python
import tensorflow as tf


class GLU(tf.keras.layers.Layer):
    """Gated Linear Unit: splits the channel axis in half and gates one half
    with the sigmoid of the other. Expects NHWC inputs with an even channel count."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def call(self, inputs, **kwargs):
        channels = tf.shape(inputs)[-1]
        nb_split_channels = channels // 2
        x_1 = inputs[:, :, :, :nb_split_channels]
        x_2 = inputs[:, :, :, nb_split_channels:]
        return x_1 * tf.nn.sigmoid(x_2)
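A minimal usage sketch for the layer above (not part of the original file, intended to be run in the same scope as the class): a random NHWC tensor is passed through GLU, which halves the channel dimension.
x = tf.random.normal([2, 8, 8, 64])  # batch, height, width, channels
y = GLU()(x)                         # first 32 channels gated by sigmoid of the last 32
print(y.shape)                       # -> (2, 8, 8, 32)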
python
import aiohttp import asyncio import sys import json import argparse async def upload_cast_info(session, addr, cast): async with session.post(addr + "/wrk2-api/cast-info/write", json=cast) as resp: return await resp.text() async def upload_plot(session, addr, plot): async with session.post(addr + "/wrk2-api/plot/write", json=plot) as resp: return await resp.text() async def upload_movie_info(session, addr, movie): async with session.post(addr + "/wrk2-api/movie-info/write", json=movie) as resp: return await resp.text() async def register_movie(session, addr, movie): params = { "title": movie["title"], "movie_id": movie["movie_id"] } async with session.post(addr + "/wrk2-api/movie/register", data=params) as resp: return await resp.text() async def write_cast_info(addr, raw_casts): idx = 0 tasks = [] conn = aiohttp.TCPConnector(limit=200) async with aiohttp.ClientSession(connector=conn) as session: for raw_cast in raw_casts: try: cast = dict() cast["cast_info_id"] = raw_cast["id"] cast["name"] = raw_cast["name"] cast["gender"] = True if raw_cast["gender"] == 2 else False cast["intro"] = raw_cast["biography"] task = asyncio.ensure_future(upload_cast_info(session, addr, cast)) tasks.append(task) idx += 1 except: print("Warning: cast info missing!") if idx % 200 == 0: resps = await asyncio.gather(*tasks) print(idx, "casts finished") resps = await asyncio.gather(*tasks) print(idx, "casts finished") async def write_movie_info(addr, raw_movies): idx = 0 tasks = [] conn = aiohttp.TCPConnector(limit=200) async with aiohttp.ClientSession(connector=conn) as session: for raw_movie in raw_movies: movie = dict() casts = list() movie["movie_id"] = str(raw_movie["id"]) movie["title"] = raw_movie["title"] movie["plot_id"] = raw_movie["id"] for raw_cast in raw_movie["cast"]: try: cast = dict() cast["cast_id"] = raw_cast["cast_id"] cast["character"] = raw_cast["character"] cast["cast_info_id"] = raw_cast["id"] casts.append(cast) except: print("Warning: cast info missing!") movie["casts"] = casts movie["thumbnail_ids"] = [raw_movie["poster_path"]] movie["photo_ids"] = [] movie["video_ids"] = [] movie["avg_rating"] = raw_movie["vote_average"] movie["num_rating"] = raw_movie["vote_count"] task = asyncio.ensure_future(upload_movie_info(session, addr, movie)) tasks.append(task) plot = dict() plot["plot_id"] = raw_movie["id"] plot["plot"] = raw_movie["overview"] task = asyncio.ensure_future(upload_plot(session, addr, plot)) tasks.append(task) task = asyncio.ensure_future(register_movie(session, addr, movie)) tasks.append(task) idx += 1 if idx % 200 == 0: resps = await asyncio.gather(*tasks) print(idx, "movies finished") resps = await asyncio.gather(*tasks) print(idx, "movies finished") if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("-c", "--cast", action="store", dest="cast_filename", type=str, default="../datasets/tmdb/casts.json") parser.add_argument("-m", "--movie", action="store", dest="movie_filename", type=str, default="../datasets/tmdb/movies.json") args = parser.parse_args() with open(args.cast_filename, 'r') as cast_file: raw_casts = json.load(cast_file) addr = "http://127.0.0.1:8080" loop = asyncio.get_event_loop() future = asyncio.ensure_future(write_cast_info(addr, raw_casts)) loop.run_until_complete(future) with open(args.movie_filename, 'r') as movie_file: raw_movies = json.load(movie_file) addr = "http://127.0.0.1:8080" loop = asyncio.get_event_loop() future = asyncio.ensure_future(write_movie_info(addr, raw_movies)) loop.run_until_complete(future)
python
"""Tests experiment modules."""
python
import pytest import json from pytz import UnknownTimeZoneError from tzlocal import get_localzone from O365.connection import Connection, Protocol, MSGraphProtocol, MSOffice365Protocol, DEFAULT_SCOPES TEST_SCOPES = ['Contacts.Read.Shared', 'Mail.Send.Shared', 'User.Read', 'Contacts.ReadWrite.Shared', 'Mail.ReadWrite.Shared', 'Mail.Read.Shared', 'Contacts.Read', 'Sites.ReadWrite.All', 'Mail.Send', 'Mail.ReadWrite', 'offline_access', 'Mail.Read', 'Contacts.ReadWrite', 'Files.ReadWrite.All', 'Calendars.ReadWrite', 'User.ReadBasic.All'] class TestProtocol: def setup_class(self): self.proto = Protocol(protocol_url="testing", api_version="0.0") def teardown_class(self): pass def test_blank_protocol(self): with pytest.raises(ValueError): p = Protocol() def test_to_api_case(self): assert(self.proto.to_api_case("CaseTest") == "case_test") def test_get_scopes_for(self): with pytest.raises(ValueError): self.proto.get_scopes_for(123) # should error sicne it's not a list or tuple. assert(self.proto.get_scopes_for(['mailbox']) == ['mailbox']) assert(self.proto.get_scopes_for(None) == []) assert(self.proto.get_scopes_for('mailbox') == ['mailbox']) self.proto._oauth_scopes = DEFAULT_SCOPES assert(self.proto.get_scopes_for(['mailbox']) == ['Mail.Read']) # This test verifies that the scopes in the default list don't change #without us noticing. It makes sure that all the scopes we get back are #in the current set of scopes we expect. And all the scopes that we are #expecting are in the scopes we are getting back. The list contains the #same stuff but may not be in the same order and are therefore not equal scopes = self.proto.get_scopes_for(None) for scope in scopes: assert(scope in TEST_SCOPES) for scope in TEST_SCOPES: assert(scope in scopes) assert(self.proto.get_scopes_for('mailbox') == ['Mail.Read']) def test_prefix_scope(self): assert(self.proto.prefix_scope('Mail.Read') == 'Mail.Read') assert(self.proto.prefix_scope(('Mail.Read',)) == 'Mail.Read') self.proto.protocol_scope_prefix = 'test_prefix_' assert(self.proto.prefix_scope(('Mail.Read',)) == 'Mail.Read') assert(self.proto.prefix_scope('test_prefix_Mail.Read') == 'test_prefix_Mail.Read') assert(self.proto.prefix_scope('Mail.Read') == 'test_prefix_Mail.Read') def test_decendant_MSOffice365Protocol(self): # Basically we just test that it can create the class w/o erroring. msp = MSOffice365Protocol() # Make sure these don't change without going noticed. assert(msp.keyword_data_store['message_type'] == 'Microsoft.OutlookServices.Message') assert(msp.keyword_data_store['file_attachment_type'] == '#Microsoft.OutlookServices.FileAttachment') assert(msp.keyword_data_store['item_attachment_type'] == '#Microsoft.OutlookServices.ItemAttachment') assert(msp.max_top_value == 999)
python
import os import dgl import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import networkx as nx import numpy as np from sklearn.model_selection import KFold import digital_patient from digital_patient.conformal.base import RegressorAdapter from digital_patient.conformal.icp import IcpRegressor from digital_patient.conformal.nc import RegressorNc from examples.load_data2 import load_physiology def main(): # create directory to save results output_dir = 'cardiac-model' data_dir = os.path.join(output_dir, 'data') result_dir = os.path.join(output_dir, 'results') if not os.path.isdir(result_dir): os.makedirs(result_dir) # load data df = pd.read_csv(os.path.join(data_dir, 'data.csv'), index_col=0) var_names = [name.split(' ')[0] for name in df.columns] x = df.values.astype('float32') reps = 10 x = np.tile(x.T, reps=reps).T # # check # plt.figure() # plt.plot(x[:500, 0], x[:500, 1]) # plt.show() # # scale data # scaler = StandardScaler() # scaler = scaler.fit(x) # x = scaler.transform(x) # create sample lists samples = [] labels = [] window_size = 1000 for batch in range(x.shape[0] - 2 * window_size): print(f"{batch} - {batch + window_size - 2} -> {batch + window_size - 1} - {batch + 2 * window_size - 3}") samples.append(x[batch:batch + window_size - 2]) labels.append(x[batch + window_size - 1:batch + 2 * window_size - 3]) samples = np.array(samples) labels = np.array(labels) # create CV splits skf = KFold(n_splits=5, shuffle=True) trainval_index, test_index = [split for split in skf.split(samples)][0] skf2 = KFold(n_splits=5, shuffle=True) train_index, val_index = [split for split in skf2.split(np.arange(trainval_index.size))][0] x_train, x_val = samples[trainval_index[train_index]], samples[trainval_index[val_index]] y_train, y_val = labels[trainval_index[train_index]], labels[trainval_index[val_index]] x_test, y_test = samples[test_index], labels[test_index] # create edge list edge_list = [] for i in range(df.shape[1]): for j in range(df.shape[1]): edge_list.append((i, j)) # instantiate a digital patient model G = dgl.DGLGraph(edge_list) dp = digital_patient.DigitalPatient(G, epochs=20, lr=0.01, window_size=window_size-2) # # plot the graph corresponding to the digital patient # nx_G = dp.G.to_networkx() # pos = nx.circular_layout(nx_G) # node_labels = {} # for i, cn in enumerate(var_names): # node_labels[i] = cn # plt.figure() # nx.draw(nx_G, pos, alpha=0.3) # nx.draw_networkx_labels(nx_G, pos, labels=node_labels) # plt.tight_layout() # plt.savefig(f'{result_dir}/graph.png') # plt.show() # instantiate the model, train and predict dp.fit(x_train, y_train) predictions = dp.predict(x_test) # plot the results sns.set_style('whitegrid') for i, name in enumerate(var_names): for j in range(predictions.shape[0]): xi = y_test[j, :, i] pi = predictions[j, :, i] if name == 't': continue ti = labels[0, :, 0] # tik = np.repeat(ti, pi.shape[0]) pik = np.hstack(pi) plt.figure() plt.plot(ti, xi, label='true') for pik in pi: plt.plot(ti, pik, c='r', alpha=0.2) # sns.lineplot(tik, pik, alpha=0.2, ci=0.9) # plt.fill_between(ti, pi[:, 0], pi[:, 1], alpha=0.2, label='predicted') plt.title(name) plt.legend() # plt.ylabel(ylabel) plt.xlabel('time') plt.tight_layout() plt.savefig(f'{result_dir}/{name}_{j}.png') plt.show() break return if __name__ == '__main__': main()
python
import math
from typing import List


class Solution:
    def minSumOfLengths(self, arr: List[int], target: int) -> int:
        # need to know all subs: left[i] is the length of the shortest
        # sub-array summing to target that ends at or before index i
        n = len(arr)
        left = [math.inf] * n
        seen = {0: -1}
        cur = 0
        for i, val in enumerate(arr):
            cur += val
            if i > 0:
                left[i] = left[i - 1]
            if cur - target in seen:
                left[i] = min(left[i], i - seen[cur - target])
            seen[cur] = i
        # second pass from the right: `best` is the shortest sub-array
        # summing to target that starts at or after index i
        ans = math.inf
        cur = 0
        seen = {0: n}
        old = math.inf
        for i in reversed(range(n)):
            cur += arr[i]
            best = old
            if cur - target in seen:
                best = min(best, seen[cur - target] - i)
            if i > 0 and left[i - 1] != math.inf:  # the original compared against -1, but inf is the sentinel used above
                ans = min(ans, left[i - 1] + best)
            seen[cur] = i
            old = best
        return ans if ans != math.inf else -1
python
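A short sanity check for the two-pass prefix-sum solution above; it assumes the `Solution` class from the preceding sample is in scope, and the expected answers follow directly from the task (pick two non-overlapping sub-arrays, each summing to `target`, minimising the total length).

# assumes the Solution class above is defined in the same module
sol = Solution()
assert sol.minSumOfLengths([3, 2, 2, 4, 3], 3) == 2         # [3] and [3]
assert sol.minSumOfLengths([7, 3, 4, 7], 7) == 2            # [7] and [7]
assert sol.minSumOfLengths([4, 3, 2, 6, 2, 3, 4], 6) == -1  # only one sub-array sums to 6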
# te18/leaderboard
# https://github.com/te18/leaderboard

from flask import Flask, render_template

app = Flask(__name__)

# error handlers
@app.errorhandler(400)
def error_400(e):
    return render_template("errors/400.html"), 400

@app.errorhandler(404)
def error_404(e):
    return render_template("errors/404.html"), 404

@app.errorhandler(500)
def error_500(e):
    return render_template("errors/500.html"), 500

# main routes
@app.route("/")
def index():
    return render_template("index.html")

if __name__ == "__main__":
    app.run(host="0.0.0.0")
python
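As a quick, hedged way to exercise the error handlers above without deploying, Flask's built-in test client can be used; this assumes the `app` object above is in scope and that the templates/ directory exists, since each handler still renders a template.

# minimal smoke test, assuming `app` from the sample above
with app.test_client() as client:
    resp = client.get("/nope")
    print(resp.status_code)  # expect 404 from the error_404 handler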
# -*- coding: utf-8 -*- """The Mozilla Firefox history event formatter.""" from __future__ import unicode_literals from plaso.formatters import interface from plaso.formatters import manager from plaso.lib import errors class FirefoxBookmarkAnnotationFormatter(interface.ConditionalEventFormatter): """The Firefox bookmark annotation event formatter.""" DATA_TYPE = 'firefox:places:bookmark_annotation' FORMAT_STRING_PIECES = [ 'Bookmark Annotation: [{content}]', 'to bookmark [{title}]', '({url})'] FORMAT_STRING_SHORT_PIECES = ['Bookmark Annotation: {title}'] SOURCE_LONG = 'Firefox History' SOURCE_SHORT = 'WEBHIST' class FirefoxBookmarkFolderFormatter(interface.EventFormatter): """The Firefox bookmark folder event formatter.""" DATA_TYPE = 'firefox:places:bookmark_folder' FORMAT_STRING = '{title}' SOURCE_LONG = 'Firefox History' SOURCE_SHORT = 'WEBHIST' class FirefoxBookmarkFormatter(interface.ConditionalEventFormatter): """The Firefox URL bookmark event formatter.""" DATA_TYPE = 'firefox:places:bookmark' FORMAT_STRING_PIECES = [ 'Bookmark {type}', '{title}', '({url})', '[{places_title}]', 'visit count {visit_count}'] FORMAT_STRING_SHORT_PIECES = [ 'Bookmarked {title}', '({url})'] SOURCE_LONG = 'Firefox History' SOURCE_SHORT = 'WEBHIST' class FirefoxPageVisitFormatter(interface.ConditionalEventFormatter): """The Firefox page visited event formatter.""" DATA_TYPE = 'firefox:places:page_visited' # Transitions defined in the source file: # src/toolkit/components/places/nsINavHistoryService.idl # Also contains further explanation into what each of these settings mean. _URL_TRANSITIONS = { 1: 'LINK', 2: 'TYPED', 3: 'BOOKMARK', 4: 'EMBED', 5: 'REDIRECT_PERMANENT', 6: 'REDIRECT_TEMPORARY', 7: 'DOWNLOAD', 8: 'FRAMED_LINK', } _URL_TRANSITIONS.setdefault('UNKOWN') # TODO: Make extra conditional formatting. FORMAT_STRING_PIECES = [ '{url}', '({title})', '[count: {visit_count}]', 'Host: {host}', '{extra_string}'] FORMAT_STRING_SHORT_PIECES = ['URL: {url}'] SOURCE_LONG = 'Firefox History' SOURCE_SHORT = 'WEBHIST' # pylint: disable=unused-argument def GetMessages(self, formatter_mediator, event_data): """Determines the formatted message strings for the event data. Args: formatter_mediator (FormatterMediator): mediates the interactions between formatters and other components, such as storage and Windows EventLog resources. event_data (EventData): event data. Returns: tuple(str, str): formatted message string and short message string. Raises: WrongFormatter: if the event data cannot be formatted by the formatter. """ if self.DATA_TYPE != event_data.data_type: raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format( event_data.data_type)) event_values = event_data.CopyToDict() visit_type = event_values.get('visit_type', 0) transition = self._URL_TRANSITIONS.get(visit_type, None) if transition: transition_str = 'Transition: {0!s}'.format(transition) extra = event_values.get('extra', None) if extra: if transition: extra.append(transition_str) event_values['extra_string'] = ' '.join(extra) elif transition: event_values['extra_string'] = transition_str return self._ConditionalFormatMessages(event_values) class FirefoxDowloadFormatter(interface.EventFormatter): """The Firefox download event formatter.""" DATA_TYPE = 'firefox:downloads:download' FORMAT_STRING = ( '{url} ({full_path}). 
Received: {received_bytes} bytes ' 'out of: {total_bytes} bytes.') FORMAT_STRING_SHORT = '{full_path} downloaded ({received_bytes} bytes)' SOURCE_LONG = 'Firefox History' SOURCE_SHORT = 'WEBHIST' manager.FormattersManager.RegisterFormatters([ FirefoxBookmarkAnnotationFormatter, FirefoxBookmarkFolderFormatter, FirefoxBookmarkFormatter, FirefoxPageVisitFormatter, FirefoxDowloadFormatter])
python
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE import os import yaml CURRENT_DIR = os.path.dirname(os.path.realpath(__file__)) def getctype(typename): flag = False if "Const[" in typename: flag = True typename = typename[len("Const[") : -1] arraycount = 0 while "List[" in typename: arraycount += 1 typename = typename[len("List[") : -1] typename = typename + "*" * arraycount if flag: typename = "const " + typename return typename if __name__ == "__main__": with open( os.path.join(CURRENT_DIR, "..", "include", "awkward", "kernels.h"), "w" ) as header: header.write("// AUTO GENERATED: DO NOT EDIT BY HAND!\n") header.write( "// To regenerate file, execute - python dev/generate-kernelheader.py\n\n" ) header.write( '#ifndef AWKWARD_KERNELS_H_\n#define AWKWARD_KERNELS_H_\n\n#include "awkward/common.h"\n\nextern "C" {\n' ) with open( os.path.join(CURRENT_DIR, "..", "kernel-specification.yml") ) as specfile: indspec = yaml.safe_load(specfile)["kernels"] for spec in indspec: for childfunc in spec["specializations"]: header.write(" " * 2 + "EXPORT_SYMBOL ERROR\n") header.write(" " * 2 + childfunc["name"] + "(\n") for i, arg in enumerate(childfunc["args"]): header.write( " " * 4 + getctype(arg["type"]) + " " + arg["name"] ) if i == (len(childfunc["args"]) - 1): header.write(");\n") else: header.write(",\n") header.write("\n") header.write("}\n#endif\n")
python
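The interesting piece in the generator above is getctype(), which peels the spec's Const[...]/List[...] wrappers into C declarations; a couple of worked examples of that mapping (the type strings are illustrative inputs of the shapes the function handles):

# assumes getctype() from the generator above is importable
assert getctype("int64_t") == "int64_t"
assert getctype("List[List[int64_t]]") == "int64_t**"
assert getctype("Const[List[float]]") == "const float*"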
from enum import Enum import random class Color(Enum): YELLOW = 0 RED = 1 BLUE = 2 GREEN = 3 NONE = -1 class Player(object): def __init__(self, name, uid): self.cards = [] self.name = name self.id = uid class Card(object): def __init__(self, color): self.id = random.randrange(0,100000000000) self.color = color class Normal(Card): def __init__(self, color, digit): super().__init__(color) self.digit = digit self.link = "%s-%d.png" % (Color(self.color).name.lower(), self.digit) def __repr__(self): return "%s %d" % (self.color.name, self.digit) class Pull2(Card): def __init__(self, color): super().__init__(color) self.link = "%s-%s.png" % (Color(self.color).name.lower(), "Pull2" ) def __repr__(self): return "2 ZIEHEN (%s)" % self.color.name class LoseTurn(Card): def __init__(self, color): super().__init__(color) self.link = "%s-%s.png" % (Color(self.color).name.lower(), "LooseTurn") def __repr__(self): return "AUSSETZEN (%s)" % self.color.name class Retour(Card): def __init__(self, color): super().__init__(color) self.link = "%s-%s.png" % (Color(self.color).name.lower(), "Retour") def __repr__(self): return "RICHTUNGSWECHSEL (%s)" % self.color.name class ChangeColor(Card): def __init__(self): super().__init__(Color.NONE) self.link = "ChangeColor.png" def __repr__(self): return "Wünscher: %s" % self.color.name class Pull4(Card): def __init__(self): super().__init__(Color.NONE) self.link = "Pull4.png" def __repr__(self): return "4 ZIEHEN! und %s " % self.color.name
python
import os from twisted.application import service from twisted.python.filepath import FilePath from buildslave.bot import BuildSlave basedir = '.' rotateLength = 10000000 maxRotatedFiles = 10 # if this is a relocatable tac file, get the directory containing the TAC if basedir == '.': import os.path basedir = os.path.abspath(os.path.dirname(__file__)) # note: this line is matched against to check that this is a buildslave # directory; do not edit it. application = service.Application('buildslave') try: from twisted.python.logfile import LogFile from twisted.python.log import ILogObserver, FileLogObserver logfile = LogFile.fromFullPath(os.path.join(basedir, "twistd.log"), rotateLength=rotateLength, maxRotatedFiles=maxRotatedFiles) application.setComponent(ILogObserver, FileLogObserver(logfile).emit) except ImportError: # probably not yet twisted 8.2.0 and beyond, can't set log yet pass buildmaster_host = '{{host}}' port = {{port}} slavename = '{{name}}' passwd = '{{password}}' keepalive = 600 usepty = False umask = 0022 maxdelay = 300 s = BuildSlave(buildmaster_host, port, slavename, passwd, basedir, keepalive, usepty, umask=umask, maxdelay=maxdelay, allow_shutdown=False) s.setServiceParent(application)
python
class LightCommand(object): pass
python
"""Package for all views.""" from .control import Control from .dashboard import Dashboard from .events import Events from .live import Live from .liveness import Ping, Ready from .login import Login from .logout import Logout from .main import Main from .resultat import Resultat, ResultatHeat from .start import Start from .timing import Timing
python
"""MAGI Validators."""
python
# Author: Nathan Trouvain at 16/08/2021 <[email protected]> # Licence: MIT License # Copyright: Xavier Hinaut (2018) <[email protected]> from functools import partial import numpy as np from scipy import linalg from .utils import (readout_forward, _initialize_readout, _prepare_inputs_for_learning) from ..base.node import Node from ..base.types import global_dtype def _solve_ridge(XXT, YXT, ridge): return linalg.solve(XXT + ridge, YXT.T, assume_a="sym") def partial_backward(readout: Node, X_batch, Y_batch=None): transient = readout.transient X, Y = _prepare_inputs_for_learning(X_batch, Y_batch, transient=transient, bias=readout.input_bias, allow_reshape=True) xxt = X.T.dot(X) yxt = Y.T.dot(X) XXT = readout.get_buffer("XXT") YXT = readout.get_buffer("YXT") # This is not thread-safe, apparently, using Numpy memmap as buffers # ok for parallelization then with a lock (see ESN object) XXT += xxt YXT += yxt def backward(readout: Node, X=None, Y=None): ridge = readout.ridge XXT = readout.get_buffer("XXT") YXT = readout.get_buffer("YXT") input_dim = readout.input_dim if readout.input_bias: input_dim += 1 ridgeid = (ridge * np.eye(input_dim, dtype=global_dtype)) Wout_raw = _solve_ridge(XXT, YXT, ridgeid) if readout.input_bias: Wout, bias = Wout_raw[1:, :], Wout_raw[0, :][np.newaxis, :] readout.set_param("Wout", Wout) readout.set_param("bias", bias) else: readout.set_param("Wout", Wout_raw) def initialize(readout: Node, x=None, y=None, Wout_init=None): _initialize_readout(readout, x, y, bias=readout.input_bias, init_func=Wout_init) def initialize_buffers(readout): # create memmaped buffers for matrices X.X^T and Y.X^T pre-computed # in parallel for ridge regression # ! only memmap can be used ! Impossible to share Numpy arrays with # different processes in r/w mode otherwise (with proper locking) input_dim = readout.input_dim output_dim = readout.output_dim if readout.input_bias: input_dim += 1 readout.create_buffer("XXT", (input_dim, input_dim)) readout.create_buffer("YXT", (output_dim, input_dim)) class Ridge(Node): def __init__(self, output_dim=None, ridge=0.0, transient=0, Wout=None, input_bias=True, name=None): super(Ridge, self).__init__(params={"Wout": None, "bias": None}, hypers={"ridge": ridge, "transient": transient, "input_bias": input_bias}, forward=readout_forward, partial_backward=partial_backward, backward=backward, output_dim=output_dim, initializer=partial(initialize, Wout_init=Wout), buffers_initializer=initialize_buffers, name=name)
python
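The partial/backward pair above implements ridge regression incrementally: batches accumulate XXT = X.T @ X and YXT = Y.T @ X in shared buffers, and the final solve is (XXT + ridge*I) Wout.T = YXT.T. Below is a NumPy-only sketch of that closed form on a single batch; the shapes and data are made up for illustration.

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 10))                      # reservoir states (plus bias column if used)
Y = X @ rng.normal(size=(10, 2)) + 0.01 * rng.normal(size=(200, 2))
ridge = 1e-6

XXT = X.T @ X                                       # what partial_backward accumulates
YXT = Y.T @ X
Wout = np.linalg.solve(XXT + ridge * np.eye(10), YXT.T).T   # shape (2, 10), as in _solve_ridge
print(np.allclose(Y, X @ Wout.T, atol=0.1))         # True: the readout recovers the mapping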
""" This playbook checks for the presence of the Risk Response workbook and updates tasks or leaves generic notes. &quot;Risk_notable_verdict&quot; recommends this playbook as a second phase of the investigation. Additionally, this playbook can be used in ad-hoc investigations or incorporated into custom workbooks. """ import phantom.rules as phantom import json from datetime import datetime, timedelta def on_start(container): phantom.debug('on_start() called') # call 'workbook_list' block workbook_list(container=container) return def workbook_list(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("workbook_list() called") parameters = [{}] ################################################################################ ## Custom Code Start ################################################################################ # Write your custom code here... ################################################################################ ## Custom Code End ################################################################################ phantom.custom_function(custom_function="community/workbook_list", parameters=parameters, name="workbook_list", callback=workbook_decision) return def workbook_decision(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("workbook_decision() called") ################################################################################ # Determines if the workbook Risk Response is present and available for use. ################################################################################ # check for 'if' condition 1 found_match_1 = phantom.decision( container=container, conditions=[ ["workbook_list:custom_function_result.data.*.name", "==", "Risk Response"] ]) # call connected blocks if condition 1 matched if found_match_1: workbook_add(action=action, success=success, container=container, results=results, handle=handle) return # check for 'else' condition 2 join_risk_notable_review_indicators(action=action, success=success, container=container, results=results, handle=handle) return def workbook_add(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("workbook_add() called") id_value = container.get("id", None) parameters = [] parameters.append({ "workbook": "Risk Response", "container": id_value, "start_workbook": "true", "check_for_existing_workbook": "true", }) ################################################################################ ## Custom Code Start ################################################################################ # Write your custom code here... 
################################################################################ ## Custom Code End ################################################################################ phantom.custom_function(custom_function="community/workbook_add", parameters=parameters, name="workbook_add", callback=workbook_start_task) return def workbook_start_task(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("workbook_start_task() called") id_value = container.get("id", None) parameters = [] parameters.append({ "owner": None, "status": "in_progress", "container": id_value, "task_name": "Block Indicators", "note_title": None, "note_content": None, }) ################################################################################ ## Custom Code Start ################################################################################ # Write your custom code here... ################################################################################ ## Custom Code End ################################################################################ phantom.custom_function(custom_function="community/workbook_task_update", parameters=parameters, name="workbook_start_task", callback=join_risk_notable_review_indicators) return def join_risk_notable_review_indicators(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("join_risk_notable_review_indicators() called") # if the joined function has already been called, do nothing if phantom.get_run_data(key="join_risk_notable_review_indicators_called"): return # save the state that the joined function has now been called phantom.save_run_data(key="join_risk_notable_review_indicators_called", value="risk_notable_review_indicators") # call connected block "risk_notable_review_indicators" risk_notable_review_indicators(container=container, handle=handle) return def risk_notable_review_indicators(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("risk_notable_review_indicators() called") ################################################################################ ## Custom Code Start ################################################################################ # Write your custom code here... ################################################################################ ## Custom Code End ################################################################################ # call playbook "community/risk_notable_review_indicators", returns the playbook_run_id playbook_run_id = phantom.playbook("community/risk_notable_review_indicators", container=container, name="risk_notable_review_indicators", callback=indicator_get_by_tag) return def risk_notable_block_indicators(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("risk_notable_block_indicators() called") ################################################################################ ## Custom Code Start ################################################################################ # Write your custom code here... 
################################################################################ ## Custom Code End ################################################################################ # call playbook "community/risk_notable_block_indicators", returns the playbook_run_id playbook_run_id = phantom.playbook("community/risk_notable_block_indicators", container=container, name="risk_notable_block_indicators", callback=note_decision_1) return def join_risk_notable_protect_assets_and_users(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("join_risk_notable_protect_assets_and_users() called") # if the joined function has already been called, do nothing if phantom.get_run_data(key="join_risk_notable_protect_assets_and_users_called"): return # save the state that the joined function has now been called phantom.save_run_data(key="join_risk_notable_protect_assets_and_users_called", value="risk_notable_protect_assets_and_users") # call connected block "risk_notable_protect_assets_and_users" risk_notable_protect_assets_and_users(container=container, handle=handle) return def risk_notable_protect_assets_and_users(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("risk_notable_protect_assets_and_users() called") ################################################################################ ## Custom Code Start ################################################################################ # Write your custom code here... ################################################################################ ## Custom Code End ################################################################################ # call playbook "community/risk_notable_protect_assets_and_users", returns the playbook_run_id playbook_run_id = phantom.playbook("community/risk_notable_protect_assets_and_users", container=container, name="risk_notable_protect_assets_and_users", callback=note_decision_2) return def note_decision_1(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("note_decision_1() called") ################################################################################ # Determine if a note was left by the previous playbook and if the Risk Mitigate # workbook should be used. 
################################################################################ # check for 'if' condition 1 found_match_1 = phantom.decision( container=container, logical_operator="and", conditions=[ ["risk_notable_block_indicators:playbook_output:note_title", "!=", ""], ["risk_notable_block_indicators:playbook_output:note_content", "!=", ""], ["workbook_list:custom_function_result.data.*.name", "==", "Risk Mitigate"] ]) # call connected blocks if condition 1 matched if found_match_1: update_block_task(action=action, success=success, container=container, results=results, handle=handle) return # check for 'elif' condition 2 found_match_2 = phantom.decision( container=container, logical_operator="and", conditions=[ ["risk_notable_block_indicators:playbook_output:note_title", "!=", ""], ["risk_notable_block_indicators:playbook_output:note_content", "!=", ""] ]) # call connected blocks if condition 2 matched if found_match_2: add_block_note(action=action, success=success, container=container, results=results, handle=handle) return return def update_block_task(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("update_block_task() called") id_value = container.get("id", None) risk_notable_block_indicators_output_note_title = phantom.collect2(container=container, datapath=["risk_notable_block_indicators:playbook_output:note_title"]) risk_notable_block_indicators_output_note_content = phantom.collect2(container=container, datapath=["risk_notable_block_indicators:playbook_output:note_content"]) parameters = [] # build parameters list for 'update_block_task' call for risk_notable_block_indicators_output_note_title_item in risk_notable_block_indicators_output_note_title: for risk_notable_block_indicators_output_note_content_item in risk_notable_block_indicators_output_note_content: parameters.append({ "owner": None, "status": "closed", "container": id_value, "task_name": "Review and Block Indicators", "note_title": risk_notable_block_indicators_output_note_title_item[0], "note_content": risk_notable_block_indicators_output_note_content_item[0], }) ################################################################################ ## Custom Code Start ################################################################################ # Write your custom code here... ################################################################################ ## Custom Code End ################################################################################ phantom.custom_function(custom_function="community/workbook_task_update", parameters=parameters, name="update_block_task", callback=start_protect_task) return def start_protect_task(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("start_protect_task() called") id_value = container.get("id", None) parameters = [] parameters.append({ "owner": None, "status": "in_progress", "container": id_value, "task_name": "Protect Assets and Users", "note_title": None, "note_content": None, }) ################################################################################ ## Custom Code Start ################################################################################ # Write your custom code here... 
################################################################################ ## Custom Code End ################################################################################ phantom.custom_function(custom_function="community/workbook_task_update", parameters=parameters, name="start_protect_task", callback=join_risk_notable_protect_assets_and_users) return def add_block_note(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("add_block_note() called") ################################################################################ # Custom code to handle leaving a note with a dynamic title and content when the # Risk Mitigate workbook is not present. ################################################################################ risk_notable_block_indicators_output_note_title = phantom.collect2(container=container, datapath=["risk_notable_block_indicators:playbook_output:note_title"]) risk_notable_block_indicators_output_note_content = phantom.collect2(container=container, datapath=["risk_notable_block_indicators:playbook_output:note_content"]) risk_notable_block_indicators_output_note_title_values = [item[0] for item in risk_notable_block_indicators_output_note_title] risk_notable_block_indicators_output_note_content_values = [item[0] for item in risk_notable_block_indicators_output_note_content] ################################################################################ ## Custom Code Start ################################################################################ note_title = risk_notable_block_indicators_output_note_title_values note_content = risk_notable_block_indicators_output_note_content_values for title, content in zip(note_title, note_content): phantom.add_note(container=container, title=title, content=content, note_type="general", note_format="markdown") ################################################################################ ## Custom Code End ################################################################################ join_risk_notable_protect_assets_and_users(container=container) return def note_decision_2(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("note_decision_2() called") ################################################################################ # Determine if a note was left by the previous playbook and if the Risk Mitigate # workbook should be used. 
################################################################################ # check for 'if' condition 1 found_match_1 = phantom.decision( container=container, logical_operator="and", conditions=[ ["risk_notable_protect_assets_and_users:playbook_output:note_title", "!=", ""], ["risk_notable_protect_assets_and_users:playbook_output:note_content", "!=", ""], ["workbook_list:custom_function_result.data.*.name", "==", "Risk Mitigate"] ]) # call connected blocks if condition 1 matched if found_match_1: update_protect_task(action=action, success=success, container=container, results=results, handle=handle) return # check for 'elif' condition 2 found_match_2 = phantom.decision( container=container, logical_operator="and", conditions=[ ["risk_notable_protect_assets_and_users:playbook_output:note_title", "!=", ""], ["risk_notable_protect_assets_and_users:playbook_output:note_content", "!=", ""] ]) # call connected blocks if condition 2 matched if found_match_2: add_protect_note(action=action, success=success, container=container, results=results, handle=handle) return return def update_protect_task(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("update_protect_task() called") id_value = container.get("id", None) risk_notable_protect_assets_and_users_output_note_title = phantom.collect2(container=container, datapath=["risk_notable_protect_assets_and_users:playbook_output:note_title"]) risk_notable_protect_assets_and_users_output_note_content = phantom.collect2(container=container, datapath=["risk_notable_protect_assets_and_users:playbook_output:note_content"]) parameters = [] # build parameters list for 'update_protect_task' call for risk_notable_protect_assets_and_users_output_note_title_item in risk_notable_protect_assets_and_users_output_note_title: for risk_notable_protect_assets_and_users_output_note_content_item in risk_notable_protect_assets_and_users_output_note_content: parameters.append({ "owner": None, "status": "complete", "container": id_value, "task_name": "Protect Assets and Users", "note_title": risk_notable_protect_assets_and_users_output_note_title_item[0], "note_content": risk_notable_protect_assets_and_users_output_note_content_item[0], }) ################################################################################ ## Custom Code Start ################################################################################ # Write your custom code here... ################################################################################ ## Custom Code End ################################################################################ phantom.custom_function(custom_function="community/workbook_task_update", parameters=parameters, name="update_protect_task") return def add_protect_note(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("add_protect_note() called") ################################################################################ # Custom code to handle leaving a note with a dynamic title and content when the # Risk Mitigate workbook is not present. 
################################################################################ risk_notable_protect_assets_and_users_output_note_title = phantom.collect2(container=container, datapath=["risk_notable_protect_assets_and_users:playbook_output:note_title"]) risk_notable_protect_assets_and_users_output_note_content = phantom.collect2(container=container, datapath=["risk_notable_protect_assets_and_users:playbook_output:note_content"]) risk_notable_protect_assets_and_users_output_note_title_values = [item[0] for item in risk_notable_protect_assets_and_users_output_note_title] risk_notable_protect_assets_and_users_output_note_content_values = [item[0] for item in risk_notable_protect_assets_and_users_output_note_content] ################################################################################ ## Custom Code Start ################################################################################ note_title = risk_notable_protect_assets_and_users_output_note_title_values note_content = risk_notable_protect_assets_and_users_output_note_content_values for title, content in zip(note_title, note_content): phantom.add_note(container=container, title=title, content=content, note_type="general", note_format="markdown") ################################################################################ ## Custom Code End ################################################################################ return def decision_4(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("decision_4() called") # check for 'if' condition 1 found_match_1 = phantom.decision( container=container, conditions=[ ["indicator_get_by_tag:custom_function_result.data.*.indicator_value", "!=", ""] ]) # call connected blocks if condition 1 matched if found_match_1: risk_notable_block_indicators(action=action, success=success, container=container, results=results, handle=handle) return # check for 'else' condition 2 join_risk_notable_protect_assets_and_users(action=action, success=success, container=container, results=results, handle=handle) return def indicator_get_by_tag(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs): phantom.debug("indicator_get_by_tag() called") id_value = container.get("id", None) parameters = [] parameters.append({ "tags_or": "marked_for_block", "tags_and": None, "container": id_value, "tags_exclude": "blocked, safe", "indicator_timerange": None, }) ################################################################################ ## Custom Code Start ################################################################################ # Write your custom code here... ################################################################################ ## Custom Code End ################################################################################ phantom.custom_function(custom_function="community/indicator_get_by_tag", parameters=parameters, name="indicator_get_by_tag", callback=decision_4) return def on_finish(container, summary): phantom.debug("on_finish() called") ################################################################################ ## Custom Code Start ################################################################################ # This function is called after all actions are completed. # summary of all the action and/or all details of actions # can be collected here. 
# summary_json = phantom.get_summary() # if 'result' in summary_json: # for action_result in summary_json['result']: # if 'action_run_id' in action_result: # action_results = phantom.get_action_results(action_run_id=action_result['action_run_id'], result_data=False, flatten=False) # phantom.debug(action_results) ################################################################################ ## Custom Code End ################################################################################ return
python
from learnware.feature.timeseries.ts_feature import * import pandas as pd import numpy as np class TestTimeSeriesFeature: def test_ts_feature_stationary_test(self): df1 = pd.DataFrame(np.random.randint(0, 200, size=(100, 1)), columns=['x']) df2 = pd.util.testing.makeTimeDataFrame(50) df3 = pd.DataFrame([1, 2, 3, 2, 3, 1, 1, 1, 1, 5, 5, 5, 8, 9, 9, 10, 11, 12], columns=['x']) assert time_series_stationary_test(df1['x']) assert time_series_stationary_test(df2['A']) assert time_series_stationary_test(df3['x']) == False def test_ts_feature_seasonal_decompose(self): df = pd.DataFrame(np.random.randint(1, 10, size=(365, 1)), columns=['value'], index=pd.date_range('2021-01-01', periods=365, freq='D')) ret = time_series_seasonal_decompose(df['value']) assert "seasonal" in ret and len(ret["seasonal"]) == len(df) assert "resid" in ret and len(ret["resid"]) == len(df) assert "trend" in ret and len(ret["trend"]) == len(df) def test_ts_feature_get_seasonal_value(self): df = pd.DataFrame(np.random.randint(1, 10, size=(365, 1)), columns=['value'], index=pd.date_range('2021-01-01', periods=365, freq='D')) ret = time_series_seasonal_test(df['value'], [1, 30, 60, 120]) assert (type(ret) is list and len(ret) == 4)
python
""" datos de entrada A -->int -->a B -->int -->b C -->int -->c D --> int --> d datos de salida """ #entradas a = int ( input ( "digite el valor de A:" )) c = int ( input ( "digite el valor de B:" )) b = int ( input ( "digite el valor de C:" )) d = int ( input ( "digite el valor de D:" )) #cajanegra resultado = "" si ( c > 5 ): c = 0 re = 0 segundo = segundo + 1 elif ( b == 9 ): segundo = 1 elif ( c < 5 ): c = 0 re = 0 elif ( c == 5 ): re = 0 print ( "su numero redondeado es" , str ( a ) + str ( b ) + str ( c ) + str ( d )) #salida
python
from kivy.logger import Logger from kivy.clock import mainthread from jnius import autoclass from android.activity import bind as result_bind Gso = autoclass("com.google.android.gms.auth.api.signin.GoogleSignInOptions") GsoBuilder = autoclass( "com.google.android.gms.auth.api.signin.GoogleSignInOptions$Builder" ) GSignIn = autoclass("com.google.android.gms.auth.api.signin.GoogleSignIn") ApiException = autoclass("com.google.android.gms.common.api.ApiException") PythonActivity = autoclass("org.kivy.android.PythonActivity") context = PythonActivity.mActivity RC_SIGN_IN = 10122 mGSignInClient = None class GoogleActivityListener: def __init__(self, success_listener, error_listener): self.success_listener = success_listener self.error_listener = error_listener def google_activity_listener(self, request_code, result_code, data): if request_code == RC_SIGN_IN: Logger.info("KivyAuth: google_activity_listener called.") task = GSignIn.getSignedInAccountFromIntent(data) try: account = task.getResult(ApiException) if account: Logger.info( "KivyAuth: Google Login success.\ Calling success listener." ) self.success_listener( account.getDisplayName(), account.getEmail(), account.getPhotoUrl().toString(), ) except Exception as e: Logger.info( "KivyAuth: Error signing in using Google. {}".format(e) ) self.error_listener() def initialize_google(success_listener, error_listener): gso = GsoBuilder(Gso.DEFAULT_SIGN_IN).requestEmail().build() global mGSignInClient mGSignInClient = GSignIn.getClient(context, gso) gal = GoogleActivityListener(success_listener, error_listener) result_bind(on_activity_result=gal.google_activity_listener) Logger.info("KivyAuth: Initialized google signin") # @mainthread def login_google(): Logger.info("KivyAuth: Initiated google login") signInIntent = mGSignInClient.getSignInIntent() context.startActivityForResult(signInIntent, RC_SIGN_IN) def logout_google(after_logout): mGSignInClient.signOut() after_logout() Logger.info("KivyAuth: Logged out from google login")
python
import numpy as np; from random import choices import matplotlib.pyplot as plt; def Kroupa(N): ''' Calculates N stellar masses drawing from a Kroupa IMF 0.08 < m < 130 Input >>> N = number of stars wanted Output >>> masses = N-sized array of stellar masses ''' # Create a list of potential masses and then calculate their weights by using Kroupa IMF potential_mass = np.logspace(np.log10(0.08), np.log10(130), 10**4, endpoint=True) weights_low = 0.204*potential_mass[np.where(potential_mass<0.5)]**(-1.3) # Probabilities below m=0.5Msol weights_high = 0.204*potential_mass[np.where(potential_mass>=0.5)]**(-2.3) # Probabilities above m=0.5M_sol weights_total = np.append(weights_low, weights_high) # Picking the final masses based on the weights masses = choices(potential_mass, weights_total,k=N) return masses masses = Kroupa(1000) fig, ax = plt.subplots() ax.hist(masses, bins=50, density =True, histtype='step') plt.show()
python
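A brief usage sketch for Kroupa() above; the printed numbers vary from run to run because the draw is random, so they are indicative only.

cluster = np.array(Kroupa(10000))
print(f"total mass: {cluster.sum():.1f} Msun")
print(f"mean mass:  {cluster.mean():.3f} Msun")
print(f"fraction below 0.5 Msun: {(cluster < 0.5).mean():.2f}")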
import sys from utils import write_exp_utils import pandas as pd from utils import misc_utils import psycopg2 from psycopg2.extras import Json, DictCursor def main(argv): print(argv[1]) w = write_exp_utils.ExperimentConfig(argv[1], argv[2]) print("writing {} to database".format(argv[1]) ) w.write_to_db()# write experiment on database # check if the experiment is written correctly q = 'select experiment_id from rws_experiment.experiment_table order by experiment_id desc limit 1;' conn = misc_utils.connect_rds() print(pd.read_sql(q, conn)) if __name__== '__main__': main(sys.argv)
python
# Copyright 2019 The Johns Hopkins University Applied Physics Laboratory # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import sys from gala import imio, classify, features, morpho, agglo, evaluate as ev from scipy.ndimage import label from skimage.morphology import dilation, erosion from skimage.morphology import square, disk import argparse from skimage import morphology as skmorph import pickle def get_parser(): parser = argparse.ArgumentParser(description='GALA neuron Aggolmeration script') parser.set_defaults(func=lambda _: parser.print_help()) parser.add_argument( '-m', '--mode', required=True, help='Train(0) or Deploy(1)') parser.add_argument( '--prob_file', required=True, help='Probability map file') parser.add_argument( '--gt_file', required=False, help='Ground truth file') parser.add_argument( '--ws_file', required=False, help='Watershed file') parser.add_argument( '--train_file', required=False, help='Pretrained classifier file') parser.add_argument( '-o', '--outfile', required=True, help='Output file') parser.add_argument('--seeds_cc_threshold', type=int, default=5, help='Cutoff threshold on seed size') parser.add_argument('--agg_threshold', type=float, default=0.5, help='Cutoff threshold for agglomeration classifier') return parser def train(args): gt_train, pr_train, ws_train = (map(imio.read_h5_stack, [args.gt_file, args.prob_file, args.ws_file])) #['train-gt.lzf.h5', 'train-p1.lzf.h5', # 'train-ws.lzf.h5'])) #print('training') #gt_train = np.load(args.gt_file) #X,Y,Z #gt_train = np.transpose(gt_train,(2,0,1)) #gala wants z,x,y? #pr_train = np.load(args.prob_file) #X,Y,Z #pr_train = np.transpose(np.squeeze(pr_train),(2,0,1)) #gala wants z,x,y? #pr_train = pr_train[0:50,0:256,0:256] #pr_train = np.around(pr_train,decimals=2) #gt_train = gt_train[0:50,0:256,0:256] #print('watershed') #seeds = label(pr_train==0)[0] #seeds_cc_threshold = args.seeds_cc_threshold #seeds = morpho.remove_small_connected_components(seeds, # seeds_cc_threshold) #ws_train = skmorph.watershed(pr_train, seeds) fm = features.moments.Manager() fh = features.histogram.Manager() fc = features.base.Composite(children=[fm, fh]) g_train = agglo.Rag(ws_train, pr_train, feature_manager=fc) (X, y, w, merges) = g_train.learn_agglomerate(gt_train, fc)[0] y = y[:, 0] # gala has 3 truth labeling schemes, pick the first one rf = classify.DefaultRandomForest().fit(X, y) learned_policy = agglo.classifier_probability(fc, rf) #save learned_policy #np.savez(args.outfile, rf=rf, fc=fc) binary_file = open(args.outfile,mode='wb') lp_dump = pickle.dump([fc,rf], binary_file) binary_file.close() def deploy(args): #probability map print("Deploying through driver") if args.prob_file.endswith('.hdf5'): mem = imio.read_image_stack(args.prob_file, single_channel=False) else: mem = np.load(args.prob_file) #X,Y,Z mem = np.transpose(np.squeeze(mem),(2,0,1)) #gala wants z,x,y? 
pr_test = np.zeros_like(mem) for z in range(0,mem.shape[0]): pr_test[z,:,:] = dilation(mem[z,:,:], disk(10)) pr_test[z,:,:] = erosion(mem[z,:,:], disk(4)) seg_out = np.zeros(pr_test.shape) pr_dim = pr_test.shape xsize = pr_dim[1] ysize = pr_dim[2] zsize = pr_dim[0] print(pr_dim) print(pr_dim[0]) print(np.int(pr_dim[0]/zsize)) print("Starting loop") for iz in range(0,np.int(pr_dim[0]/zsize)): for ix in range(0,np.int(pr_dim[1]/xsize)): for iy in range(0,np.int(pr_dim[2]/ysize)): p0 = pr_test[iz*zsize+0:iz*zsize+zsize,ix*xsize+0:ix*xsize+xsize,iy*ysize+0:iy*ysize+ysize] p0 = np.around(p0,decimals=2) print(p0) #get trained classifier #npzfile = np.load(args.train_file) #rf = npzfile['rf'] #fc = npzfile['fc'] binary_file = open(args.train_file,mode='rb') print(binary_file) temp = pickle.load(binary_file) fc = temp[0] rf = temp[1] binary_file.close() learned_policy = agglo.classifier_probability(fc, rf) #pr_test = (map(imio.read_h5_stack, # ['test-p1.lzf.h5'])) print('watershed') seeds = label(p0==0)[0] seeds_cc_threshold = args.seeds_cc_threshold seeds = morpho.remove_small_connected_components(seeds, seeds_cc_threshold) ws_test = skmorph.watershed(p0, seeds) g_test = agglo.Rag(ws_test, p0, learned_policy, feature_manager=fc) g_test.agglomerate(args.agg_threshold) #This is a map of labels of the same shape as the original image. seg_test1 = g_test.get_segmentation() seg_out[iz*zsize+0:iz*zsize+zsize,ix*xsize+0:ix*xsize+xsize,iy*ysize+0:iy*ysize+ysize] = seg_test1 seg_out = np.transpose(seg_out,(1,2,0)) with open(args.outfile, 'wb') as f: np.save(f,seg_out) return if __name__ == '__main__': parser = get_parser() args = parser.parse_args() if(int(args.mode)==0): train(args) else: deploy(args)
python
from __future__ import print_function from timeit import default_timer as timer import json import datetime print('Loading function') def eratosthenes(n): sieve = [ True for i in range(n+1) ] def markOff(pv): for i in range(pv+pv, n+1, pv): sieve[i] = False markOff(2) for i in range(3, n+1): if sieve[i]: markOff(i) return [ i for i in range(1, n+1) if sieve[i] ] def lambda_handler(event, context): start = timer() #print("Received event: " + json.dumps(event, indent=2)) maxPrime = int(event['queryStringParameters']['max']) numLoops = int(event['queryStringParameters']['loops']) print("looping " + str(numLoops) + " time(s)") for loop in range (0, numLoops): primes = eratosthenes(maxPrime) print("Highest 3 primes: " + str(primes.pop()) + ", " + str(primes.pop()) + ", " + str(primes.pop())) durationSeconds = timer() - start return {"statusCode": 200, \ "headers": {"Content-Type": "application/json"}, \ "body": "{\"durationSeconds\": " + str(durationSeconds) + \ ", \"max\": " + str(maxPrime) + ", \"loops\": " + str(numLoops) + "}"}
python
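The handler above expects an API Gateway proxy event whose query parameters arrive as strings, so a local smoke test only needs to fake that shape; the values below are arbitrary.

# run locally without AWS; `context` is unused by the handler, so None is fine
fake_event = {"queryStringParameters": {"max": "1000", "loops": "2"}}
response = lambda_handler(fake_event, None)
print(response["statusCode"], response["body"])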
""" --- Day 1: The Tyranny of the Rocket Equation --- https://adventofcode.com/2019/day/1 """ class FuelCounterUpper: """Determines the amount of fuel required to launch""" @classmethod def calc_fuel_req(cls, mass: int) -> int: """calc fuel required for moving input mass Don't forget to account for the weight of the fuel, too! Returns: int -- fuel required """ fuel_need = max(int(mass / 3) - 2, 0) if fuel_need == 0: return 0 return fuel_need + cls.calc_fuel_req(fuel_need) if __name__ == "__main__": fcu = FuelCounterUpper() with open("inputs/day01") as f: masses = f.readlines() total_fuel = sum([fcu.calc_fuel_req(int(m)) for m in masses]) print(f"total fuel required = { total_fuel }")
python
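The recursion in calc_fuel_req() (fuel also needs fuel, until the requirement bottoms out at zero) can be checked against the worked examples from the puzzle text:

assert FuelCounterUpper.calc_fuel_req(14) == 2          # 14 // 3 - 2 = 2, and fuel for 2 is 0
assert FuelCounterUpper.calc_fuel_req(1969) == 966      # 654 + 216 + 70 + 21 + 5
assert FuelCounterUpper.calc_fuel_req(100756) == 50346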
from pyleap import * bg = Rectangle(0, 0, window.width, window.height, color="white") r = Rectangle(color=(125, 125, 0)) line1 = Line(100, 200, 300, 400, 15, 'pink') tri = Triangle(200, 100, 300, 100, 250, 150, "green") c2 = Circle(200, 200, 50, "#ffff00") c = Circle(200, 200, 100, "red") txt = Text('Hello, world') c.transform.scale_y = 0.5 c2.opacity = 0.5 def update(dt): r.x += 1 r.y += 1 c.x += 1 line1.transform.rotation += 1 c.transform.rotation -= 1 def draw(dt): # update() window.clear() bg.draw() window.show_axis() Rectangle(100, 100, 50, 25, 'pink').draw() r.stroke() line1.draw() tri.stroke() c.stroke() c2.draw() txt.draw() window.show_fps() def start_move(): repeat(update) def stop_move(): stop(update) mouse.on_press(start_move) mouse.on_release(stop_move) repeat(draw) run()
python
# ====================================================================== # Timing is Everything # Advent of Code 2016 Day 15 -- Eric Wastl -- https://adventofcode.com # # Python implementation by Dr. Dean Earl Wright III # Tests from # https://rosettacode.org/wiki/Chinese_remainder_theorem#Functional # https://www.reddit.com/r/adventofcode/comments/5ifn4v/2016_day_15_solutions/ # ====================================================================== # ====================================================================== # t e s t _ c r t . p y # ====================================================================== "Test Cmt for Advent of Code 2016 day 15, Timing is Everything" # ---------------------------------------------------------------------- # import # ---------------------------------------------------------------------- import unittest import crt # ---------------------------------------------------------------------- # constants # ---------------------------------------------------------------------- # ====================================================================== # TestCRT # ====================================================================== class TestCRT(unittest.TestCase): # pylint: disable=R0904 "Test CRT object" def test_rosetta_code_examples(self): "Test examples from rosettacode" self.assertEqual(crt.chinese_remainder([3, 5, 7], [2, 3, 2]), 23) self.assertEqual(crt.chinese_remainder([5, 13], [2, 3]), 42) self.assertEqual(crt.chinese_remainder([100, 23], [19, 0]), 1219) self.assertEqual(crt.chinese_remainder([11, 12, 13], [10, 4, 12]), 1000) self.assertEqual(crt.chinese_remainder([5, 7, 9, 11], [1, 2, 3, 4]), 1731) self.assertEqual(crt.chinese_remainder( [17353461355013928499, 3882485124428619605195281, 13563122655762143587], [7631415079307304117, 1248561880341424820456626, 2756437267211517231]), 937307771161836294247413550632295202816) def test_part_one_example(self): "Test example from part one description [disc sizes], [initial values]" self.assertEqual(crt.chinese_remainder([5, 2], [-4, -1 - 1]), 5 + 1) # ---------------------------------------------------------------------- # module initialization # ---------------------------------------------------------------------- if __name__ == '__main__': pass # ====================================================================== # end t e s t _ c r t . p y end # ======================================================================
python
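The crt module exercised by the tests above is not included in this sample, so here is a minimal sketch of the interface the tests assume, using the textbook Chinese Remainder construction (pow(p, -1, m) needs Python 3.8+):

from functools import reduce

def chinese_remainder(moduli, residues):
    """Return x with x % m == r % m for each (m, r) pair, given pairwise coprime moduli."""
    prod = reduce(lambda acc, m: acc * m, moduli)
    total = 0
    for m, r in zip(moduli, residues):
        p = prod // m
        total += r * pow(p, -1, m) * p   # pow(p, -1, m) is the modular inverse of p mod m
    return total % prod

assert chinese_remainder([3, 5, 7], [2, 3, 2]) == 23
assert chinese_remainder([5, 13], [2, 3]) == 42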
import time import random import sqlite3 from parsers import OnePageParse from parsers import SeparatedPageParser from parsers import adultCollector from history import History conn = sqlite3.connect('killmepls.db') c = conn.cursor() for row in c.execute("SELECT MAX(hID) FROM stories"): last_hID = row[0] print(last_hID) list_of_histories = [] currentURL = 'https://killpls.me' baseURL = 'https://killpls.me' main_page = OnePageParse(currentURL, baseURL) main_page.startParsing() historyChecking = main_page.getListOfHistories() adultCollector(list_of_histories, historyChecking, baseURL) nextURL = main_page.getNextParsingPage() counter = 1 while nextURL: print('Next: {}'.format(nextURL)) currentPage = OnePageParse(nextURL, baseURL) currentPage.startParsing() historyChecking = currentPage.getListOfHistories() adultCollector(list_of_histories, historyChecking, baseURL) if last_hID in list(map(lambda x : x.historyID, list_of_histories)): print("We've faced history with ID = {}. Collection of histories stopped.".format(last_hID)) break delay_sec = random.randint(1,5) print('Delay : {} seconds'.format(delay_sec)) time.sleep(delay_sec) print('At iteration: {} we have {} histories'.format(counter, len(list_of_histories))) nextURL = currentPage.getNextParsingPage() counter += 1 sqlite_insert_with_param = """INSERT INTO 'stories' ('hID', 'hdate', 'url', 'history', 'tags', 'votes', 'lastAccess', 'adult') VALUES (?, ?, ?, ?, ?, ?, ?, ?);""" for one_history in list_of_histories: data_tuple = (one_history.historyID, one_history.historyTime, one_history.historyURL, one_history.historyText, ' '.join(one_history.historyTags), one_history.historyVotes, one_history.lastAccessTime, one_history.adultFlag) try: c.execute(sqlite_insert_with_param, data_tuple) except sqlite3.IntegrityError: print("Uniqueness violation: {}\t{}".format(data_tuple[0], data_tuple[2] )) conn.commit() conn.close()
python
import math import sys import string sys.path.append("../..") from MolecularSystem import System x = System(None) y = System(None) z = System(None) x.load_pdb('1KAW.pdb') y.load_pdb('1L1OA.pdb') z.load_pdb('1L1OB.pdb') for prot in [x,y,z]: prot.ProteinList[0].fill_pseudo_sidechains(1) prot.ProteinList[0].fill_neighbors_lists(0.35,15.0) x.res_list = [8, 14,31,32,33,34,35,57, 58, 59, 71, 77, 78, 79, 109] y.res_list = [25,31,40,41,42,43,44,53, 54, 55, 63, 67, 68, 69, 84 ] z.res_list = [74,80,91,92,93,94,95,104,105,106,120,126,127,128,146] dsf = 0.15 do_replicate = 1 replicate_thresh = 0.05 shell_start = 7.0 shell_end = 13.0 p_lo_hash = {} p_hi_hash = {} combinations = 0 beta_dist_sum = 0.0 beta_dist_cnt = 0.0 beta_dist_lzst = [] p_cnt = -1 for p in [x,y,z]: p_cnt += 1 for rn1 in range(len(p.res_list)-3): b1 = p.ProteinList[0].residue_dict[p.res_list[rn1]].central_atom c1 = p.ProteinList[0].residue_dict[p.res_list[rn1]].pseudo_sidechain x1,y1,z1 = c1.x,c1.y,c1.z xb1,yb1,zb1 = b1.x,b1.y,b1.z for rn2 in range(rn1+1,len(p.res_list)-2): b2 = p.ProteinList[0].residue_dict[p.res_list[rn2]].pseudo_sidechain c2 = p.ProteinList[0].residue_dict[p.res_list[rn2]].central_atom d2 = c1.dist(c2) for rn3 in range(rn2+1,len(p.res_list)-1): b3 = p.ProteinList[0].residue_dict[p.res_list[rn3]].pseudo_sidechain c3 = p.ProteinList[0].residue_dict[p.res_list[rn3]].central_atom d3 = c1.dist(c3) for rn4 in range(rn3+1,len(p.res_list)): b4 = p.ProteinList[0].residue_dict[p.res_list[rn4]].pseudo_sidechain c4 = p.ProteinList[0].residue_dict[p.res_list[rn4]].central_atom d4 = c1.dist(c4) dist_list = [d2, d3, d4] for d in dist_list: if d<=shell_start or d>=shell_end: break else: atom_list = [c2,c3,c4] beta_list = [b2,b3,b4] atom_num_list = [c2.atom_number, c3.atom_number, c4.atom_number] sorted_list = [c2.atom_number, c3.atom_number, c4.atom_number] sorted_list.sort() f = [0,0,0] for i in range(len(sorted_list)): for j in range(len(dist_list)): if atom_num_list[j] == sorted_list[i]: f[i] = j xs = [atom_list[f[0]].x, atom_list[f[1]].x, atom_list[f[2]].x] ys = [atom_list[f[0]].y, atom_list[f[1]].y, atom_list[f[2]].y] zs = [atom_list[f[0]].z, atom_list[f[1]].z, atom_list[f[2]].z] xbs = [beta_list[f[0]].x, beta_list[f[1]].x, beta_list[f[2]].x] ybs = [beta_list[f[0]].y, beta_list[f[1]].y, beta_list[f[2]].y] zbs = [beta_list[f[0]].z, beta_list[f[1]].z, beta_list[f[2]].z] new_distance_list = [math.sqrt(((x1- xs[0])**2) + ((y1- ys[0])**2) + ((z1- zs[0])**2)), math.sqrt(((x1- xs[1])**2) + ((y1- ys[1])**2) + ((z1- zs[1])**2)), math.sqrt(((x1- xs[2])**2) + ((y1- ys[2])**2) + ((z1- zs[2])**2)), math.sqrt(((xs[0]-xs[1])**2) + ((ys[0]-ys[1])**2) + ((zs[0]-zs[1])**2)), math.sqrt(((xs[0]-xs[2])**2) + ((ys[0]-ys[2])**2) + ((zs[0]-zs[2])**2)), math.sqrt(((xs[1]-xs[2])**2) + ((ys[1]-ys[2])**2) + ((zs[1]-zs[2])**2))] bet_distance_list = [math.sqrt(((xb1- xbs[0])**2) + ((yb1- ybs[0])**2) + ((zb1- zbs[0])**2)), math.sqrt(((xb1- xbs[1])**2) + ((yb1- ybs[1])**2) + ((zb1- zbs[1])**2)), math.sqrt(((xb1- xbs[2])**2) + ((yb1- ybs[2])**2) + ((zb1- zbs[2])**2)), math.sqrt(((xbs[0]-xbs[1])**2) + ((ybs[0]-ybs[1])**2) + ((zbs[0]-zbs[1])**2)), math.sqrt(((xbs[0]-xbs[2])**2) + ((ybs[0]-ybs[2])**2) + ((zbs[0]-zbs[2])**2)), math.sqrt(((xbs[1]-xbs[2])**2) + ((ybs[1]-ybs[2])**2) + ((zbs[1]-zbs[2])**2))] hires_distances = [new_distance_list[0], new_distance_list[1], new_distance_list[2], new_distance_list[3], new_distance_list[4], new_distance_list[5]] lowres_dl_bins = [[],[],[],[],[],[]] lowres_dlstrings = [] for i in range(len(new_distance_list)): 
lowres_dl_bins[i].append(math.floor(dsf*new_distance_list[i])) if do_replicate: if (new_distance_list[i]*dsf)%1.0 <= replicate_thresh: # if the distance is just over an integer change lowres_dl_bins[i].append((math.floor(dsf*new_distance_list[i]))-1) elif (new_distance_list[i]*dsf)%1.0 >= (1.0-replicate_thresh): lowres_dl_bins[i].append((math.floor(dsf*new_distance_list[i]))+1) if do_replicate: for i0 in lowres_dl_bins[0]: for i1 in lowres_dl_bins[1]: for i2 in lowres_dl_bins[2]: for i3 in lowres_dl_bins[3]: for i4 in lowres_dl_bins[4]: for i5 in lowres_dl_bins[5]: lowres_dlstrings.append('%2.0f_%2.0f_%2.0f_%2.0f_%2.0f_%2.0f_'%(i0,i1,i2,i3,i4,i5)) else: lowres_dlstrings.append('%2.0f_%2.0f_%2.0f_%2.0f_%2.0f_%2.0f_'%(lowres_dl_bins[0][0], lowres_dl_bins[1][0], lowres_dl_bins[2][0], lowres_dl_bins[3][0], lowres_dl_bins[4][0], lowres_dl_bins[5][0])) index_key = '%s %s %s %s'%(rn1,rn2,rn3,rn4) try: p_lo_hash[index_key] except KeyError: p_lo_hash[index_key] = [{'lowstr':lowres_dlstrings, 'betas':bet_distance_list, 'hilist':hires_distances}] else: p_lo_hash[index_key].append({'lowstr':lowres_dlstrings, 'betas':bet_distance_list, 'hilist':hires_distances}) keys = p_lo_hash.keys() keys.sort() good_count = 0 print '%s combinations'%(combinations) print '%s keys'%(len(keys)) sum_alpha1 = 0.0 sum_beta1 = 0.0 sum_alpha2 = 0.0 sum_beta2 = 0.0 cnt_alpha = 0.0 values = [] distance_count = 0 for key in keys: if len(p_lo_hash[key]) == 3: bail = 0 for s1 in range(len(p_lo_hash[key][0]['lowstr'])): for s2 in range(len(p_lo_hash[key][1]['lowstr'])): for s3 in range(len(p_lo_hash[key][2]['lowstr'])): if p_lo_hash[key][0]['lowstr'][s1] == p_lo_hash[key][1]['lowstr'][s2]: if p_lo_hash[key][1]['lowstr'][s2] == p_lo_hash[key][2]['lowstr'][s3]: dist1, dist2, dist3, dist4 = 0.0, 0.0, 0.0, 0.0 # accumulate the squared distance for d_ind in range(len(p_lo_hash[key][0]['hilist'])): d1 = (p_lo_hash[key][0]['hilist'][d_ind] - p_lo_hash[key][1]['hilist'][d_ind])**2 d2 = (p_lo_hash[key][0]['hilist'][d_ind] - p_lo_hash[key][2]['hilist'][d_ind])**2 d3 = (p_lo_hash[key][0]['betas'][d_ind] - p_lo_hash[key][1]['betas'][d_ind])**2 d4 = (p_lo_hash[key][0]['betas'][d_ind] - p_lo_hash[key][2]['betas'][d_ind])**2 dist1 += d1 dist2 += d2 dist3 += d3 dist4 += d4 distance_count += 1 ln = len(p_lo_hash[key][0]['hilist']) dist1,dist2,dist3,dist4 = math.sqrt(dist1/ln), math.sqrt(dist2/ln), math.sqrt(dist3/ln), math.sqrt(dist4/ln) values.append([dist1, dist2, dist3, dist4]) sum_alpha1 += dist1 sum_alpha2 += dist2 sum_beta1 += dist3 sum_beta2 += dist4 bail = 1 print '\n*\n', print '1 alpha %4.2f, beta %4.2f'%(dist1, dist3) print '2 alpha %4.2f, beta %4.2f'%(dist2, dist4) good_count += 1.0 break if bail: break if bail: break key_tokens = string.split(key) key_tokens[0] = int(key_tokens[0]) key_tokens[1] = int(key_tokens[1]) key_tokens[2] = int(key_tokens[2]) key_tokens[3] = int(key_tokens[3]) print '%s\n[[%3s,%3s,%3s,%3s], [%3s,%3s,%3s,%3s], [%3s,%3s,%3s,%3s]]'%(p_lo_hash[key][0]['lowstr'], x.res_list[key_tokens[0]],x.res_list[key_tokens[1]],x.res_list[key_tokens[2]],x.res_list[key_tokens[3]],y.res_list[key_tokens[0]],y.res_list[key_tokens[1]],y.res_list[key_tokens[2]],y.res_list[key_tokens[3]],z.res_list[key_tokens[0]],z.res_list[key_tokens[1]],z.res_list[key_tokens[2]],z.res_list[key_tokens[3]]) # calculate the standard deviation of the different core analogies sum = [0.0, 0.0, 0.0, 0.0] for value in values: sum[0] += (value[0] - (sum_alpha1/good_count))**2 sum[1] += (value[1] - (sum_alpha2/good_count))**2 sum[2] += (value[2] - 
(sum_beta1/good_count))**2 sum[3] += (value[3] - (sum_beta2/good_count))**2 for i in range(len(sum)): sum[i] /= (len(values)-1.0) for i in range(len(sum)): sum[i] = math.sqrt(sum[i]) print '%s of %s good (%s)'%(good_count, len(keys), good_count/(len(keys)+0.0)) print 'averages - a1 %4.2f a2 %4.2f b1 %4.2f b2 %4.2f'%(sum_alpha1/good_count, sum_alpha2/good_count, sum_beta1/good_count, sum_beta2/good_count) print 'deviatio - %4.2f %4.2f %4.2f %4.2f'%(sum[0], sum[1], sum[2], sum[3])
python
from ajenti.api import *
from ajenti.plugins.main.api import SectionPlugin
from ajenti.ui import on
from ajenti.ui.binder import Binder
from reconfigure.configs import ResolvConfig
from reconfigure.items.resolv import ItemData


@plugin
class Resolv (SectionPlugin):
    def init(self):
        self.title = _('Nameservers')
        self.icon = 'globe'
        self.category = _('System')
        self.append(self.ui.inflate('resolv:main'))

        self.find('name-box').labels = [
            _('DNS nameserver'), _('Local domain name'), _('Search list'),
            _('Sort list'), _('Options'),
        ]
        self.find('name-box').values = [
            'nameserver', 'domain', 'search', 'sortlist', 'options',
        ]

        self.config = ResolvConfig(path='/etc/resolv.conf')
        self.binder = Binder(None, self.find('resolv-config'))
        self.find('items').new_item = lambda c: ItemData()

    def on_page_load(self):
        self.config.load()
        self.binder.setup(self.config.tree).populate()

    @on('save', 'click')
    def save(self):
        self.binder.update()
        self.config.save()
python
import pandas as pd from pandas import ExcelWriter counties_numbers_to_names = { 3: "Santa Clara", 4: "Alameda", 5: "Contra Costa", 2: "San Mateo", 8: "Sonoma", 1: "San Francisco", 6: "Solano", 9: "Marin", 7: "Napa" } counties_map = pd.read_csv("data/taz_geography.csv", index_col="zone").\ county.map(counties_numbers_to_names) writer = ExcelWriter('county_output.xlsx') parcels_to_counties = pd.HDFStore("data/2015_09_01_bayarea_v3.h5", "r").\ parcels.zone_id.map(counties_map) for run in range(1308, 1312): df = pd.read_csv("http://urbanforecast.com/runs/"\ "run%d_parcel_output.csv" % run) df["county"] = df.parcel_id.map(parcels_to_counties) growthinpdas = df[(df.building_type_id <= 3) & (df.pda.notnull())].\ groupby("county").net_units.sum() growthnotinpdas = df[(df.building_type_id <= 3) & (df.pda.isnull())].\ groupby("county").net_units.sum() pctgrowthinpdas = growthinpdas / (growthnotinpdas+growthinpdas) print pctgrowthinpdas baseyear = pd.read_csv("output/baseyear_taz_summaries_2010.csv") baseyear["county"] = baseyear.zone_id.map(counties_map) outyear = pd.read_csv("http://urbanforecast.com/runs/"\ "run%d_taz_summaries_2040.csv" % run) outyear["county"] = outyear.zone_id.map(counties_map) hhpctgrowth = outyear.groupby("county").TOTPOP.sum() / \ baseyear.groupby("county").TOTPOP.sum() - 1 s = outyear.groupby("county").TOTPOP.sum() - \ baseyear.groupby("county").TOTPOP.sum() hhgrowthshare = s / s.sum() emppctgrowth = outyear.groupby("county").TOTEMP.sum() / \ baseyear.groupby("county").TOTEMP.sum() - 1 s = outyear.groupby("county").TOTEMP.sum() - \ baseyear.groupby("county").TOTEMP.sum() empgrowthshare = s / s.sum() growthinunits = outyear.eval("SFDU + MFDU").groupby(outyear.county).sum() - \ baseyear.eval("SFDU + MFDU").groupby(baseyear.county).sum() growthinmultifamily = outyear.groupby(outyear.county).MFDU.sum() - \ baseyear.groupby(baseyear.county).MFDU.sum() pct_multifamily_growth = growthinmultifamily / growthinunits df = pd.DataFrame({ "pct_growth_in_pdas": pctgrowthinpdas, "hh_pct_growth": hhpctgrowth, "hh_growth_share": hhgrowthshare, "emp_pct_growth": emppctgrowth, "emp_growth_share": empgrowthshare, "growth_in_units": growthinunits.astype('int'), "pct_multifamily_growth": pct_multifamily_growth.clip(upper=1.0) }) df.index.name = None df.to_excel(writer, 'run%d' % run, float_format="%.2f")
python
#|============================================================================= #| #| FILE: ports.py [Python module source code] #| #| SYNOPSIS: #| #| The purpose of this module is simply to define #| some easy-to-remember constants naming the port #| numbers used by this application. #| #| SYSTEM CONTEXT: #| #| This file is part of the central server #| application for the COSMICi project. #| #|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv # Names exported from this package. __all__ = [ 'COSMO_PORT', # Global constant port numbers. 'LASER_PORT', 'MESON_PORT', 'DISCO_PORT' ] # Global declaration. global COSMO_PORT, LASER_PORT, MESON_PORT #|=========================================================== #| Port numbers. [global constants] #| #| Define some handy global port numbers based on #| easy-to-remember touch-tone mnemonics. #| #vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv #|----------------------------------------------------------------- #| #| COSMO_PORT [global constant] #| #| This is the main port on which we listen #| for the main (initial) connection from #| each remote node in the local sensor net. #| We process server commands sent to it. #| #|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv COSMO_PORT = 26766 #|------------------------------------------------------------------- #| #| LASER_PORT [global constant] #| #| We listen at this port number (and subsequent #| ones) for the AUXIO (STDIO replacement) stream #| from each remote node (used for diagnostics & #| user interaction with the remote command #| processor). This is the base port number (for #| node #0), the node number gets added to it to #| find the port number to be used by other nodes. #| #|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv LASER_PORT = 52737 # Use this port and subsequent ones for bridged AUXIO connections to the UWscript. #|------------------------------------------------------------------- #| #| MESON_PORT [global constant] #| #| We listen at this port number (and subsequent #| ones) for the bridged UART data stream from #| each remote node. This is the base port number #| (for node #0), the node number gets added to it #| to find the port number for other nodes. #| #|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv MESON_PORT = 63766 # Use this port and subsequent ones for bridged UART connections to the digitizer boards. DISCO_PORT = 34726 # Use this port for server IP address discovery. #|^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ #| END FILE: ports.py #|----------------------------------------------------------------------
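A minimal sketch of how these base port numbers are meant to be used, following the comments above: the node number is added to the base port to obtain each node's port. The helper functions below are illustrative and not part of ports.py.

# Illustrative helpers (not part of ports.py): derive per-node ports as the
# comments above describe, by adding the node number to the base port.
import ports

def auxio_port(node_num):
    # AUXIO (STDIO replacement) stream for node `node_num`.
    return ports.LASER_PORT + node_num

def uart_port(node_num):
    # Bridged UART data stream for node `node_num`.
    return ports.MESON_PORT + node_num

# Example: node #2 would use 52739 for AUXIO and 63768 for UART.
print(auxio_port(2), uart_port(2))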
python
from __future__ import annotations import skia from core.base import View, Rect from views.enums import Alignment, Justify class HBox(View): def __init__(self): super(HBox, self).__init__() self._alignment = Alignment.BEGIN self._justify = Justify.BEGIN self._spacing = 0 self._height = None self._width = None self._wrap = False self._grow = {} self._view_width = 0 self._view_height = 0 def _lay_out_items( self, canvas: skia.Canvas, x: float, y: float, width: float, height: float, draw: bool = False ) -> None: content_x = self._spacing max_height = 0 rows = [] row = [] view_width = self._width or width if view_width: view_width -= self._left_padding + self._right_padding for item in self._children: bounding_rect = item.get_bounding_rect() max_height = max(max_height, bounding_rect.height) if self._wrap and view_width and content_x + self._spacing + bounding_rect.width > view_width: rows.append({ 'row': row, 'row_items_width': content_x, }) row = [] content_x = self._spacing row.append({ 'width': bounding_rect.width, 'height': bounding_rect.height, 'item': item, }) content_x += bounding_rect.width + self._spacing if row: rows.append({ 'row': row, 'row_items_width': content_x, }) content_x = self._spacing content_y = self._spacing for row_info in rows: row = row_info['row'] leftover_width = view_width - row_info['row_items_width'] for idx, item_info in enumerate(row): item = item_info['item'] item_width = item_info['width'] item_height = item_info['height'] if self._justify == Justify.END and idx == 0: content_x += leftover_width if self._justify == Justify.SPACE_AROUND: content_x += leftover_width / (len(row) + 1) if self._justify == Justify.SPACE_BETWEEN and idx != 0: content_x += leftover_width / (len(row) - 1) if draw: if self._alignment == Alignment.BEGIN: item.draw(canvas, x + content_x, y + content_y, width, height) elif self._alignment == Alignment.END: item.draw(canvas, x + content_x, y + content_y + (max_height - item_height), width, height) elif self._alignment == Alignment.CENTER: item.draw(canvas, x + content_x, y + content_y + (max_height - item_height) / 2, width, height) if self._justify == Justify.SPACE_AROUND and idx == len(row) - 1: content_x += leftover_width / (len(row) + 1) content_x += item_width + self._spacing self._view_width = max(self._view_width, content_x) content_y += max_height + self._spacing self._view_height = content_y content_x = self._spacing def draw(self, canvas: skia.Canvas, x: float, y: float, width: float, height: float) -> None: x += self._x + (self._left_padding or 0) + (self._left_margin or 0) y += self._y + (self._top_padding or 0) + (self._top_margin or 0) self._lay_out_items( canvas, x, y, width - (self._left_padding or 0) - (self._right_padding or 0), height - (self._top_padding or 0) - (self._bottom_padding or 0), draw=True, ) def get_bounding_rect(self) -> Rect: width = self._width height = self._height if height is None or width is None: self._lay_out_items(None, 0, 0, 640, 480) height = height or self._view_height width = width or self._view_width return Rect( x=0, y=0, width=self._left_margin + width + self._right_margin, height=self._top_margin + height + self._bottom_margin, ) def alignment(self, alignment) -> HBox: self._alignment = alignment return self def justify(self, justify) -> HBox: self._justify = justify return self def spacing(self, spacing: float) -> HBox: self._spacing = spacing return self def width(self, width: float) -> HBox: self._width = width return self def height(self, height: float) -> HBox: self._height = height 
return self def wrap(self, wrap: bool = False) -> HBox: self._wrap = wrap return self def grow(self, view: View, priority: int) -> HBox: self._grow[view] = priority return self
python
# -*- coding: utf-8 -*- from gevent import monkey, event monkey.patch_all() import uuid import unittest import datetime import requests_mock from gevent.queue import Queue from gevent.hub import LoopExit from time import sleep from mock import patch, MagicMock from openprocurement.bot.identification.client import DocServiceClient from openprocurement.bot.identification.databridge.upload_file_to_doc_service import UploadFileToDocService from openprocurement.bot.identification.databridge.utils import generate_doc_id, item_key from openprocurement.bot.identification.databridge.process_tracker import ProcessTracker from openprocurement.bot.identification.databridge.data import Data from openprocurement.bot.identification.tests.utils import custom_sleep, generate_answers, AlmostAlwaysFalse from openprocurement.bot.identification.databridge.constants import file_name, DOC_TYPE from openprocurement.bot.identification.databridge.sleep_change_value import APIRateController class TestUploadFileWorker(unittest.TestCase): __test__ = True def setUp(self): self.tender_id = uuid.uuid4().hex self.award_id = uuid.uuid4().hex self.qualification_id = uuid.uuid4().hex self.document_id = generate_doc_id() self.process_tracker = ProcessTracker(db=MagicMock()) self.process_tracker.set_item(self.tender_id, self.award_id, 1) self.upload_to_doc_service_queue = Queue(10) self.upload_to_tender_queue = Queue(10) self.sleep_change_value = APIRateController() self.sna = event.Event() self.sna.set() self.data = Data(self.tender_id, self.award_id, '123', 'awards', {'meta': {'id': self.document_id}, 'test_data': 'test_data'}) self.qualification_data = Data(self.tender_id, self.qualification_id, '123', 'qualifications', {'meta': {'id': self.document_id}, 'test_data': 'test_data'}) self.doc_service_client = DocServiceClient(host='127.0.0.1', port='80', user='', password='') self.worker = UploadFileToDocService(self.upload_to_doc_service_queue, self.upload_to_tender_queue, self.process_tracker, self.doc_service_client, self.sna, self.sleep_change_value) self.url = '{url}'.format(url=self.doc_service_client.url) @staticmethod def stat_200(): return {'data': {'url': 'http://docs-sandbox.openprocurement.org/get/8ccbfde0c6804143b119d9168452cb6f', 'format': 'application/yaml', 'hash': 'md5:9a0364b9e99bb480dd25e1f0284c8555', 'title': file_name}} @staticmethod def get_tender(): return {'data': {'id': uuid.uuid4().hex, 'documentOf': 'tender', 'documentType': DOC_TYPE, 'url': 'url'}} def tearDown(self): del self.worker def is_working(self, worker): return self.upload_to_doc_service_queue.qsize() or worker.retry_upload_to_doc_service_queue.qsize() def shutdown_when_done(self, worker): worker.start() while self.is_working(worker): sleep(0.1) worker.shutdown() def test_init(self): worker = UploadFileToDocService.spawn(None, None, None, None, self.sna, None) self.assertGreater(datetime.datetime.now().isoformat(), worker.start_time.isoformat()) self.assertEqual(worker.upload_to_doc_service_queue, None) self.assertEqual(worker.upload_to_tender_queue, None) self.assertEqual(worker.process_tracker, None) self.assertEqual(worker.doc_service_client, None) self.assertEqual(worker.services_not_available, self.sna) self.assertEqual(worker.sleep_change_value, None) self.assertEqual(worker.delay, 15) self.assertEqual(worker.exit, False) worker.shutdown() self.assertEqual(worker.exit, True) del worker @requests_mock.Mocker() @patch('gevent.sleep') def test_successful_upload(self, mrequest, gevent_sleep): gevent_sleep.side_effect = custom_sleep 
mrequest.post(self.url, json=self.stat_200(), status_code=200) self.upload_to_doc_service_queue.put(self.data) self.assertItemsEqual(self.process_tracker.processing_items.keys(), [item_key(self.tender_id, self.award_id)]) self.assertEqual(self.upload_to_doc_service_queue.qsize(), 1) self.shutdown_when_done(self.worker) self.assertEqual(self.upload_to_doc_service_queue.qsize(), 0, 'Queue should be empty') self.assertEqual(self.upload_to_tender_queue.qsize(), 1, 'Queue should be have 1 element') self.assertEqual(mrequest.call_count, 1) self.assertEqual(mrequest.request_history[0].url, u'127.0.0.1:80/upload') self.assertIsNotNone(mrequest.request_history[0].headers['X-Client-Request-ID']) self.assertItemsEqual(self.process_tracker.processing_items.keys(), [item_key(self.tender_id, self.award_id)]) @requests_mock.Mocker() @patch('gevent.sleep') def test_retry_doc_service(self, mrequest, gevent_sleep): gevent_sleep.side_effect = custom_sleep doc_service_client = DocServiceClient(host='127.0.0.1', port='80', user='', password='') mrequest.post(self.url, [{'text': '', 'status_code': 401} for _ in range(6)] + [ {'json': {'data': {'url': 'test url', 'format': 'application/yaml', 'hash': 'md5:9a0364b9e99bb480dd25e1f0284c8555', 'title': file_name}}, 'status_code': 200}]) self.upload_to_doc_service_queue.put(self.data) self.assertItemsEqual(self.process_tracker.processing_items.keys(), [item_key(self.tender_id, self.award_id)]) self.assertEqual(self.upload_to_doc_service_queue.qsize(), 1) self.shutdown_when_done(self.worker) self.assertEqual(self.upload_to_doc_service_queue.qsize(), 0, 'Queue should be empty') self.assertEqual(self.upload_to_tender_queue.qsize(), 1, 'Queue should be have 1 element') self.assertEqual(mrequest.call_count, 7) self.assertEqual(mrequest.request_history[0].url, u'127.0.0.1:80/upload') self.assertIsNotNone(mrequest.request_history[0].headers['X-Client-Request-ID']) @requests_mock.Mocker() @patch('gevent.sleep') def test_request_failed(self, mrequest, gevent_sleep): gevent_sleep.side_effect = custom_sleep mrequest.post(self.url, json=self.stat_200(), status_code=200) self.upload_to_doc_service_queue.put(self.data) self.shutdown_when_done(self.worker) self.assertEqual(self.upload_to_doc_service_queue.qsize(), 0, 'Queue should be empty') self.assertEqual(self.upload_to_tender_queue.get(), self.data) self.assertEqual(self.process_tracker.processing_items, {item_key(self.tender_id, self.award_id): 1}) self.assertEqual(mrequest.call_count, 1) self.assertEqual(mrequest.request_history[0].url, u'127.0.0.1:80/upload') self.assertIsNotNone(mrequest.request_history[0].headers['X-Client-Request-ID']) @requests_mock.Mocker() @patch('gevent.sleep') def test_request_failed_item_status_change(self, mrequest, gevent_sleep): gevent_sleep.side_effect = custom_sleep mrequest.post(self.url, json=self.stat_200(), status_code=200) self.process_tracker.set_item(self.tender_id, self.qualification_id, 1) self.upload_to_doc_service_queue.put(self.data) self.upload_to_doc_service_queue.put(self.qualification_data) self.shutdown_when_done(self.worker) self.assertEqual(self.upload_to_doc_service_queue.qsize(), 0, 'Queue should be empty') self.assertEqual(self.upload_to_tender_queue.get(), self.data) self.assertEqual(self.upload_to_tender_queue.get(), self.qualification_data) self.assertEqual(mrequest.call_count, 2) self.assertEqual(mrequest.request_history[0].url, u'127.0.0.1:80/upload') self.assertIsNotNone(mrequest.request_history[0].headers['X-Client-Request-ID']) 
self.assertEqual(self.process_tracker.processing_items, {item_key(self.tender_id, self.award_id): 1, item_key(self.tender_id, self.qualification_id): 1}) @requests_mock.Mocker() @patch('gevent.sleep') def test_processing_items(self, mrequest, gevent_sleep): gevent_sleep.side_effect = custom_sleep mrequest.post(self.url, [{'json': self.stat_200(), 'status_code': 200} for _ in range(2)]) self.process_tracker.set_item(self.tender_id, self.award_id, 2) self.upload_to_doc_service_queue.put(self.data) self.upload_to_doc_service_queue.put(self.data) self.shutdown_when_done(self.worker) self.assertEqual(self.upload_to_tender_queue.get(), self.data) self.assertEqual(self.upload_to_tender_queue.get(), self.data) self.assertEqual(mrequest.request_history[0].url, u'127.0.0.1:80/upload') self.assertIsNotNone(mrequest.request_history[0].headers['X-Client-Request-ID']) @requests_mock.Mocker() @patch('gevent.sleep') def test_upload_to_doc_service_queue_loop_exit(self, mrequest, gevent_sleep): """ Test LoopExit for upload_to_doc_service_queue """ gevent_sleep.side_effect = custom_sleep self.process_tracker.set_item(self.tender_id, self.award_id, 2) self.worker.upload_to_doc_service_queue = MagicMock() self.worker.upload_to_doc_service_queue.peek.side_effect = generate_answers( answers=[LoopExit(), self.data, self.data], default=LoopExit()) mrequest.post(self.url, [{'json': self.stat_200(), 'status_code': 200} for _ in range(2)]) self.worker.start() sleep(1) self.assertEqual(self.upload_to_tender_queue.get(), self.data) self.assertIsNotNone(mrequest.request_history[0].headers['X-Client-Request-ID']) self.assertIsNotNone(mrequest.request_history[1].headers['X-Client-Request-ID']) self.assertEqual(self.process_tracker.processing_items, {item_key(self.tender_id, self.award_id): 2}) @requests_mock.Mocker() @patch('gevent.sleep') def test_retry_upload_to_doc_service_queue_loop_exit(self, mrequest, gevent_sleep): """ Test LoopExit for retry_upload_to_doc_service_queue """ gevent_sleep.side_effect = custom_sleep mrequest.post(self.url, [{'json': self.stat_200(), 'status_code': 200} for _ in range(2)]) self.process_tracker.set_item(self.tender_id, self.award_id, 2) self.worker.retry_upload_to_doc_service_queue = MagicMock() self.worker.retry_upload_to_doc_service_queue.peek.side_effect = generate_answers( answers=[LoopExit(), self.data, self.data], default=LoopExit()) self.worker.start() sleep(1) self.worker.shutdown() self.assertEqual(self.upload_to_tender_queue.get(), self.data) self.assertEqual(self.process_tracker.processing_items, {item_key(self.tender_id, self.award_id): 2}) self.assertEqual(mrequest.request_history[0].url, u'127.0.0.1:80/upload') self.assertIsNotNone(mrequest.request_history[0].headers['X-Client-Request-ID']) def test_remove_bad_data(self): self.worker.upload_to_doc_service_queue = MagicMock(get=MagicMock()) self.worker.process_tracker = MagicMock(update_items_and_tender=MagicMock()) self.worker.remove_bad_data(self.data, Exception("test message"), False) self.worker.upload_to_doc_service_queue.get.assert_called_once() self.assertEqual(self.worker.retry_upload_to_doc_service_queue.get(), self.data) def test_remove_bad_data_retry(self): self.worker.retry_upload_to_doc_service_queue = MagicMock(get=MagicMock()) self.worker.process_tracker = MagicMock(update_items_and_tender=MagicMock()) with self.assertRaises(Exception): self.worker.remove_bad_data(self.data, Exception("test message"), True) self.worker.retry_upload_to_doc_service_queue.get.assert_called_once() 
self.worker.process_tracker.update_items_and_tender.assert_called_with(self.data.tender_id, self.data.item_id, self.document_id) def test_try_upload_to_doc_service(self): e = Exception("test error") self.worker.update_headers_and_upload = MagicMock(side_effect=e) self.worker.remove_bad_data = MagicMock() self.worker.try_upload_to_doc_service(self.data, False) self.worker.update_headers_and_upload.assert_called_once() self.worker.remove_bad_data.assert_called_once_with(self.data, e, False) def test_try_upload_to_doc_service_retry(self): e = Exception("test error") self.worker.update_headers_and_upload = MagicMock(side_effect=e) self.worker.remove_bad_data = MagicMock() self.worker.try_upload_to_doc_service(self.data, True) self.worker.update_headers_and_upload.assert_called_once() self.worker.remove_bad_data.assert_called_with(self.data, e, True) def test_run(self): self.worker.delay = 1 upload_worker, retry_upload_worker = MagicMock(), MagicMock() self.worker.upload_worker = upload_worker self.worker.retry_upload_worker = retry_upload_worker with patch.object(self.worker, 'exit', AlmostAlwaysFalse()): self.worker._run() self.assertEqual(self.worker.upload_worker.call_count, 1) self.assertEqual(self.worker.retry_upload_worker.call_count, 1) @patch('gevent.killall') def test_run_exception(self, killlall): self.worker.delay = 1 self.worker._start_jobs = MagicMock(return_value={"a": 1}) self.worker.check_and_revive_jobs = MagicMock(side_effect=Exception("test error")) self.worker._run() killlall.assert_called_once_with([1], timeout=5) @patch('gevent.killall') @patch('gevent.sleep') def test_run_exception(self, gevent_sleep, killlall): gevent_sleep.side_effect = custom_sleep self.worker._start_jobs = MagicMock(return_value={"a": 1}) self.worker.check_and_revive_jobs = MagicMock(side_effect=Exception("test error")) self.worker._run() killlall.assert_called_once_with([1], timeout=5)
python
'''
This module is a control-flow example.
The control-flow statements are:
    if
    while
    for
    break
    continue
'''


def guessnumber():
    '''Number guessing game.'''
    number = 23
    running = True
    while running:
        guess = int(input('Guess the integer: '))
        if guess == number:
            print('Congratulations, you got it!')
            running = False
        elif guess < number:
            print('No, too small')
        else:
            print('No, too big')
    else:
        print('Guessing is over!')


guessnumber()
print('Game over')
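The module docstring also lists for, break and continue, which the guessing game itself does not exercise. A small supplementary sketch (not part of the original module) covering those three:

# Supplementary example for the for / break / continue statements listed above.
for n in range(1, 10):
    if n % 2 == 0:
        continue       # skip even numbers
    if n > 7:
        break          # stop once n exceeds 7
    print('odd:', n)   # prints 1, 3, 5, 7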
python
import pytest

from ipypublish.filters_pandoc.utils import apply_filter
from ipypublish.filters_pandoc import prepare_labels
from ipypublish.filters_pandoc import format_label_elements


def test_math_span_latex():
    in_json = {"blocks": [{"t": "Para", "c": [
        {"t": "Span", "c": [
            ["a", ["labelled-Math"], [["b", "2"]]],
            [{"t": "Math", "c": [{"t": "DisplayMath"}, "a=1"]}]]}
    ]}],
        "pandoc-api-version": [1, 17, 5, 1],
        "meta": {
            "$$references": {"t": "MetaMap", "c": {
                "a": {"t": "MetaMap", "c": {
                    "type": {"t": "MetaString", "c": "Math"},
                    "number": {"t": "MetaString", "c": "1"}}}}}}}

    out_string = apply_filter(
        in_json, format_label_elements.main, "latex", in_format="json")

    assert out_string.strip() == "\n".join([
        r"\begin{equation}a=1\label{a}\end{equation}"
    ])


def test_math_span_rst():
    in_json = {"blocks": [{"t": "Para", "c": [
        {"t": "Span", "c": [
            ["a", ["labelled-Math"], [["b", "2"]]],
            [{"t": "Math", "c": [{"t": "DisplayMath"}, "a=1"]}]]}
    ]}],
        "pandoc-api-version": [1, 17, 5, 1],
        "meta": {
            "$$references": {"t": "MetaMap", "c": {
                "a": {"t": "MetaMap", "c": {
                    "type": {"t": "MetaString", "c": "Math"},
                    "number": {"t": "MetaString", "c": "1"}}}}}}}

    out_string = apply_filter(
        in_json, format_label_elements.main, "rst", in_format="json")

    assert out_string.strip() == "\n".join([
        ".. math::",
        "   :nowrap:",
        "   :label: a",
        "",
        r"   \begin{equation}a=1\end{equation}"
    ])


@pytest.mark.skip(
    reason="there's an issue with pandoc outputting unicode in '/em> = 1'")
def test_math_span_html():
    in_json = {"blocks": [{"t": "Para", "c": [
        {"t": "Span", "c": [
            ["a", ["labelled-Math"], [["b", "2"]]],
            [{"t": "Math", "c": [{"t": "DisplayMath"}, "a=1"]}]]}
    ]}],
        "pandoc-api-version": [1, 17, 5, 1],
        "meta": {
            "$$references": {"t": "MetaMap", "c": {
                "a": {"t": "MetaMap", "c": {
                    "type": {"t": "MetaString", "c": "Math"},
                    "number": {"t": "MetaString", "c": "1"}}}}}}}

    out_string = apply_filter(
        in_json, format_label_elements.main, "html", in_format="json")

    assert out_string.strip() == "\n".join([
        '<p><a id="a" class="anchor-link" name="#a">'
        '<br />'
        '<span class="math display"><em>a</em> = 1</span>'
        '<br />'
        '</a></p>'
    ])


def test_math_md_to_rst():
    in_str = [
        "$$a = b$$ {#eq:id1}",
        "$$c &= d \\\\ other &= e$$ {#a env=align .unnumbered}"
    ]

    out_string = apply_filter(
        in_str, [prepare_labels.main, format_label_elements.main],
        in_format="markdown", out_format="rst")

    assert out_string.strip() == "\n".join([
        ".. math::",
        "   :nowrap:",
        "   :label: eq:id1",
        "",
        r"   \begin{equation}a = b\end{equation}",
        "",
        "",
        "",
math::", " :nowrap:", " :label: a", "", r" \begin{align*}c &= d \\ other &= e\end{align*}" ]) def test_image_html(): """ """ # "![a title](path/to/image.png){#label1 .class-name a=5}" in_json = ( {"blocks": [ {"t": "Para", "c": [ {"t": "Image", "c": [ ["label1", ["class-name"], [["a", "5"]]], [{"t": "Str", "c": "a"}, {"t": "Space"}, {"t": "Str", "c": "title"}], ["path/to/image.png", "fig:"]]}]}], "pandoc-api-version": [1, 17, 5, 1], "meta": {}} ) out_string = apply_filter( in_json, format_label_elements.main, "html", in_format="json") assert out_string.strip() == "\n".join([ '<p><a id="label1" class="anchor-link" name="#label1">' '<img src="path/to/image.png" title="fig:" alt="a title" id="label1" ' 'class="class-name" data-a="5" />' '</a></p>' ]) def test_image_rst(): """ """ # "![a title](path/to/image.png){#label1 .class-name a=5}" in_json = ( {"blocks": [ {"t": "Para", "c": [ {"t": "Image", "c": [ ["label1", ["class-name"], [["a", "5"]]], [{"t": "Str", "c": "a"}, {"t": "Space"}, {"t": "Str", "c": "title"}], ["path/to/image.png", "fig:"]]}]}], "pandoc-api-version": [1, 17, 5, 1], "meta": {}} ) out_string = apply_filter( in_json, format_label_elements.main, "rst", in_format="json") assert out_string.strip() == "\n".join([ ".. figure:: path/to/image.png", " :alt: a title", " :figclass: class-name", " :name: label1", "", " a title" ]) def test_image_latex(): """ """ # "![a title](path/to/image.png){#label1 .class-name a=5}" in_json = ( {"blocks": [ {"t": "Para", "c": [ {"t": "Image", "c": [ ["label1", ["class-name"], [["a", "5"]]], [{"t": "Str", "c": "a"}, {"t": "Space"}, {"t": "Str", "c": "title"}], ["path/to/image.png", "fig:"]]}]}], "pandoc-api-version": [1, 17, 5, 1], "meta": {}} ) out_string = apply_filter( in_json, format_label_elements.main, "latex", in_format="json") assert out_string.strip() == "\n".join([ r"\begin{figure}[]", r"\hypertarget{label1}{%", r"\begin{center}", r"\adjustimage{max size={0.9\linewidth}{0.9\paperheight},}" r"{path/to/image.png}", r"\end{center}", r"\caption{a title}\label{label1}", "}", r"\end{figure}" ]) def test_table_html(): """ Some text a b - - 1 2 4 5 Table: Caption. {#tbl:id} """ in_json = ( { "pandoc-api-version": [1, 17, 5, 1], "meta": { "$$references": {"t": "MetaMap", "c": { "tbl:id": {"t": "MetaMap", "c": { "type": {"t": "MetaString", "c": "Table"}, "number": {"t": "MetaString", "c": "1"}}}}}}, "blocks": [{"t": "Para", "c": [ {"t": "Str", "c": "Some"}, {"t": "Space"}, {"t": "Str", "c": "text"}]}, {"t": "Div", "c": [ ["tbl:id", ["labelled-Table"], []], [{"t": "Table", "c": [ [{"t": "Str", "c": "Caption."}, {"t": "Space"}], [{"t": "AlignDefault"}, {"t": "AlignDefault"}], [0, 0], [[{"t": "Plain", "c": [{"t": "Str", "c": "a"}]}], [{"t": "Plain", "c": [{"t": "Str", "c": "b"}]}]], [[[{"t": "Plain", "c": [{"t": "Str", "c": "1"}]}], [{"t": "Plain", "c": [{"t": "Str", "c": "2"}]}]], [[{"t": "Plain", "c": [{"t": "Str", "c": "4"}]}], [{"t": "Plain", "c": [{"t": "Str", "c": "5"}]}] ]]]}]]}]} ) out_string = apply_filter( in_json, format_label_elements.main, "html", in_format="json") assert out_string.strip() == "\n".join([ '<p>Some text</p>', '<a id="tbl:id" class="anchor-link" name="#tbl:id">', '<table>', '<caption>Caption. </caption>', '<thead>', '<tr class="header">', '<th>a</th>', '<th>b</th>', '</tr>', '</thead>', '<tbody>', '<tr class="odd">', '<td>1</td>', '<td>2</td>', '</tr>', '<tr class="even">', '<td>4</td>', '<td>5</td>', '</tr>', '</tbody>', '</table>', '</a>']) def test_table_rst(): """ Some text a b - - 1 2 4 5 Table: Caption. 
{#tbl:id} """ in_json = ( { "pandoc-api-version": [1, 17, 5, 1], "meta": { "$$references": {"t": "MetaMap", "c": { "tbl:id": {"t": "MetaMap", "c": { "type": {"t": "MetaString", "c": "Table"}, "number": {"t": "MetaString", "c": "1"}}}}}}, "blocks": [{"t": "Para", "c": [ {"t": "Str", "c": "Some"}, {"t": "Space"}, {"t": "Str", "c": "text"}]}, {"t": "Div", "c": [ ["tbl:id", ["labelled-Table"], []], [{"t": "Table", "c": [ [{"t": "Str", "c": "Caption."}, {"t": "Space"}], [{"t": "AlignDefault"}, {"t": "AlignDefault"}], [0, 0], [[{"t": "Plain", "c": [{"t": "Str", "c": "a"}]}], [{"t": "Plain", "c": [{"t": "Str", "c": "b"}]}]], [[[{"t": "Plain", "c": [{"t": "Str", "c": "1"}]}], [{"t": "Plain", "c": [{"t": "Str", "c": "2"}]}]], [[{"t": "Plain", "c": [{"t": "Str", "c": "4"}]}], [{"t": "Plain", "c": [{"t": "Str", "c": "5"}]}] ]]]}]]}]} ) out_string = apply_filter( in_json, format_label_elements.main, "rst", in_format="json") assert out_string.strip().splitlines()[0:3] == [ 'Some text', '', '.. _`tbl:id`:' ]
python
from lxml import etree
import glob


class Plugin:
    """Class that defines a plugin with:
    - its name
    - its description
    - its version
    - its state..."""

    def __init__(self, file, name, desc, version, state):
        self.file = file
        self.name = name
        self.desc = desc
        self.version = version
        self.state = state


def CreatePlugin(p, xml):
    """Function that loads the plugin from its XML descriptor."""
    tree = etree.parse(xml)
    root = tree.getroot()
    file = p
    name = root[0].text
    desc = root[1].text
    version = root[2].text
    state = str2bool(root[3].text)
    plugin = Plugin(file, name, desc, version, state)
    return plugin


def LoadPlugins():
    """Function that scans the plugin directory and creates plugin objects."""
    plugs = glob.glob("plugins/*.py")
    plugins = []
    for p in plugs:
        p = p.replace(".py", "")
        p = p.replace("plugins\\", "")
        # Skip the package and loader modules themselves.
        if p in ("__init__", "PluginLoader"):
            continue
        xml = "plugins/{p}.xml".format(p=p)
        try:
            plg = CreatePlugin(p, xml)
            plugins.append(plg)
        except Exception:
            pass
    return plugins


def str2bool(v):
    return v.lower() in ("yes", "true", "t", "1", "oui", "vrai", "activé",
                         "active", "on", "enable", "enabled")
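A short usage sketch of the loader above; it assumes a plugins/ directory with matching <name>.py and <name>.xml pairs sits next to the caller, and simply lists what LoadPlugins discovers.

# Hypothetical usage of the loader above; assumes a plugins/ directory
# containing <name>.py files with matching <name>.xml descriptors.
plugins = LoadPlugins()
for plg in plugins:
    status = "enabled" if plg.state else "disabled"
    print("%s %s (%s): %s" % (plg.name, plg.version, status, plg.desc))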
python
# type: ignore
import os
import signal
import sys
import time


def signal_handler(sig, frame):
    print("You pressed Ctrl+C!")
    time.sleep(1)
    with open(
        os.path.join(
            os.path.dirname(os.path.dirname(__file__)),
            "tests",
            "signal_gracefully_terminated",
        ),
        "w",
    ) as f:
        f.write("blah")
    sys.exit(0)


signal.signal(signal.SIGINT, signal_handler)
print("Press Ctrl+C")
signal.pause()
python
import LagInput
import os


def readInput(filename):
    # INPUT:  string filename
    # OUTPUT: LagInput laginput
    # This function reads the input file and returns a LagInput object
    # containing all the input values.
    os.chdir("../input")
    fid = open(filename, "r")
    for line in fid.readlines():
        # Line parsed
        lp = line.split()
        if not lp:
            # Skip empty lines.
            continue
        if lp[1] == 'None':
            print("Invalid input. Using default values")
            IniPos = 0.0
            IniVel = 0.0
            IniTemp = 0.0
            DampCoef = 0.0
            dt = 1
            ttot = 10
            break
        if lp[0] == "initial_position":
            IniPos = float(lp[1])
        elif lp[0] == "initial_velocity":
            IniVel = float(lp[1])
        elif lp[0] == "temperature":
            IniTemp = float(lp[1])
        elif lp[0] == "damping_coefficient":
            DampCoef = float(lp[1])
        elif lp[0] == "time_step":
            dt = float(lp[1])
        elif lp[0] == "total_time":
            ttot = float(lp[1])
    # ttot is actually the total number of time steps, i.e. total_time / dt
    ttot = int(ttot / dt)
    os.chdir("../src")
    # print(IniPos)
    # print(IniVel)
    # print(IniTemp)
    # print(DampCoef)
    # print(dt)
    # print(ttot)
    laginput = LagInput.get_LagInput(IniPos, IniVel, IniTemp, DampCoef, dt, ttot)
    return laginput


def writeInput(args, filename):
    if not os.path.isdir("../input"):
        os.mkdir("../input")
    os.chdir("../input")
    fidin = open(filename, "w")
    for arg in vars(args):
        fidin.write("%s %s\n" % (arg, getattr(args, arg)))
    os.chdir("../src")
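For illustration, an input file that readInput would accept, written with plain file I/O. The key names come from the parser branches above; the values, the file name and the ../input layout are assumptions.

# Illustrative only: create an input file in the format readInput expects,
# then parse it. Key names match the branches in readInput; values are made up.
example = """initial_position 0.0
initial_velocity 1.0
temperature 300.0
damping_coefficient 0.1
time_step 0.01
total_time 10.0
"""
with open("../input/example.in", "w") as f:   # assumes ../input exists and cwd is ../src
    f.write(example)
lagin = readInput("example.in")               # returns a LagInput object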
python
import petsc4py import sys petsc4py.init(sys.argv) from petsc4py import PETSc import numpy as np import MatrixOperations as MO class BaseMyPC(object): def setup(self, pc): pass def reset(self, pc): pass def apply(self, pc, x, y): raise NotImplementedError def applyT(self, pc, x, y): self.apply(pc, x, y) def applyS(self, pc, x, y): self.apply(pc, x, y) def applySL(self, pc, x, y): self.applyS(pc, x, y) def applySR(self, pc, x, y): self.applyS(pc, x, y) def applyRich(self, pc, x, y, w, tols): self.apply(pc, x, y) class Direct(BaseMyPC): def __init__(self, W, A): print 333 self.W = W self.A = A IS = MO.IndexSet(W) self.u_is = IS[0] self.p_is = IS[1] def create(self, pc): self.diag = None kspL = PETSc.KSP() kspL.create(comm=PETSc.COMM_WORLD) pc = kspL.getPC() kspL.setType('preonly') pc.setType('lu') OptDB = PETSc.Options() # OptDB['pc_factor_shift_amount'] = 1 OptDB['pc_factor_mat_ordering_type'] = 'rcm' OptDB['pc_factor_mat_solver_package'] = 'mumps' kspL.setFromOptions() self.kspL = kspL kspM = PETSc.KSP() kspM.create(comm=PETSc.COMM_WORLD) pc = kspM.getPC() kspM.setType('preonly') pc.setType('lu') kspM.setFromOptions() self.kspM = kspM # print kspM.view() def setUp(self, pc): A, P = pc.getOperators() L = A.getSubMatrix(self.u_is,self.u_is) self.kspM.setOperators(self.A,self.A) self.kspL.setOperators(L,L) def apply(self, pc, x, y): # print 1000 # self.kspL.setOperators(self.B) x1 = x.getSubVector(self.u_is) y1 = x1.duplicate() x2 = x.getSubVector(self.p_is) y2 = x2.duplicate() # print 111 self.kspM.solve(x2, y2) self.kspL.solve(x1, y1) y.array = (np.concatenate([y1.array, y2.array])) class Approx(object): def __init__(self, W, A): self.W = W self.A = A IS = MO.IndexSet(W) self.u_is = IS[0] self.p_is = IS[1] def create(self, pc): kspL = PETSc.KSP() kspL.create(comm=PETSc.COMM_WORLD) pcL = kspL.getPC() kspL.setType('preonly') pcL.setType('hypre') # kspL.max_it = 1 kspL.setFromOptions() self.kspL = kspL kspM = PETSc.KSP() kspM.create(comm=PETSc.COMM_WORLD) pcM = kspM.getPC() kspM.setType('preonly') pcM.setType('hypre') kspM.setFromOptions() self.kspM = kspM def setUp(self, pc): A, P = pc.getOperators() L = A.getSubMatrix(self.u_is,self.u_is) M = P.getSubMatrix(self.p_is,self.p_is) self.kspM.setOperators(M,M) self.kspL.setOperators(L,L) def apply(self, pc, x, y): # self.kspL.setOperators(self.B) x1 = x.getSubVector(self.u_is) y1 = x1.duplicate() x2 = x.getSubVector(self.p_is) y2 = x2.duplicate() self.kspL.solve(x1, y1) self.kspM.solve(x2, y2) y.array = (np.concatenate([y1.array, y2.array])) class ApproxSplit(object): def __init__(self, W, A, M): self.W = W self.A = A self.M = M IS = MO.IndexSet(W) self.u_is = IS[0] self.p_is = IS[1] def create(self, pc): self.diag = None kspL = PETSc.KSP() kspL.create(comm=PETSc.COMM_WORLD) pcL = kspL.getPC() kspL.setType('preonly') pcL.setType('ml') # kspL.max_it = 1 kspL.setFromOptions() self.kspL = kspL kspM = PETSc.KSP() kspM.create(comm=PETSc.COMM_WORLD) pcM = kspM.getPC() kspM.setType('cg') pcM.setType('jacobi') kspM.setFromOptions() self.kspM = kspM def setUp(self, pc): self.kspM.setOperators(self.M,self.M) self.kspL.setOperators(self.A,self.A) def apply(self, pc, x, y): # self.kspL.setOperators(self.B) x1 = x.getSubVector(self.u_is) y1 = x1.duplicate() x2 = x.getSubVector(self.p_is) y2 = x2.duplicate() self.kspL.solve(x1, y1) self.kspM.solve(x2, y2) y.array = (np.concatenate([y1.array, y2.array])) class MHDApprox(object): def __init__(self, W, kspA, kspQ): self.W = W self.kspA = kspA self.kspQ = kspQ self.u_is = 
PETSc.IS().createGeneral(range(W.sub(0).dim())) self.p_is = PETSc.IS().createGeneral(range(W.sub(0).dim(),W.sub(0).dim()+W.sub(1).dim())) def apply(self, pc, x, y): # self.kspL.setOperators(self.B) x1 = x.getSubVector(self.u_is) y1 = x1.duplicate() x2 = x.getSubVector(self.p_is) y2 = x2.duplicate() self.kspQ.solve(x2, y2) self.kspA.solve(x1, y1) y.array = (np.concatenate([y1.array, y2.array])) def ApproxFunc(W, A, x, y): IS = MO.IndexSet(W) u_is = IS[0] p_is = IS[1] diag = None kspL = PETSc.KSP() kspL.create(comm=PETSc.COMM_WORLD) pcL = kspL.getPC() kspL.setType('preonly') pcL.setType('gamg') # kspL.max_it = 1 kspL.setFromOptions() kspM = PETSc.KSP() kspM.create(comm=PETSc.COMM_WORLD) pcM = kspM.getPC() kspM.setType('cg') pcM.setType('jacobi') kspM.setFromOptions() L = A.getSubMatrix(u_is,u_is) M = A.getSubMatrix(p_is,p_is) kspM.setOperators(M,M) kspL.setOperators(L,L) # kspL.setOperators(self.B) x1 = x.getSubVector(u_is) y1 = x1.duplicate() x2 = x.getSubVector(p_is) y2 = x2.duplicate() kspL.solve(x1, y1) kspM.solve(x2, y2) y.array = (np.concatenate([y1.array, y2.array])) def ApproxSplitFunc(W, A, M,x,y): W = W A = A M = M IS = MO.IndexSet(W) u_is = IS[0] p_is = IS[1] diag = None kspL = PETSc.KSP() kspL.create(comm=PETSc.COMM_WORLD) pcL = kspL.getPC() kspL.setType('preonly') pcL.setType('gamg') # kspL.max_it = 1 kspL.setFromOptions() kspM = PETSc.KSP() kspM.create(comm=PETSc.COMM_WORLD) pcM = kspM.getPC() kspM.setType('cg') pcM.setType('jacobi') kspM.setFromOptions() kspM.setOperators(M,M) kspL.setOperators(A,A) x1 = x.getSubVector(u_is) y1 = x1.duplicate() x2 = x.getSubVector(p_is) y2 = x2.duplicate() kspL.solve(x1, y1) kspM.solve(x2, y2) y.array = (np.concatenate([y1.array, y2.array]))
python
"""$ fio distrib""" import json import logging import click import cligj from fiona.fio import helpers, with_context_env @click.command() @cligj.use_rs_opt @click.pass_context @with_context_env def distrib(ctx, use_rs): """Distribute features from a collection. Print the features of GeoJSON objects read from stdin. """ logger = logging.getLogger(__name__) stdin = click.get_text_stream('stdin') try: source = helpers.obj_gen(stdin) for i, obj in enumerate(source): obj_id = obj.get('id', 'collection:' + str(i)) features = obj.get('features') or [obj] for j, feat in enumerate(features): if obj.get('type') == 'FeatureCollection': feat['parent'] = obj_id feat_id = feat.get('id', 'feature:' + str(i)) feat['id'] = feat_id if use_rs: click.echo(u'\u001e', nl=False) click.echo(json.dumps(feat)) except Exception: logger.exception("Exception caught during processing") raise click.Abort()
python
#!/usr/bin/env python

import os
import base64

from fastapi import FastAPI
from fastapi.responses import HTMLResponse

from plant_disease_classification_api.models import ClassficationRequestItem
from plant_disease_classification_api.ml.plant_disease_classifier import (
    PlantDiseaseClassifier,
)

app = FastAPI()


@app.get("/")
def read_root():
    html_content = """
    <html>
        <head>
            <title>Plant Disease Classification API</title>
        </head>
        <body>
            <h1>Welcome to Plant Disease Classification API</h1>
            <h2><a href="/docs">Documentation</a></h2>
        </body>
    </html>
    """
    return HTMLResponse(content=html_content, status_code=200)


@app.post("/classify")
async def classify(requestItem: ClassficationRequestItem):
    if len(requestItem.modelName) == 0:
        return {"error": "Please provide name of model you want to use."}
    if len(requestItem.data) == 0:
        return {"error": "Please provide Base64 encoded image data."}
    dir_path = os.path.dirname(os.path.realpath(__file__))
    path = os.path.join(dir_path, "models", requestItem.modelName)
    if os.path.exists(path):
        plant_disease_classifier = PlantDiseaseClassifier(model_path=path)
        image_data = base64.b64decode(requestItem.data)
        result = plant_disease_classifier.classify(image_data=image_data)
        return {"result": result}
    else:
        return {"error": "ML Model not found!"}
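A client-side sketch for the /classify endpoint above. It assumes the request model exposes modelName and data as the JSON field names (matching the attribute access in the handler), that a model file with the given name exists under models/, and that the server runs on localhost:8000; the image path and model name are placeholders.

# Hypothetical client for the /classify endpoint above.
import base64
import requests

with open("leaf.jpg", "rb") as f:                      # any local image
    encoded = base64.b64encode(f.read()).decode("ascii")

resp = requests.post(
    "http://localhost:8000/classify",
    json={"modelName": "plant-disease-model", "data": encoded},  # model name is a placeholder
)
print(resp.json())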
python
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from collections import Counter import operator import re import os import gc import gensim from gensim import corpora from nltk.corpus import stopwords import string from copy import deepcopy from sklearn.manifold import TSNE from sklearn.preprocessing import MinMaxScaler from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer from nltk import word_tokenize, ngrams from sklearn.cross_validation import KFold from sklearn import ensemble from sklearn.metrics import log_loss import seaborn as sns import matplotlib.pyplot as plt from subprocess import check_output get_ipython().magic('matplotlib inline') import plotly.offline as py py.init_notebook_mode(connected=True) import plotly.graph_objs as go import plotly.tools as tls pal = sns.color_palette() color = sns.color_palette() pd.set_option('expand_frame_repr', False) pd.set_option('display.max_colwidth', -1) pd.options.mode.chained_assignment = None # default='warn' words = re.compile(r"\w+",re.I) stopword = stopwords.words('english') #prelim data exploration train = pd.read_csv("train.csv").fillna("") test = pd.read_csv("test.csv").fillna("") train.groupby("is_duplicate")['id'].count().plot.bar() dfs = train[0:2500] dfs.groupby("is_duplicate")['id'].count().plot.bar() dfq1, dfq2 = dfs[['qid1', 'question1']], dfs[['qid2', 'question2']] dfq1.columns = ['qid1', 'question'] dfq2.columns = ['qid2', 'question'] dfqa = pd.concat((dfq1, dfq2), axis=0).fillna("") nrows_for_q1 = dfqa.shape[0]/2 all_ques_df = pd.DataFrame(pd.concat([train['question1'], train['question2']])) all_ques_df.columns = ["questions"] all_ques_df["num_of_words"] = all_ques_df["questions"].apply(lambda x : len(str(x).split())) cnt_srs = all_ques_df['num_of_words'].value_counts() plt.figure(figsize=(12,6)) sns.barplot(cnt_srs.index, cnt_srs.values, alpha=0.8, color=color[0]) plt.ylabel('Number of Occurrences', fontsize=12) plt.xlabel('Number of words in the question', fontsize=12) plt.xticks(rotation='vertical') plt.show() all_ques_df["num_of_chars"] = all_ques_df["questions"].apply(lambda x : len(str(x))) cnt_srs = all_ques_df['num_of_chars'].value_counts() plt.figure(figsize=(50,8)) sns.barplot(cnt_srs.index, cnt_srs.values, alpha=0.8, color=color[3]) plt.ylabel('Number of Occurrences', fontsize=12) plt.xlabel('Number of characters in the question', fontsize=12) plt.xticks(rotation='vertical') plt.show() del all_ques_df train_qs = pd.Series(train['question1'].tolist() + train['question2'].tolist()).astype(str) test_qs = pd.Series(test['question1'].tolist() + test['question2'].tolist()).astype(str) dist_train = train_qs.apply(len) dist_test = test_qs.apply(len) plt.figure(figsize=(15, 10)) plt.hist(dist_train, bins=200, range=[0, 200], color=pal[2], normed=True, label='train') plt.hist(dist_test, bins=200, range=[0, 200], color=pal[1], normed=True, alpha=0.5, label='test') plt.title('Normalised histogram of character count in questions', fontsize=15) plt.legend() plt.xlabel('Number of characters', fontsize=15) plt.ylabel('Probability', fontsize=15) print('mean-train {:.2f} std-train {:.2f} mean-test {:.2f} std-test {:.2f} max-train {:.2f} max-test {:.2f}'.format(dist_train.mean(), dist_train.std(), dist_test.mean(), dist_test.std(), dist_train.max(), dist_test.max())) ########################################## #transform questions with Tf-Tfidf mq1 = TfidfVectorizer().fit_transform(dfqa['question'].values) diff_encodings = mq1[::2] - mq1[1::2] 
import nltk STOP_WORDS = nltk.corpus.stopwords.words() def clean_sentence(val): regex = re.compile('([^\s\w]|_&*)+') sentence = regex.sub('', val).lower() sentence = sentence.split(" ") for word in list(sentence): if word in STOP_WORDS: sentence.remove(word) sentence = " ".join(sentence) return sentence def clean_trainframe(df): df = df.dropna(how="any") for col in ['question1', 'question2']: df[col] = df[col].apply(clean_sentence) return df def build_corpus(df): corpus = [] for col in ['question1', 'question2']: for sentence in df[col].iteritems(): word_list = sentence[1].split(" ") corpus.append(word_list) return corpus df = clean_trainframe(train) corpus = build_corpus(df) from gensim.models import word2vec model = word2vec.Word2Vec(corpus, size=100, window=20, min_count=200, workers=4) def tsne_plot(model): labels = [] tokens = [] for word in model.wv.vocab: tokens.append(model[word]) labels.append(word) tsne_model = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=23) new_values = tsne_model.fit_transform(tokens) x = [] y = [] for value in new_values: x.append(value[0]) y.append(value[1]) plt.figure(figsize=(16, 16)) for i in range(len(x)): plt.scatter(x[i],y[i]) plt.annotate(labels[i], xy=(x[i], y[i]), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom') plt.show() tsne_plot(model) from collections import Counter import matplotlib.pyplot as plt import operator def eda(df): print ("Duplicate Count = %s , Non Duplicate Count = %s" %(df.is_duplicate.value_counts()[1],df.is_duplicate.value_counts()[0])) question_ids_combined = df.qid1.tolist() + df.qid2.tolist() print ("Unique Questions = %s" %(len(np.unique(question_ids_combined)))) question_ids_counter = Counter(question_ids_combined) sorted_question_ids_counter = sorted(question_ids_counter.items(), key=operator.itemgetter(1)) question_appearing_more_than_once = [i for i in question_ids_counter.values() if i > 1] print ("Count of Quesitons appearing more than once = %s" %(len(question_appearing_more_than_once))) eda(train) def eda(df): question_ids_combined = df.qid1.tolist() + df.qid2.tolist() print ("Unique Questions = %s" %(len(np.unique(question_ids_combined)))) question_ids_counter = Counter(question_ids_combined) sorted_question_ids_counter = sorted(question_ids_counter.items(), key=operator.itemgetter(1)) question_appearing_more_than_once = [i for i in question_ids_counter.values() if i > 1] print ("Count of Quesitons appearing more than once = %s" %(len(question_appearing_more_than_once))) eda(test) import re import gensim from gensim import corpora from nltk.corpus import stopwords words = re.compile(r"\w+",re.I) stopword = stopwords.words('english') def tokenize_questions(df): question_1_tokenized = [] question_2_tokenized = [] for q in df.question1.tolist(): question_1_tokenized.append([i.lower() for i in words.findall(q) if i not in stopword]) for q in df.question2.tolist(): question_2_tokenized.append([i.lower() for i in words.findall(q) if i not in stopword]) df["Question_1_tok"] = question_1_tokenized df["Question_2_tok"] = question_2_tokenized return df def train_dictionary(df): questions_tokenized = df.Question_1_tok.tolist() + df.Question_2_tok.tolist() dictionary = corpora.Dictionary(questions_tokenized) dictionary.filter_extremes(no_below=5, no_above=0.5, keep_n=10000000) dictionary.compactify() return dictionary df_train = tokenize_questions(train) dictionary = train_dictionary(df_train) print ("No of words in the dictionary = %s" %len(dictionary.token2id)) def 
get_vectors(df, dictionary): question1_vec = [dictionary.doc2bow(text) for text in df.Question_1_tok.tolist()] question2_vec = [dictionary.doc2bow(text) for text in df.Question_2_tok.tolist()] question1_csc = gensim.matutils.corpus2csc(question1_vec, num_terms=len(dictionary.token2id)) question2_csc = gensim.matutils.corpus2csc(question2_vec, num_terms=len(dictionary.token2id)) return question1_csc.transpose(),question2_csc.transpose() q1_csc, q2_csc = get_vectors(df_train, dictionary) df_test = tokenize_questions(test) dictionary = train_dictionary(df_test) q1_csc, q2_csc = get_vectors(df_test, dictionary) from sklearn.metrics.pairwise import cosine_similarity as cs def get_cosine_similarity(q1_csc, q2_csc): cosine_sim = [] for i,j in zip(q1_csc, q2_csc): sim = cs(i,j) cosine_sim.append(sim[0][0]) return cosine_sim cosine_sim = get_cosine_similarity(q1_csc, q2_csc) from sklearn.ensemble import RandomForestClassifier as RFC from sklearn.svm import SVC from sklearn.ensemble import GradientBoostingClassifier as GBC from sklearn.linear_model import LogisticRegression as LR from sklearn.cross_validation import train_test_split from sklearn.grid_search import GridSearchCV from sklearn.metrics import f1_score, confusion_matrix from sklearn.pipeline import Pipeline np.random.seed(10) def train_rfc(X,y): X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.10, random_state=42) svm_models = [('svm', SVC(verbose=1, shrinking=False))] svm_pipeline = Pipeline(svm_models) svm_params = {'svm__kernel' : ['rbf'], 'svm__C' : [0.01,0.1,1], 'svm__gamma' :[0.1,0.2,0.4], 'svm__tol' :[0.001,0.01,0.1], 'svm__class_weight' : [{1:0.8,0:0.2}]} rfc_models = [('rfc', RFC())] rfc_pipeline = Pipeline(rfc_models) rfc_params = {'rfc__n_estimators' : [40], 'rfc__max_depth' : [40], 'rfc__min_samples_leaf' : [50]} lr_models = [('lr', LR(verbose=1))] lr_pipeline = Pipeline(lr_models) lr_params = {'lr__C': [0.1, 0.01], 'lr__tol': [0.001,0.01], 'lr__max_iter': [200,400], 'lr__class_weight' : [{1:0.8,0:0.2}]} gbc_models = [('gbc', GBC(verbose=1))] gbc_pipeline = Pipeline(gbc_models) gbc_params = {'gbc__n_estimators' : [100,200, 400, 800], 'gbc__max_depth' : [40, 80, 160, 320], 'gbc__learning_rate' : [0.01,0.1]} grid = zip([svm_pipeline, rfc_pipeline, lr_pipeline, gbc_pipeline], [svm_params, rfc_params, lr_params, gbc_params]) grid = zip([rfc_pipeline], [rfc_params]) best_clf = None for model_pipeline, param in grid: temp = GridSearchCV(model_pipeline, param_grid=param, cv=4, scoring='f1') temp.fit(X_train, y_train) if best_clf is None: best_clf = temp else: if temp.best_score_ > best_clf.best_score_: best_clf = temp model_details = {} model_details["CV Accuracy"] = best_clf.best_score_ model_details["Model Parameters"] = best_clf.best_params_ model_details["Test Data Score"] = best_clf.score(X_test, y_test) model_details["F1 score"] = f1_score(y_test, best_clf.predict(X_test)) model_details["Confusion Matrix"] = str(confusion_matrix(y_test, best_clf.predict(X_test))) return best_clf, model_details X = np.array(cosine_sim).reshape(-1,1) y = df_train.is_duplicate clf, model_details = train_rfc(X,y) print (model_details)
python
from rest_framework.views import APIView
from rest_framework.response import Response

from . import signals


EVENTS = {
    'Push Hook': signals.push_hook,
    'Tag Push Hook': signals.tag_push_hook,
    'Issue Hook': signals.issue_hook,
    'Note Hook': signals.note_hook,
    'Merge Request Hook': signals.merge_request_hook,
    'Wiki Page Hook': signals.wiki_page_hook,
    'Pipeline Hook': signals.pipeline_hook,
    'Build Hook': signals.build_hook,
}


def get_event_header(request):
    return request.META.get('HTTP_X_GITLAB_EVENT', b'')


class HookEvent(APIView):
    queryset = None
    permission_classes = ()

    def send_signals(self, request, _format=None):
        event = get_event_header(request)
        if event not in EVENTS:
            return Response({}, 404)
        EVENTS[event].send(sender=None, payload=request.data)
        return Response({}, 200)

    def get(self, request, _format=None):
        return self.send_signals(request, _format)

    def post(self, request, _format=None):
        return self.send_signals(request, _format)
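A receiver sketch for these hooks, under the assumption that the objects in EVENTS are Django-style Signal instances (consistent with the .send(sender=None, payload=...) call above); the receiver name and the payload keys used below are illustrative.

# Hypothetical receiver; assumes `signals.push_hook` is a Django Signal,
# matching the .send(sender=None, payload=...) call in send_signals.
from . import signals

def on_push(sender, payload, **kwargs):
    # `payload` is the parsed GitLab webhook body (request.data).
    print("push to", payload.get("project", {}).get("path_with_namespace"))

signals.push_hook.connect(on_push)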
python
import FWCore.ParameterSet.Config as cms from RecoMuon.TrackingTools.MuonServiceProxy_cff import * muonSeedsAnalyzer = cms.EDAnalyzer("MuonSeedsAnalyzer", MuonServiceProxy, SeedCollection = cms.InputTag("ancientMuonSeed"), seedPxyzMin = cms.double(-50.0), pxyzErrMin = cms.double(-100.0), phiErrMax = cms.double(3.2), pxyzErrMax = cms.double(100.0), RecHitBin = cms.int32(25), etaErrMin = cms.double(0.0), seedPtMin = cms.double(0.0), seedPxyzBin = cms.int32(100), ThetaBin = cms.int32(100), RecHitMin = cms.double(0.0), EtaMin = cms.double(-3.0), pErrBin = cms.int32(200), phiErrBin = cms.int32(160), EtaMax = cms.double(3.0), etaErrBin = cms.int32(200), seedPxyzMax = cms.double(50.0), ThetaMin = cms.double(0.0), PhiMin = cms.double(-3.2), pxyzErrBin = cms.int32(100), RecHitMax = cms.double(25.0), ThetaMax = cms.double(3.2), pErrMin = cms.double(0.0), EtaBin = cms.int32(100), pErrMax = cms.double(200.0), seedPtMax = cms.double(200.0), seedPtBin = cms.int32(1000), phiErrMin = cms.double(0.0), PhiBin = cms.int32(100), debug = cms.bool(False), etaErrMax = cms.double(0.5), PhiMax = cms.double(3.2) )
python
import unittest

from monocliche.src.Card import Card
from monocliche.src.Deck import Deck
from monocliche.src.actions.DrawCardAction import DrawCardAction


class DrawCardActionTest(unittest.TestCase):

    def test_execute(self):
        cards = [Card('card1', '', None), Card('card2', '', None)]
        deck = Deck(cards)
        action = DrawCardAction(deck)

        card = action.execute(None)
        self.assertEqual('card1', card.title)

        card = action.execute(None)
        self.assertEqual('card2', card.title)


if __name__ == '__main__':
    unittest.main()
python
from bitIO import * from Element import Element from PQHeap import PQHeap import os class Huffman: """ Huffman compression and decompression. Authors: - Kian Banke Larsen (kilar20) - Silas Pockendahl (silch20) """ HEADER_SIZE = 1024 def _createHuffmanTree(freqs): """ Creates and returns a Huffman tree, given a map (list) from byte to frequency. """ q = PQHeap() # Build heap with key as freq, value as Node for byte in range(256): q.insert(Element(freqs[byte], [byte])) # Build Huffman tree for i in range(255): # leave one element x = q.extractMin() y = q.extractMin() freq = x.key + y.key q.insert(Element(freq, [x.data, y.data])) # Return root of the tree return q.extractMin().data def _createLookupTable(tree): """ Create a lookup table for a Huffman tree. The table (list) maps bytes to a tuple (code, num_of_bits), where `code` is the compact binary representation, and `num_of_bits` is the number of bits in the representation. """ lookup = [None] * 256 # Function for recursive tree traversal def recurse(subtree, code, num_of_bits): if len(subtree) == 1: # `subtree` is a leaf lookup[subtree[0]] = (code, num_of_bits) else: # Not a leaf, both subtrees must exist # We are aware that we do not store the huffman codes as strings, # but this change has been approved by Rolf Fagerberg recurse(subtree[0], code << 1, num_of_bits + 1) # left => 0 recurse(subtree[1], code << 1 | 1, num_of_bits + 1) # right => 1 # Start recursion recurse(tree, 0, 0) return lookup def compress(input_file, output_file): """ Reads `input_file`, applies Huffman compression and writes to `output_file`. Returns number of bytes read, and number of bytes written to output file. """ freqs = [0] * 256 # Not necessary for functionality bits_written = 1024 * 8 # header size in bits with open(input_file, "rb") as input_file: # Count bytes byte = input_file.read(1) while byte: freqs[byte[0]] += 1 byte = input_file.read(1) tree = Huffman._createHuffmanTree(freqs) table = Huffman._createLookupTable(tree) # Count output bits () for byte in range(256): bits_written += table[byte][1] * freqs[byte] # BitWriter handles padding with BitWriter(open(output_file, "wb")) as output: # Write frequency header for byte in range(256): output.writeint32bits(freqs[byte]) # Resets the cursor state input_file.seek(0) # Encode input file byte = input_file.read(1) while byte: code, bits = table[byte[0]] byte = input_file.read(1) # Very similar to `BitWriter._writebits`, # writes the bits one by one while bits > 0: output.writebit((code >> bits-1) & 1) bits -= 1 # Return bytes read and bytes written return sum(freqs), (bits_written + 7) // 8 def decompress(input_file, output_file): """ Reads `input_file`, applies Huffman decompression and writes to `output_file`. Returns number of bytes read, and number of bytes written to output file. 
""" # Not necessary for functionality input_size = os.path.getsize(input_file) output_length = 0 with BitReader(open(input_file, "rb")) as input_file: # Read frequence header freqs = [input_file.readint32bits() for _ in range(256)] if not input_file.readsucces(): # not enough data for header raise Exception("Could not read header (too short)") # Count output bytes output_length = sum(freqs) # Frequency table => Huffman tree tree = Huffman._createHuffmanTree(freqs) with open(output_file, "wb") as output: # Repeat for number of characters in output for _ in range(output_length): x = tree # Traverse tree until a leaf/corresponding byte is found while len(x) == 2: bit = input_file.readbit() if not input_file.readsucces(): raise Exception("Not enough data, unexpected EOF") x = x[bit] # 0 => left, 1 => right output.write(bytes(x)) # Return bytes read and bytes written return input_size, output_length
python
# InfiniTag Copyright © 2020 AMOS-5 # Permission is hereby granted, # free of charge, to any person obtaining a copy of this software and # associated documentation files (the "Software"), to deal in the Software # without restriction, including without limitation the rights to use, copy, # modify, merge, publish, distribute, sublicense, and/or sell copies of the # Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: The above copyright notice and this # permission notice shall be included in all copies or substantial portions # of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN # NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, # DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE # USE OR OTHER DEALINGS IN THE SOFTWARE. try: # uses the config from this folder for the general setup import config except: # we run the testcase / other pass import os import shutil import pysolr from pathlib import Path from urlpath import URL import json """ This file is experimental and was used to setup a local Solr instance. We have already changed that and setup a remote instance for everybody. Still this file contains some useful informations on how a Solr core can be setup remotely. """ def get_default_config_dir(solr_home: Path): return solr_home / "configsets" / "_default" / "conf" def get_solr_home(): try: solr_home = Path(os.environ["SOLR_HOME"]) except: raise ValueError( "You have not set the SOLR_HOME environment variable!\n" "export SOLR_HOME='SOLR_ROOT/server/solr'" ) return solr_home def print_status(result: dict, corename: str): if result["responseHeader"]["status"] == 0: print(f"Core with name '{corename}' created.") else: # we are maybe good (core exists), or error print(result["error"]["msg"]) def create_admin(url: URL): admin_url = url / "admin" / "cores" admin = pysolr.SolrCoreAdmin(admin_url) return admin def create_core(config: dict): corename = config["corename"] solr_home = get_solr_home() default_dir = get_default_config_dir(solr_home) working_dir = solr_home / corename try: shutil.copytree(default_dir, working_dir) except FileExistsError: # the core has already been created once, # we don't bother and use the old config pass base_url = URL(config["url"]) admin = create_admin(base_url) # create a core with default configuration res = admin.create(corename, working_dir) res = json.loads(res) print_status(res, corename) if __name__ == "__main__": create_core(config.tag_storage)
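# --- Hedged sketch (not from the original project) ---
# The shape of the dict that create_core() expects; the core name and URL are
# placeholders standing in for whatever config.tag_storage actually contains.
EXAMPLE_TAG_STORAGE = {
    "corename": "tags",                   # placeholder core name
    "url": "http://localhost:8983/solr",  # placeholder Solr base URL
}
# create_core(EXAMPLE_TAG_STORAGE)  # would copy the _default configset and create the core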
python
# Copyright 2019 Xanadu Quantum Technologies Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module implements common shared matrix decompositions that are used to perform gate decompositions. """ import numpy as np from scipy.linalg import block_diag, sqrtm, schur from thewalrus.symplectic import sympmat def williamson(V, rtol=1e-05, atol=1e-08): r"""Williamson decomposition of positive-definite (real) symmetric matrix. See https://math.stackexchange.com/questions/1171842/finding-the-symplectic-matrix-in-williamsons-theorem/2682630#2682630 and https://strawberryfields.ai/photonics/conventions/decompositions.html#williamson-decomposition Args: V (array[float]): positive definite symmetric (real) matrix rtol (float): the relative tolerance parameter used in ``np.allclose`` atol (float): the absolute tolerance parameter used in ``np.allclose`` Returns: tuple[array,array]: ``(Db, S)`` where ``Db`` is a diagonal matrix and ``S`` is a symplectic matrix such that :math:`V = S^T Db S` """ (n, m) = V.shape if n != m: raise ValueError("The input matrix is not square") if not np.allclose(V, V.T, rtol=rtol, atol=atol): raise ValueError("The input matrix is not symmetric") if n % 2 != 0: raise ValueError("The input matrix must have an even number of rows/columns") n = n // 2 omega = sympmat(n) vals = np.linalg.eigvalsh(V) for val in vals: if val <= 0: raise ValueError("Input matrix is not positive definite") Mm12 = sqrtm(np.linalg.inv(V)).real r1 = Mm12 @ omega @ Mm12 s1, K = schur(r1) X = np.array([[0, 1], [1, 0]]) I = np.identity(2) seq = [] # In what follows I construct a permutation matrix p so that the Schur matrix has # only positive elements above the diagonal # Also the Schur matrix uses the x_1,p_1, ..., x_n,p_n ordering thus I permute using perm # to go to the ordering x_1, ..., x_n, p_1, ... , p_n for i in range(n): if s1[2 * i, 2 * i + 1] > 0: seq.append(I) else: seq.append(X) perm = np.array([2 * i for i in range(n)] + [2 * i + 1 for i in range(n)]) p = block_diag(*seq) Kt = K @ p Ktt = Kt[:, perm] s1t = p @ s1 @ p dd = [1 / s1t[2 * i, 2 * i + 1] for i in range(n)] Db = np.diag(dd + dd) S = Mm12 @ Ktt @ sqrtm(Db) return Db, np.linalg.inv(S).T
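# --- Usage sketch (an illustration, not part of the original module) ---
# Builds a random symmetric positive-definite matrix of even dimension,
# decomposes it, and checks that the returned S preserves the symplectic form.
if __name__ == "__main__":
    np.random.seed(42)
    A = np.random.randn(4, 4)
    V = A @ A.T + np.identity(4)  # symmetric and positive definite by construction
    Db, S = williamson(V)
    Omega = sympmat(2)
    print("symplectic eigenvalues:", np.diag(Db)[:2])
    print("S is symplectic:", np.allclose(S @ Omega @ S.T, Omega))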
python
import telnetlib import time OK = 0 ERROR = 1 RESPONSE_DELAY_MS = 100 class AMXNMX(object): def __init__(self, host, port=50002, response_delay_ms=RESPONSE_DELAY_MS): self.conn = telnetlib.Telnet(host, port=port) self.response_delay_sec = response_delay_ms / 1000. self._initialize() def _initialize(self): pass def _wait_for_response(self): time.sleep(self.response_delay_sec) def _send_command(self, cmd): self.conn.write(cmd + '\n') self._wait_for_response() def _send_command_with_check(self, cmd, key, val): """ Send a command and check that the response includes response_dict[key] == val """ r = self._send_command_return_response(cmd) if r[key] == val: return OK else: return ERROR def _get_response(self): raw = self.conn.read_very_eager() lines = raw.split('\r')[0:-1] #Ignore last empty line r_dict = {} for line in lines: key, val = line.split(':',1) r_dict[key] = val return r_dict def _send_command_return_response(self, cmd): self._send_command(cmd) return self._get_response() def get_status(self): return self._send_command_return_response("getStatus") class AMXDecoder(AMXNMX): def hdmi_off(self): self._send_command_with_check("hdmiOff", "DVIOFF", "on") def hdmi_on(self): self._send_command_with_check("hdmiOn", "DVIOFF", "off") def set_stream(self, stream): self._send_command_with_check("set:%d" % stream, "STREAM", "%d" % stream) class AMXEncoder(AMXNMX): def _initialize(self): self.stream_id = int(self.get_status()["STREAM"])
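# --- Hypothetical usage sketch (not part of the original module) ---
# The IP address and stream number below are placeholders; a reachable
# N-series device listening on port 50002 is assumed.
if __name__ == "__main__":
    decoder = AMXDecoder("192.168.1.50")   # placeholder address
    print(decoder.get_status())            # dict parsed from "key:value" response lines
    decoder.set_stream(3)                  # placeholder stream id
    decoder.hdmi_on()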
python
# -*- coding: utf-8 -*- ''' :codeauthor: :email:`Jayesh Kariya <[email protected]>` ''' # Import Python Libs from __future__ import absolute_import # Import Salt Testing Libs from salttesting import TestCase, skipIf from salttesting.mock import ( MagicMock, patch, NO_MOCK, NO_MOCK_REASON ) from salttesting.helpers import ensure_in_syspath ensure_in_syspath('../../') # Import Salt Libs from salt.utils import etcd_util from urllib3.exceptions import ReadTimeoutError, MaxRetryError try: import etcd HAS_ETCD = True except ImportError: HAS_ETCD = False @skipIf(HAS_ETCD is False, 'python-etcd module must be installed.') @skipIf(NO_MOCK, NO_MOCK_REASON) class EtcdUtilTestCase(TestCase): ''' Test cases for salt.utils.etcd_util ''' # 'get_' function tests: 1 @patch('etcd.Client', autospec=True) def test_read(self, mock): ''' Test to make sure we interact with etcd correctly ''' etcd_client = mock.return_value etcd_return = MagicMock(value='salt') etcd_client.read.return_value = etcd_return client = etcd_util.EtcdClient({}) self.assertEqual(client.read('/salt'), etcd_return) etcd_client.read.assert_called_with('/salt', recursive=False, wait=False, timeout=None) client.read('salt', True, True, 10, 5) etcd_client.read.assert_called_with('salt', recursive=True, wait=True, timeout=10, waitIndex=5) etcd_client.read.side_effect = etcd.EtcdKeyNotFound self.assertRaises(etcd.EtcdKeyNotFound, client.read, 'salt') etcd_client.read.side_effect = etcd.EtcdConnectionFailed self.assertRaises(etcd.EtcdConnectionFailed, client.read, 'salt') etcd_client.read.side_effect = etcd.EtcdValueError self.assertRaises(etcd.EtcdValueError, client.read, 'salt') etcd_client.read.side_effect = ValueError self.assertRaises(ValueError, client.read, 'salt') etcd_client.read.side_effect = ReadTimeoutError(None, None, None) self.assertRaises(etcd.EtcdConnectionFailed, client.read, 'salt') etcd_client.read.side_effect = MaxRetryError(None, None) self.assertRaises(etcd.EtcdConnectionFailed, client.read, 'salt') @patch('etcd.Client') def test_get(self, mock): ''' Test if it get a value from etcd, by direct path ''' client = etcd_util.EtcdClient({}) with patch.object(client, 'read', autospec=True) as mock: mock.return_value = MagicMock(value='stack') self.assertEqual(client.get('salt'), 'stack') mock.assert_called_with('salt', recursive=False) self.assertEqual(client.get('salt', recurse=True), 'stack') mock.assert_called_with('salt', recursive=True) mock.side_effect = etcd.EtcdKeyNotFound() self.assertEqual(client.get('not-found'), None) mock.side_effect = etcd.EtcdConnectionFailed() self.assertEqual(client.get('watching'), None) # python 2.6 test mock.side_effect = ValueError self.assertEqual(client.get('not-found'), None) mock.side_effect = Exception self.assertRaises(Exception, client.get, 'some-error') @patch('etcd.Client') def test_tree(self, mock): ''' Test recursive gets ''' client = etcd_util.EtcdClient({}) with patch.object(client, 'read', autospec=True) as mock: c1, c2 = MagicMock(), MagicMock() c1.__iter__.return_value = [ MagicMock(key='/x/a', value='1'), MagicMock(key='/x/b', value='2'), MagicMock(key='/x/c', dir=True)] c2.__iter__.return_value = [ MagicMock(key='/x/c/d', value='3') ] mock.side_effect = iter([ MagicMock(children=c1), MagicMock(children=c2) ]) self.assertDictEqual(client.tree('/x'), {'a': '1', 'b': '2', 'c': {'d': '3'}}) mock.assert_any_call('/x') mock.assert_any_call('/x/c') mock.side_effect = etcd.EtcdKeyNotFound() self.assertEqual(client.tree('not-found'), None) mock.side_effect = ValueError 
self.assertEqual(client.tree('/x'), None) mock.side_effect = Exception self.assertRaises(Exception, client.tree, 'some-error') @patch('etcd.Client') def test_ls(self, mock): client = etcd_util.EtcdClient({}) with patch.object(client, 'read', autospec=True) as mock: c1 = MagicMock() c1.__iter__.return_value = [ MagicMock(key='/x/a', value='1'), MagicMock(key='/x/b', value='2'), MagicMock(key='/x/c', dir=True)] mock.return_value = MagicMock(children=c1) self.assertEqual(client.ls('/x'), {'/x': {'/x/a': '1', '/x/b': '2', '/x/c/': {}}}) mock.assert_called_with('/x') mock.side_effect = etcd.EtcdKeyNotFound() self.assertEqual(client.ls('/not-found'), {}) mock.side_effect = Exception self.assertRaises(Exception, client.tree, 'some-error') @patch('etcd.Client', autospec=True) def test_write(self, mock): client = etcd_util.EtcdClient({}) etcd_client = mock.return_value etcd_client.write.return_value = MagicMock(value='salt') self.assertEqual(client.write('/some-key', 'salt'), 'salt') etcd_client.write.assert_called_with('/some-key', 'salt', ttl=None, dir=False) self.assertEqual(client.write('/some-key', 'salt', ttl=5), 'salt') etcd_client.write.assert_called_with('/some-key', 'salt', ttl=5, dir=False) etcd_client.write.return_value = MagicMock(dir=True) self.assertEqual(client.write('/some-dir', 'salt', ttl=0, directory=True), True) etcd_client.write.assert_called_with('/some-dir', None, ttl=0, dir=True) etcd_client.write.side_effect = etcd.EtcdRootReadOnly() self.assertEqual(client.write('/', 'some-val'), None) etcd_client.write.side_effect = etcd.EtcdNotFile() self.assertEqual(client.write('/some-key', 'some-val'), None) etcd_client.write.side_effect = etcd.EtcdNotDir() self.assertEqual(client.write('/some-dir', 'some-val'), None) etcd_client.write.side_effect = MaxRetryError(None, None) self.assertEqual(client.write('/some-key', 'some-val'), None) etcd_client.write.side_effect = ValueError self.assertEqual(client.write('/some-key', 'some-val'), None) etcd_client.write.side_effect = Exception self.assertRaises(Exception, client.set, 'some-key', 'some-val') @patch('etcd.Client', autospec=True) def test_flatten(self, mock): client = etcd_util.EtcdClient({}) some_data = { '/x/y/a': '1', 'x': { 'y': { 'b': '2' } }, 'm/j/': '3', 'z': '4', 'd': {}, } result_path = { '/test/x/y/a': '1', '/test/x/y/b': '2', '/test/m/j': '3', '/test/z': '4', '/test/d': {}, } result_nopath = { '/x/y/a': '1', '/x/y/b': '2', '/m/j': '3', '/z': '4', '/d': {}, } result_root = { '/x/y/a': '1', '/x/y/b': '2', '/m/j': '3', '/z': '4', '/d': {}, } self.assertEqual(client._flatten(some_data, path='/test'), result_path) self.assertEqual(client._flatten(some_data, path='/'), result_root) self.assertEqual(client._flatten(some_data), result_nopath) @patch('etcd.Client', autospec=True) def test_update(self, mock): client = etcd_util.EtcdClient({}) some_data = { '/x/y/a': '1', 'x': { 'y': { 'b': '3' } }, 'm/j/': '3', 'z': '4', 'd': {}, } result = { '/test/x/y/a': '1', '/test/x/y/b': '2', '/test/m/j': '3', '/test/z': '4', '/test/d': True, } flatten_result = { '/test/x/y/a': '1', '/test/x/y/b': '2', '/test/m/j': '3', '/test/z': '4', '/test/d': {} } client._flatten = MagicMock(return_value=flatten_result) self.assertEqual(client.update('/some/key', path='/blah'), None) with patch.object(client, 'write', autospec=True) as write_mock: def write_return(key, val, ttl=None, directory=None): return result.get(key, None) write_mock.side_effect = write_return self.assertDictEqual(client.update(some_data, path='/test'), result) 
client._flatten.assert_called_with(some_data, '/test') self.assertEqual(write_mock.call_count, 5) @patch('etcd.Client', autospec=True) def test_rm(self, mock): etcd_client = mock.return_value client = etcd_util.EtcdClient({}) etcd_client.delete.return_value = True self.assertEqual(client.rm('/some-key'), True) etcd_client.delete.assert_called_with('/some-key', recursive=False) self.assertEqual(client.rm('/some-dir', recurse=True), True) etcd_client.delete.assert_called_with('/some-dir', recursive=True) etcd_client.delete.side_effect = etcd.EtcdNotFile() self.assertEqual(client.rm('/some-dir'), None) etcd_client.delete.side_effect = etcd.EtcdDirNotEmpty() self.assertEqual(client.rm('/some-key'), None) etcd_client.delete.side_effect = etcd.EtcdRootReadOnly() self.assertEqual(client.rm('/'), None) etcd_client.delete.side_effect = ValueError self.assertEqual(client.rm('/some-dir'), None) etcd_client.delete.side_effect = Exception self.assertRaises(Exception, client.rm, 'some-dir') @patch('etcd.Client', autospec=True) def test_watch(self, client_mock): client = etcd_util.EtcdClient({}) with patch.object(client, 'read', autospec=True) as mock: mock.return_value = MagicMock(value='stack', key='/some-key', modifiedIndex=1, dir=False) self.assertDictEqual(client.watch('/some-key'), {'value': 'stack', 'key': '/some-key', 'mIndex': 1, 'changed': True, 'dir': False}) mock.assert_called_with('/some-key', wait=True, recursive=False, timeout=0, waitIndex=None) mock.side_effect = iter([etcd_util.EtcdUtilWatchTimeout, mock.return_value]) self.assertDictEqual(client.watch('/some-key'), {'value': 'stack', 'changed': False, 'mIndex': 1, 'key': '/some-key', 'dir': False}) mock.side_effect = iter([etcd_util.EtcdUtilWatchTimeout, etcd.EtcdKeyNotFound]) self.assertEqual(client.watch('/some-key'), {'value': None, 'changed': False, 'mIndex': 0, 'key': '/some-key', 'dir': False}) mock.side_effect = iter([etcd_util.EtcdUtilWatchTimeout, ValueError]) self.assertEqual(client.watch('/some-key'), {}) mock.side_effect = None mock.return_value = MagicMock(value='stack', key='/some-key', modifiedIndex=1, dir=True) self.assertDictEqual(client.watch('/some-dir', recurse=True, timeout=5, index=10), {'value': 'stack', 'key': '/some-key', 'mIndex': 1, 'changed': True, 'dir': True}) mock.assert_called_with('/some-dir', wait=True, recursive=True, timeout=5, waitIndex=10) mock.side_effect = MaxRetryError(None, None) self.assertEqual(client.watch('/some-key'), {}) mock.side_effect = etcd.EtcdConnectionFailed() self.assertEqual(client.watch('/some-key'), {}) mock.return_value = None self.assertEqual(client.watch('/some-key'), {}) if __name__ == '__main__': from integration import run_tests run_tests(EtcdUtilTestCase, needs_daemon=False)
python
#!/usr/bin/env python

"""
A really simple module, just to demonstrate distutils
"""


def capitalize(infilename, outfilename):
    """
    reads the contents of infilename, and writes it to outfilename, but with
    every word capitalized

    note: very primitive -- it will mess some files up!

    this is called by the capitalize script
    """
    # context managers make sure both files are closed; the old 'U' open mode
    # (removed in recent Python versions) is not needed for plain text reads
    with open(infilename) as infile, open(outfilename, 'w') as outfile:
        for line in infile:
            outfile.write(" ".join(word.capitalize() for word in line.split()))
            outfile.write("\n")
    return None
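# Usage sketch (hypothetical file names):
if __name__ == "__main__":
    capitalize("notes.txt", "notes_capitalized.txt")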
python
# -*- coding: utf-8 -*- # """*********************************************************************************************""" # FileName [ utility/helper.py ] # Synopsis [ helper functions ] # Author [ Andy T. Liu (Andi611) ] # Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ] """*********************************************************************************************""" ############### # IMPORTATION # ############### import torch ##################### # PARSE PRUNE HEADS # ##################### def parse_prune_heads(config): if 'prune_headids' in config['transformer'] and config['transformer']['prune_headids'] != 'None': heads_int = [] spans = config['transformer']['prune_headids'].split(',') for span in spans: endpoints = span.split('-') if len(endpoints) == 1: heads_int.append(int(endpoints[0])) elif len(endpoints) == 2: heads_int += torch.arange(int(endpoints[0]), int(endpoints[1])).tolist() else: raise ValueError print(f'[PRUNING] - heads {heads_int} will be pruned') config['transformer']['prune_headids'] = heads_int else: config['transformer']['prune_headids'] = None ########################## # GET TRANSFORMER TESTER # ########################## def get_transformer_tester(from_path='result/result_transformer/libri_sd1337_fmllrBase960-F-N-K-RA/model-1000000.ckpt', display_settings=False): ''' Wrapper that loads the transformer model from checkpoint path ''' # load config and paras all_states = torch.load(from_path, map_location='cpu') config = all_states['Settings']['Config'] paras = all_states['Settings']['Paras'] # handling older checkpoints if not hasattr(paras, 'multi_gpu'): setattr(paras, 'multi_gpu', False) if 'prune_headids' not in config['transformer']: config['transformer']['prune_headids'] = None # display checkpoint settings if display_settings: for cluster in config: print(cluster + ':') for item in config[cluster]: print('\t' + str(item) + ': ', config[cluster][item]) print('paras:') v_paras = vars(paras) for item in v_paras: print('\t' + str(item) + ': ', v_paras[item]) # load model with Tester from transformer.solver import Tester tester = Tester(config, paras) tester.set_model(inference=True, with_head=False, from_path=from_path) return tester
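# --- Illustration of the span syntax parsed above (config values are made up) ---
# '0,2-4' prunes heads 0, 2 and 3: the right end of an 'a-b' span is exclusive
# because torch.arange(a, b) stops before b.
if __name__ == "__main__":
    demo_config = {'transformer': {'prune_headids': '0,2-4'}}
    parse_prune_heads(demo_config)
    print(demo_config['transformer']['prune_headids'])  # -> [0, 2, 3]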
python
# module msysio.py # Requires Python 2.2 or better. """Provide helpful routines for interactive IO on the MSYS console""" # Output needs to be flushed to be seen. It is especially important # when prompting for user input. import sys import os __all__ = ['raw_input_', 'print_', 'is_msys'] # 2.x/3.x compatibility stuff try: raw_input except NameError: raw_input = input # Exported functions def raw_input_(prompt=None): """Prompt for user input in an MSYS console friendly way""" if prompt is None: prompt = '' print_(prompt, end='') return raw_input() def print_(*args, **kwds): """Print arguments in an MSYS console friendly way Keyword arguments: file, sep, end """ stream = kwds.get('file', sys.stdout) sep = kwds.get('sep', ' ') end = kwds.get('end', '\n') if args: stream.write(sep.join([str(arg) for arg in args])) if end: stream.write(end) try: stream.flush() except AttributeError: pass def is_msys(): """Return true if the execution environment is MSYS""" try: # Unfortunately there is no longer an MSYS specific identifier. return os.environ['TERM'] == 'cygwin' except KeyError: return False
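# --- Minimal usage sketch (not part of the original module) ---
if __name__ == "__main__":
    print_("Running under MSYS:", is_msys())
    # raw_input_("Press Enter to continue... ")  # would prompt with flushed output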
python
#!/usr/bin/env python import os import sys try: here = __file__ except NameError: # Python 2.2 here = sys.argv[0] relative_paste = os.path.join( os.path.dirname(os.path.dirname(os.path.abspath(here))), 'paste') if os.path.exists(relative_paste): sys.path.insert(0, os.path.dirname(relative_paste)) from paste.script import command command.run()
python
from practicum import find_mcu_boards, McuBoard, PeriBoard from flask import Flask, Response, jsonify, request from flask_cors import CORS import json import threading app = Flask(__name__) CORS(app) def ReadScore(): filename = "score.json" with open(filename) as file: data = json.load(file) return data @app.route('/scoreboard') def Scoreboard(): scoreBoard = ReadScore() return jsonify(scoreBoard)
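# --- Hedged run sketch (not from the original project) ---
# Assumes a score.json next to this file, e.g. {"teams": [{"name": "A", "score": 10}]};
# the host/port below are placeholders and the original may use the flask CLI instead.
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)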
python
from python_kemptech_api import * # Specify the LoadMaster connection credentials here: loadmaster_ip = "" username = "" password = "" lm = LoadMaster(loadmaster_ip, username, password) # Specify the VS parameters: vs_ip = "" new_vs = "" vs_port = "" template_file = "template.txt" # Create the VS vs = lm.create_virtual_service(vs_ip, vs_port) vs.save() # Customize your VS here vs.transparent = 'y' vs.sslacceleration = 'y' vs.update() # Export the VS as a template and write to a file template_content = vs.export() with open(template_file, 'w') as f: f.write(template_content) # Upload template file to LoadMaster lm.upload_template(template_file) # Get template name and object template_name, template_obj = lm.templates.popitem() # Apply the template to a new VS lm.apply_template(new_vs, vs_port, "tcp", template_name=template_name, nickname="VS from Template")
python
""" Title: Mammogram Mass Detector Author: David Sternheim Description: The purpose of this script is to take data regarding mass detected in a mammogram and use machine learning models to predict if this mass is malignant or benign. The data is taken form UCI public data sets. Breakdown of the data set: The data has 961 instances of masses detected in mammograms. It's stored in mammographic_masses.data.txt. The format of the file is comma separated values with each of the following as one fo the values in order: 1. BI-RADS Assessment: 1 to 5 (ordinal) 2. Age: patient's age in years (integer) 3. Shape: mass shape: round=1 oval=2 lobular=3 irregular=4 (nominal) 4. Margin: mass margin: circumscribed=1 microlobulated=2 obscured=3 ill-defined=4 spiculated=5 (nominal) 5. Density: mass density high=1 iso=2 low=3 fat-containing=4 (ordinal) 6. Severity: benign=0 malignant=1 NOTE: '?' denotes a missing data value Last Updated: 09/15/18 Known Bugs: """ import pandas as pd from sklearn import tree from sklearn import model_selection from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import StandardScaler from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB from sklearn import svm from sklearn.linear_model import LogisticRegression """ Reading in the data and pre-processing it. """ data = pd.read_csv('Assets/mammographic_masses.data.txt') df = pd.DataFrame(data) df.columns = ['BIRADS', 'Age', 'Shape', 'Margin', 'Density', 'Severity'] print(df.head()) d = {'1': 1.0, '2': 2.0, '3': 3.0, '4': 4.0, '5': 5.0, '?': -1.0} df['BIRADS'] = df['BIRADS'].map(d) df['Shape'] = df['Shape'].map(d) df['Margin'] = df['Margin'].map(d) df['Density'] = df['Density'].map(d) df['Age'] = pd.to_numeric(df['Age'], errors='coerce') df['Severity'] = pd.to_numeric(df['Severity'], errors='coerce') df.fillna(-1.0, inplace=True) df = df.astype('float32') print(type(df['Severity'][0])) """ Implement Decision Tree. Trained with K-Folds Cross Validation with K=10 """ y = df['Severity'] features = list(df.columns[:5]) x = df[features] x_train, x_test, y_train, y_test = model_selection.train_test_split(x, y, test_size=.4, random_state=0) clf = tree.DecisionTreeClassifier() clf = clf.fit(x_train, y_train) score = clf.score(x_test, y_test) scores = model_selection.cross_val_score(clf, x, y, cv=10) print('Decision Tree accuracy: ' + str(round(scores.mean()*100, 2)) + '%') # ~76% accuracy # Random Forests clf = RandomForestClassifier(n_estimators=10) clf = clf.fit(x_train, y_train) score = clf.score(x_test, y_test) scores = model_selection.cross_val_score(clf, x, y, cv=10) print('Random Forest accuracy: ' + str(round(scores.mean()*100, 2)) + '%') # ~78% accuracy """ Implement K-Nearest Neighbors. Trained with K-Folds Cross validation with K=10 """ scaler = StandardScaler() scaler = scaler.fit(x_train) x_train = scaler.transform(x_train) x_test = scaler.transform(x_test) clf = KNeighborsClassifier(n_neighbors=5) clf = clf.fit(x_train, y_train) score = clf.score(x_test, y_test) scores = model_selection.cross_val_score(clf, x, y, cv=10) print('K-Nearest Neighbor accuracy: ' + str(round(scores.mean()*100, 2)) + '%') # ~79% """ Implement Naive Bayes. 
Trained with K-Folds Cross Validation with K=10 """ clf = GaussianNB() clf = clf.fit(x_train, y_train) scores = model_selection.cross_val_score(clf, x, y, cv=10) print('Naive Bayes accuracy: ' + str(round(scores.mean()*100, 2)) + '%') # ~78% """ Implement Support Vector Machine """ C = 1.0 svc = svm.SVC(kernel='linear', C=C).fit(x_train, y_train) scores = model_selection.cross_val_score(svc, x, y, cv=10) print('Support Vector Machine accuracy: ' + str(round(scores.mean()*100, 2)) + '%') # ~79% """ Implement Logistic Regression. Trained with K-Folds Cross Validation. """ lgr = LogisticRegression() lgr = lgr.fit(x_train, y_train) scores = model_selection.cross_val_score(lgr, x, y, cv=10) print('Logistic Regression accuracy: ' + str(round(scores.mean()*100, 2)) + '%') # ~79% """ Conclusions: Most machine learning models have an accuracy around 79%. DecisionTrees are by far the worst model to detect if mass is malignant or benign because test returned a result of around 76%. Any of the other test can be used to relative accuracy ~79%. The highest accuracy came from KNN at a high 79%. By adjusting hyper parameters, the models may be improved. """
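# --- Hedged follow-up to the hyper-parameter note above (not in the original analysis) ---
# A small grid search over KNN's n_neighbors on the scaled training data;
# the candidate values are arbitrary choices.
knn_grid = model_selection.GridSearchCV(
    KNeighborsClassifier(),
    param_grid={'n_neighbors': [3, 5, 7, 9, 11]},
    cv=10,
)
knn_grid.fit(x_train, y_train)
print('Best n_neighbors: ' + str(knn_grid.best_params_['n_neighbors']))
print('Best KNN CV accuracy: ' + str(round(knn_grid.best_score_ * 100, 2)) + '%')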
python
import os path = '/content/Multilingual_Text_to_Speech/checkpoints' files = sorted(os.listdir(path))
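# Follow-up sketch: grab the newest checkpoint, assuming the file names sort
# chronologically (an assumption, not guaranteed by the listing above).
latest_checkpoint = files[-1] if files else None
print(latest_checkpoint)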
python
import numpy as np import tensorflow as tf tfkl = tf.keras.layers def array2tensor(z, dtype=tf.float32): """Converts numpy arrays into tensorflow tensors. Keyword arguments: z -- numpy array dtype -- data type of tensor entries (default float32) """ if len(np.shape(z)) == 1: # special case where input is a vector return tf.cast(np.reshape(z, (np.shape(z)[0], 1)), dtype) else: return tf.cast(z, dtype) def reduce_logmeanexp_offdiag(x, axis=None): """Contracts the tensor x on its off-diagonal elements and takes the logarithm. Keyword arguments: x -- tensorflow tensor axis (int) -- contraction axis (default None) if axis=None, does full contraction :Authors: Ben Poole Copyright 2019 Google LLC. """ num_samples = x.shape[0].value if axis: log_num_elem = tf.math.log(num_samples - 1) else: log_num_elem = tf.math.log(num_samples * (num_samples - 1)) return tf.reduce_logsumexp(x - tf.linalg.tensor_diag(np.inf * tf.ones(num_samples)), axis=axis)\ - log_num_elem def const_fn(x, const=1.0): """Function mapping any argument to a constant float value. Keyword arguments: x -- dummy argument const (float) -- constant value of the image """ return const def mlp(hidden_dim, output_dim, layers, activation): """Constructs multi-layer perceptron (MLP) critic with given number of hidden layers. Keyword arguments: hidden_dim (int) -- dimensionality of hidden dense layers output_dim (int) -- dimensionality of the output tensor layers (int) -- number of hidden dense layers activation -- activation function of the neurons """ return tf.keras.Sequential( [tfkl.Dense(hidden_dim, activation) for _ in range(layers)] + [tfkl.Dense(output_dim)])
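# --- Usage sketch for mlp() (dimensions and activation are arbitrary choices) ---
if __name__ == "__main__":
    critic = mlp(hidden_dim=256, output_dim=32, layers=2, activation='relu')
    # critic(tf.zeros([8, 10]))  # would map a batch of 8 ten-dim samples to 32-dim outputs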
python
# To run all the tests, run: python -m unittest in the terminal in the project directory. from os.path import dirname, basename, isfile, join import glob # makes the modules easily loadable modules = glob.glob(join(dirname(__file__), "*.py")) __all__ = [basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')]
python