import os
import sys
import time
import subprocess

ITERATIONS = 10
COUNT = 20000000
VERBOSE = False
NL = '\n'
INCLUDE = "../../../src"
CC = 'gcc'
CCPP = 'g++' # LoL
OPT = '-O3'

print(f'Using {CC} compiler')

def execute_command(commands):
    print(' '.join(commands))
    subprocess.run(commands, shell=True)

# Common args
args = ['-I', INCLUDE, OPT, f'-DCOUNT={COUNT}', '-DCMC_NO_CALLBACKS']

map_args = [CC, 'hashmap.c', '-o', 'hashmap.exe']
mmap_args = [CC, 'hashmultimap.c', '-o', 'hashmultimap.exe']
cpp_args = [CCPP, 'unorderedmap.cpp', '-I', INCLUDE, '-o', 'unorderedmap.exe', OPT, f'-DCOUNT={COUNT}']

map_args.extend(args)
mmap_args.extend(args)
cpp_args.extend(args)

execute_command(map_args)
execute_command(mmap_args)
execute_command(cpp_args)

programs = ['hashmap.exe', 'hashmultimap.exe', 'unorderedmap.exe']

for prog in programs:
    total = 0
    for i in range(ITERATIONS):
        print(f'{i}...', end='')
        sys.stdout.flush()
        start_time = time.time()
        subprocess.run(prog, shell=True)
        delta = time.time() - start_time
        total += delta
        if VERBOSE:
            print(f'({delta})')
    avg = total / ITERATIONS
    print(f'{"" if VERBOSE else NL}{avg} seconds for {COUNT} -> AVG[{int((avg * 1e9) / COUNT)} ns] [{prog}]{NL if VERBOSE else ""}')
python
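The script above times each binary with time.time and averages over ITERATIONS to get a nanoseconds-per-element figure. A minimal standalone sketch of the same timing idea (my own, with a placeholder binary name), using time.perf_counter, the usual choice for measuring short intervals:

import subprocess
import time

ITERATIONS = 10
COUNT = 20000000
PROGRAM = './hashmap.exe'  # placeholder name; any benchmark binary works

total = 0.0
for _ in range(ITERATIONS):
    start = time.perf_counter()          # monotonic, high-resolution clock
    subprocess.run(PROGRAM, shell=True)  # one full benchmark run
    total += time.perf_counter() - start

avg = total / ITERATIONS
print(f'{avg:.3f} s for {COUNT} elements -> {int(avg * 1e9 / COUNT)} ns per element')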
# Copyright 2011-2013 Colin Scott # Copyright 2011-2013 Andreas Wundsam # Copyright 2012-2013 Sam Whitlock # Copyright 2012-2012 Kyriakos Zarifis # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import urllib2 import logging import json import string import time from pox.lib.graph.util import NOMDecoder from pox.openflow.topology import OpenFlowSwitch from pox.openflow.flow_table import FlowTable, TableEntry from pox.openflow.libopenflow_01 import ofp_match, ofp_action_output from sts.entities import POXController, BigSwitchController log = logging.getLogger("Snapshot") class Snapshot(object): """ A Snapshot object is a description of the controllers' view of the network in terms that are meaningful to the debugger. Any snaphsot grabbed from any controller should be transformed into a Snapshot object in order to be fed to HSA """ def __int__(self): self.time = None self.switches = [] # The debugger doesn't use the next two (for now anyway) self.hosts = [] self.links = [] def __repr__(self): return "<Snapshot object: (%i switches)>"%len(self.switches) class SnapshotService(object): """ Controller-specific SnapshotServices take care of grabbing a snapshot from their controller in whatever format the controller exports it, and translating it into a Snaphot object that is meaningful to the debbuger """ def __init__(self): self.snapshot = Snapshot() def fetchSnapshot(self, controller): pass class FlexibleNOMDecoder: def __init__(self): self.pox_nom_decoder = NOMDecoder() def decode(self, json): if isinstance(json, (str, unicode)) and string.find(json, "__module__")>=0: return self.pox_nom_decoder.decode(json) else: return self.decode_switch(json) def decode_switch(self, json): flow_table = self.decode_flow_table(json["flow_table"] if "flow_table" in json else json["flowTable"]) switch = OpenFlowSwitch(json["dpid"], flow_table=flow_table) return switch def decode_flow_table(self, json): ft = FlowTable() for e in json["entries"]: ft.add_entry(self.decode_entry(e)) return ft def decode_entry(self, json): e = TableEntry() for (k, v) in json.iteritems(): if k == "match": e.match = self.decode_match(v) elif k == "actions": e.actions = [ self.decode_action(a) for a in v ] else: setattr(e, k, v) return e def decode_match(self, json): return ofp_match(**json) def decode_action(self, json): a = ofp_action_output(port = json['port']) return a class SyncProtoSnapshotService(SnapshotService): def __init__(self): SnapshotService.__init__(self) self.myNOMDecoder = FlexibleNOMDecoder() def fetchSnapshot(self, controller): jsonNOM = controller.sync_connection.get_nom_snapshot() # Update local Snapshot object self.snapshot.switches = [self.myNOMDecoder.decode(s) for s in jsonNOM["switches"]] self.snapshot.hosts = [self.myNOMDecoder.decode(h) for h in jsonNOM["hosts"]] self.snapshot.links = [self.myNOMDecoder.decode(l) for l in jsonNOM["links"]] self.snapshot.time = time.time() return self.snapshot class PoxSnapshotService(SnapshotService): def __init__(self): SnapshotService.__init__(self) self.port = 7790 self.myNOMDecoder = 
NOMDecoder() def fetchSnapshot(self, controller): from pox.lib.util import connect_socket_with_backoff import socket snapshotSocket = connect_socket_with_backoff('127.0.0.1', self.port) log.debug("Sending Request") snapshotSocket.send("{\"hello\":\"nommessenger\"}") snapshotSocket.send("{\"getnom\":0}", socket.MSG_WAITALL) log.debug("Receiving Results") jsonstr = "" while True: data = snapshotSocket.recv(1024) log.debug("%d byte packet received" % len(data)) if not data: break jsonstr += data if len(data) != 1024: break snapshotSocket.close() jsonNOM = json.loads(jsonstr) # (json string with the NOM) # Update local Snapshot object self.snapshot.switches = [self.myNOMDecoder.decode(s) for s in jsonNOM["switches"]] self.snapshot.hosts = [self.myNOMDecoder.decode(h) for h in jsonNOM["hosts"]] self.snapshot.links = [self.myNOMDecoder.decode(l) for l in jsonNOM["links"]] self.snapshot.time = time.time() return self.snapshot class BigSwitchSnapshotService(SnapshotService): def __init__(self): SnapshotService.__init__(self) def fetchSnapshot(self, controller): req = urllib2.Request('http://localhost:8080/wm/core/proact') response = urllib2.urlopen(req) json_data = response.read() l = json.loads(json_data) res = [] for m in l: res.append(Snapshot.from_json_map(m)) return res # Create local Snapshot object snapshot = Snapshot() self.snapshot = snapshot return self.snapshot def get_snapshotservice(controller_configs): '''Return a SnapshotService object determined by the name of the first controller in the controller_configs. For now, we only support a homogenous controller environment.''' # Read from config what controller we are using # TODO(cs): allow for heterogenous controllers? if controller_configs != [] and controller_configs[0].sync: snapshotService = SyncProtoSnapshotService() elif controller_configs != [] and controller_configs[0].controller_class == POXController: snapshotService = PoxSnapshotService() elif controller_configs != [] and controller_configs[0].controller_class == BigSwitchController: snapshotService = BigSwitchSnapshotService() else: # We default snapshotService to POX snapshotService = PoxSnapshotService() return snapshotService
python
# Songsheng YING # coding=utf-8 # python3.6.7 Anaconda import os, sys import numpy as np from progressbar import ProgressBar, Percentage, Bar import xml.etree.ElementTree as ET def data_file_reader_fr(file_name, lang): print(" Working on " + file_name) if lang == "French": path = os.getcwd() + '/data/input/French_wnet/' else: print("This reader function only supports French file") return tree = ET.parse(path + file_name) root = tree.getroot() rel_type_dict = { 'near_antonym': '!', 'hypernym': '@', 'instance_hypernym': '@i', 'hyponym': '~', 'instance_hyponym': '~i', 'be_in_state': '#s', # TO VERIFY 'eng_derivative': '+', 'subevent': '*', # TO VERIFY 'also_see': '^', 'verb_group': '$', 'category_domain': ';c', 'derived': '\\', 'similar_to': '&', 'usage_domain': ';u', 'region_domain': ';r', 'holo_part': '#p', 'holo_member': '#m', 'causes': '>', 'holo_portion': '#p', # TO VERIFY 'participle': '<' } file_data = {} offset_list = [] for synset in root: synsetWrds = [] synsetConnections = [] synsetRelationTypes = [] connectedSynsetPos = [] for word in synset.find('SYNONYM').getchildren(): try: synsetWrds.append(word.text.replace(' ', '_')) except: print(word.tag, word.attrib) synsetWrds.append('__unknown__') for relation in synset.findall('ILR') : synsetRelationTypes.append(rel_type_dict[relation.attrib['type']]) synsetConnections.append(relation.text) connectedSynsetPos.append(relation.text.split('-')[3]) data = (synsetWrds, synsetConnections, synsetRelationTypes, connectedSynsetPos, None) file_data.update({synset.find('ID').text:data}) offset_list.append(synset.find('ID').text) return file_data, offset_list
python
workflow_xml_start = """<workflow-app name="etk-april-2017" xmlns="uri:oozie:workflow:0.5"> <global> <configuration> <property> <name>oozie.launcher.mapreduce.map.memory.mb</name> <value>10000</value> </property> </configuration> </global> <start to="shell-1120"/> <kill name="Kill"> <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message> </kill> <action name="shell-1120"> <shell xmlns="uri:oozie:shell-action:0.1"> <job-tracker>${jobTracker}</job-tracker> <name-node>${nameNode}</name-node> <configuration> <property> <name>mapred.input.dir.recursive</name> <value>true</value> </property> <property> <name>oozie.action.max.output.data</name> <value>8192</value> </property> </configuration> <exec>./run.sh</exec> """ workflow_xml_end = """ <capture-output/> </shell> <ok to="End"/> <error to="Kill"/> </action> <end name="End"/> </workflow-app>""" class WM(object): def __init__(self): self.workflow_xml_start = workflow_xml_start self.workflow_xml_end = workflow_xml_end @staticmethod def create_file_property_for_workflow_xml(file_path): v = file_path.split('/') file_name = v[len(v) - 1].strip() if file_name and file_name != '': return '<file>{}#{}</file>'.format(file_path, file_name) @staticmethod def create_archive_property_for_workflow_xml(file_path, extension_length=3): v = file_path.split('/') file_name = v[len(v) - 1].strip() file_name = file_name[0:len(file_name) - (extension_length + 1)] # +1 is for the '.' if file_name and file_name != '': return '<archive>{}#{}</archive>'.format(file_path, file_name) @staticmethod def create_arguments_for_workflow_xml(argument): return '<argument>{}</argument>'.format(argument) if __name__ == '__main__': # print WM.create_file_property_for_workflow_xml('/user/worker/etk/lib/dictionaries/eyecolors.json.gz') # print WM.create_archive_property_for_workflow_xml('/user/worker/etk/lib/etk_env.zip') wm = WM() files = wm.create_file_property_for_workflow_xml('/user/worker/etk/lib/dictionaries/eyecolors.json.gz') archive = wm.create_archive_property_for_workflow_xml('/user/worker/etk/lib/etk_env.zip') argument = wm.create_arguments_for_workflow_xml('extraction_config.json') wf = wm.workflow_xml_start + argument + files + archive + wm.workflow_xml_end print wf
python
from matplotlib import pyplot as plt
from matplotlib_venn import venn3


def plot_confusion_matrix(cm, cmap=plt.cm.Blues):
    """
    Args:
        cm (np.ndarray): Confusion matrix to plot
        cmap: Color map to be used in matplotlib's imshow

    Returns:
        Figure and axis on which the confusion matrix is plotted
    """
    fig, ax = plt.subplots()
    ax.imshow(cm, interpolation="nearest", cmap=cmap)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlabel("Receiver's action", fontsize=14)
    ax.set_ylabel("Sender's state", fontsize=14)

    # Loop over data dimensions and create text annotations.
    fmt = "d"
    thresh = cm.max() / 2.0
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(
                j,
                i,
                format(cm[i, j], fmt),
                ha="center",
                va="center",
                color="white" if cm[i, j] > thresh else "black",
            )
    fig.tight_layout()
    return fig, ax


def plot_venn(ns):
    n_diff, n01, n12, n02, n012 = ns
    fig, ax = plt.subplots()
    venn3(
        subsets=(
            n_diff + n12 - n012,
            n_diff + n02 - n012,
            n01 - n012,
            n_diff + n01 - n012,
            n02 - n012,
            n12 - n012,
            n012,
        ),
        set_labels=(r"$s_1$", r"$s_2$", r"$s_3$"),
        ax=ax,
    )
    return fig, ax
python
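For context, matplotlib_venn's venn3 interprets its subsets tuple in the region order (Abc, aBc, ABc, abC, AbC, aBC, ABC), which is the layout plot_venn fills above. A small usage sketch, assuming the two plotting helpers above are importable; the counts are made-up values for illustration:

import numpy as np

# ns = (n_diff, n01, n12, n02, n012), matching plot_venn's unpacking above
fig, ax = plot_venn((10, 4, 3, 2, 1))  # illustrative counts only
fig.savefig("venn_example.png")

# A 2x2 confusion matrix works the same way with plot_confusion_matrix
cm = np.array([[40, 10],
               [5, 45]])  # made-up sender/receiver counts
fig, ax = plot_confusion_matrix(cm)
fig.savefig("confusion_matrix_example.png")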
from src import network
import numpy as np
from src.study.utils import downloader
from src.study.mnist_common import mnist_reader

# Fashion-MNIST class names, indexed by label (0-9).
items = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
         "Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]

def download_mnist_fashion_data():
    downloader.download_data("http://fashion-mnist.s3-website.eu-central-1.amazonaws.com",
                             ["train-images-idx3-ubyte.gz",
                              "train-labels-idx1-ubyte.gz",
                              "t10k-images-idx3-ubyte.gz",
                              "t10k-labels-idx1-ubyte.gz"])

download_mnist_fashion_data()

def train_model():
    net = network.Network([784, 30, 10])
    training_data, test_data = mnist_reader.load()
    net.train("mnists_fashion_classifier.learnings", training_data,
              epochs=70, mini_batch_size=4, eta=0.01, test_data=test_data)
    return net

def print_result(actual, expected):
    print("%s is detected as %s" % (items[expected], items[actual]))

def evaluate(net):
    training_data, test_data = mnist_reader.load()
    for test_sample in test_data[9000:]:
        print_result(np.argmax(net.feedforward(test_sample[0])), test_sample[1])

if __name__ == "__main__":
    evaluate(train_model())
python
import re def modify_en_text(text, modify_mode='BASIC', keep_emoji=True): """ Make your text easy to read and to translate. Args: text(str): target dirty text. modify_mode(str): mode selection keep_emoji(bool): call back remove_emoji func or not. Returns: text_output(str): formatted output. """ if not keep_emoji: text = remove_emoji(text) sents = split_text_to_sentences_en(text) text_output = mode_factory(modify_mode)(sents) return text_output def remove_emoji(text): """ Remove emoji. """ emoji_pattern = re.compile("[" u"\U0001F600-\U0001F64F" # emoticons u"\U0001F300-\U0001F5FF" # symbols & pictographs u"\U0001F680-\U0001F6FF" # transport & map symbols u"\U0001F1E0-\U0001F1FF" # flags (iOS) "]+", flags=re.UNICODE) return emoji_pattern.sub(r'', text) def split_text_to_sentences_en(text): """ Make your text easy to read and to translate. Args: text(str): target dirty text. Returns: sents(list): sentences splited. """ text = text.replace(" . . . ","...") #replace LaTex ellipses with original ellipses sents = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', text) return sents def mode_factory(modify_mode): if modify_mode == "BASIC": return mode_basic if modify_mode == 'LIST-MARK': return mode_listmark def mode_basic(sents): text_output = "" for _s in sents: s = ' '.join(_s.split(), ) text_output += s + "\n\n" return text_output def mode_listmark(sents): text_output = "" for _s in sents: s = ' '.join(_s.split(), ) text_output += "- " + s + "\n\n" return text_output
python
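A brief usage sketch for the helpers above; the sample text is made up. BASIC puts each detected sentence in its own paragraph, while LIST-MARK prefixes each one with a bullet:

sample = "This is the first sentence. Here is the second one? And a   third,  with extra spaces."
print(modify_en_text(sample, modify_mode='BASIC'))
print(modify_en_text(sample, modify_mode='LIST-MARK', keep_emoji=False))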
from functools import reduce

testcases = int(input())
for t in range(testcases):
    n = int(input())
    vals = list(map(int, input().split()))
    missed = 0
    for i in range(1, len(vals)):
        x = abs(vals[i] - vals[i - 1])
        if x > 0:
            x -= 1
        missed += x
    print(missed)
python
# Import required libraries
from flask import Flask, render_template, request
import pickle

# Initialise the object used to run the Flask app
app = Flask(__name__, static_folder='static', template_folder='templates')

# Load the pickled model file
model = pickle.load(open('model.pkl', 'rb'))

@app.route('/', methods=['POST', 'GET'])
def index():
    return render_template('index.html')

@app.route('/predict', methods=['POST', 'GET'])
def predict():
    if request.method == 'GET':
        return render_template('index.html')
    if request.method == 'POST':
        features = [float(x) for x in request.form.values()]
        print(features)
        labels = model.predict([features])
        print(labels)
        species = labels[0]
        # Species 0 = Setosa, 1 = Versicolor, 2 = Virginica
        if species == 0:
            result = "Iris-Setosa"
        elif species == 1:
            result = "Iris-VersiColor"
        else:
            result = "Iris-Virginica"
        return render_template('index.html', result=result)

# Entry point: run the development server locally (this will change when deployed on AWS)
if __name__ == '__main__':
    app.run(debug=False)
python
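The app above assumes a pre-trained model.pkl exists next to it but does not show how it was made. One plausible way to produce such a file with scikit-learn is sketched below; the choice of LogisticRegression is an assumption for illustration, not taken from the original project:

import pickle
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

# Fit a simple classifier on the iris dataset (labels 0/1/2 map to
# Setosa/Versicolor/Virginica, matching the branches in predict() above).
X, y = load_iris(return_X_y=True)
clf = LogisticRegression(max_iter=200).fit(X, y)

with open('model.pkl', 'wb') as fh:
    pickle.dump(clf, fh)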
# Copyright 2017 Cisco Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime from nose.tools import * from ..connection.info import custom_setup, custom_teardown from ucsmsdk.ucshandle import UcsHandle from ucsm_apis.admin.auth import * handle = None domain_name = "test" + datetime.date.today().strftime('%Y%b%d') def setup_module(): global handle handle = custom_setup() # handle.set_dump_xml() def teardown_module(): custom_teardown(handle) @raises(UcsOperationError) def test_001_auth_domain_delete_domainnotexist(): auth_domain_delete(handle, name=domain_name) def test_002_auth_domain_exists_domainnotexist(): (status, mo) = auth_domain_exists(handle, name=domain_name) assert not status def test_003_auth_domain_create_default(): auth_domain_create(handle, name=domain_name) def test_004_auth_domain_exist_default(): (status, mo) = auth_domain_exists(handle, name=domain_name) assert status def test_005_auth_domain_modify_default(): auth_domain_modify(handle, name=domain_name) def test_006_auth_domain_modify_descr_empty(): auth_domain_modify(handle, name=domain_name, descr="") def test_007_auth_domain_modify_descr_default(): auth_domain_modify(handle, name=domain_name, descr="default") def test_008_auth_domain_realm_configure_default(): auth_domain_realm_configure(handle, domain_name) def test_009_auth_domain_delete_default(): auth_domain_delete(handle, name=domain_name) def test_010_native_auth_configure_default(): native_auth_configure(handle) def test_011_native_auth_default(): native_auth_default(handle) def test_012_native_auth_console(): native_auth_console(handle)
python
import functools import operator import unittest from migen import * def cols(rows): """ >>> a = [ ... [1, 2], ... ['a', 'b'], ... [4, 5], ... ] >>> for c in cols(a): ... print(c) [1, 'a', 4] [2, 'b', 5] >>> a = [ ... [1, 2, 3], ... ['a', 'b', 'c'], ... ] >>> for c in cols(a): ... print(c) [1, 'a'] [2, 'b'] [3, 'c'] """ all_c = [] for ci in range(len(rows[0])): all_c.append([]) for ci in range(len(rows[0])): for ri in range(len(rows)): assert len(rows[ri]) == len(all_c), "len(%r) != %i" % (rows[ri], len(all_c)) all_c[ci].append(rows[ri][ci]) return all_c def CRC_paralelo(poly, crc_in, data): """ k== num_data_bits len(data_cur) == num_data_bits >>> for i in range(5): ... l = [0]*5; l[i] = 1 ... r = lfsr_serial_shift_crc( ... poly=[0,0,1,0,1], # (5, 2, 0) ... crc_in=l, ... data=[0,0,0,0], ... ) ... print("Min[%i] =" % i, r) Min[0] = [1, 0, 0, 0, 0] Min[1] = [0, 0, 1, 0, 1] Min[2] = [0, 1, 0, 1, 0] Min[3] = [1, 0, 1, 0, 0] Min[4] = [0, 1, 1, 0, 1] >>> for i in range(4): ... d = [0]*4; d[i] = 1 ... r = lfsr_serial_shift_crc( ... poly=[0,0,1,0,1], # (5, 2, 0) ... crc_in=[0,0,0,0,0], ... data=d, ... ) ... print("Nin[%i] =" % i, r) Nin[0] = [0, 0, 1, 0, 1] Nin[1] = [0, 1, 0, 1, 0] Nin[2] = [1, 0, 1, 0, 0] Nin[3] = [0, 1, 1, 0, 1] """ poly = poly[::-1] #Primer elemento debe ser el bit menos significativo data = data[::-1] k=len(data) p=len(poly) assert p>1 assert len(crc_in) == p crc_next = list(crc_in) for j in range(p): crc_upper_bit = crc_next[p-1] for i in range(p-1, 0, -1): if poly[i]: crc_next[i] = crc_next[i-1] ^ crc_upper_bit ^ data[j] else: crc_next[i] = crc_next[i-1] crc_next[0] = crc_upper_bit ^ data[j] return list(crc_next[::-1]) def matrices(poly, data_width): #poly: polinomio en bits (lista), bit mas significativo como primer elemento #data_width: cantidad de bits de la palabra poly_size= len(poly) # data_width*polysize matrix == lfsr(0,Nin) rows_nin = [] # (a) calculate the N values when Min=0 and Build NxM matrix # - Each value is one hot encoded (there is only one bit) # - IE N=4, 0x1, 0x2, 0x4, 0x8 # - Mout = F(Nin,Min=0) # - Each row contains the results of (a) # - IE row[0] == 0x1, row[1] == 0x2 # - Output is M-bit wide (CRC width) # - Each column of the matrix represents an output bit Mout[i] as a function of Nin info = [] for i in range(data_width): # crc_in = [0,...,0] = Min crc_in = [0,]*poly_size # data = [0,..,1,..,0] = Nin data = [0,]*data_width data[i] = 1 # Calculate the CRC rows_nin.append(CRC_paralelo(poly, crc_in, data)) info.append("lfsr(%r, %r, %r) = %r" % (poly, crc_in, data, rows_nin[-1])) assert len(rows_nin) == data_width cols_nin = cols(rows_nin)[::-1] # polysize*polysize matrix == lfsr(Min,0) info.append("") rows_min = [] for i in range(poly_size): # crc_in = [0,..,1,...,0] = Min crc_in = [0,]*poly_size crc_in[i] = 1 # data = [0,..,0] = Nin data = [0,]*data_width # Calculate the crc rows_min.append(CRC_paralelo(poly, crc_in, data)) info.append("lfsr(%r, %r, %r) = %r" % (poly, crc_in, data, rows_min[-1])) assert len(rows_min) == poly_size cols_min = cols(rows_min)[::-1] # (c) Calculate CRC for the M values when Nin=0 and Build MxM matrix # - Each value is one hot encoded # - Mout = F(Nin=0,Min) # - Each row contains results from (7) info.append("") #for i in range(data_width, -1, -1): # info.append("Mout[%i] = %r %r" % (i, cols_nin[i], cols_min[i])) return info, cols_nin, cols_min class TxParallelCrcGenerator(Module): """ width : int Width of the CRC. polynomial : int CRC polynomial in integer form. 
initial : int Initial value of the CRC register before data starts shifting in. Input Ports ------------ i_data_payload : Signal(8) Byte wide data to generate CRC for. i_data_strobe : Signal(1) Strobe signal for the payload. Output Ports ------------ o_crc : Signal(width) Current CRC value. """ def __init__(self, data_width, crc_width, polynomial, initial=0): self.i_data_payload = Signal(data_width) self.i_data_strobe = Signal() self.reset=Signal() self.o_crc = Signal(crc_width) crc_dat = Signal(data_width) crc_cur = Signal(crc_width, reset=initial) crc_next = Signal(crc_width, reset_less=True) crc_cur_reset_bits = [ int(i) for i in "{0:0{width}b}".format( #crc_cur.reset.value,width=crc_width)[::-1]] crc_cur.reset.value,width=crc_width)] self.comb += [ #crc_dat.eq(self.i_data_payload[::-1]), crc_dat.eq(self.i_data_payload), # FIXME: Is XOR ^ initial actually correct here? #self.o_crc.eq(crc_cur[::-1] ^ initial), self.o_crc.eq(crc_cur), ] self.sync += [ If(self.i_data_strobe, crc_cur.eq(crc_next), ), If(self.reset, crc_cur.eq(initial) ) ] poly_list = [] #convierte a binario, bit mas significativo como primer elemento for i in range(crc_width): poly_list.insert(0, polynomial >> i & 0x1) assert len(poly_list) == crc_width _, cols_nin, cols_min = matrices(poly_list, data_width) crc_next_reset_bits = list(crc_cur_reset_bits) for i in range(crc_width): to_xor = [] crc_next_reset_bit_i = [] for j, use in enumerate(cols_nin[i]): if use: to_xor.append(crc_dat[j]) crc_next_reset_bit_i.append(0) for j, use in enumerate(cols_min[i]): if use: to_xor.append(crc_cur[j]) crc_next_reset_bit_i.append(crc_cur_reset_bits[j]) crc_next_reset_bits[i] = functools.reduce(operator.xor, crc_next_reset_bit_i) self.comb += [ crc_next[i].eq(functools.reduce(operator.xor, to_xor)), ] crc_next_reset_value = int("0b"+"".join(str(i) for i in crc_next_reset_bits[::-1]), 2) crc_next.reset.value = crc_next_reset_value def tb(dut): yield dut.i_data_strobe.eq(1) yield dut.i_data_payload.eq(0x1a) yield yield dut.i_data_payload.eq(0x1b) yield yield dut.i_data_payload.eq(0x1c) yield yield dut.i_data_payload.eq(0x1d) yield yield dut.i_data_payload.eq(0x1f) yield yield dut.i_data_payload.eq(0x2a) yield yield dut.i_data_payload.eq(0x2b) yield yield dut.i_data_payload.eq(0x2c) yield yield dut.i_data_strobe.eq(0) yield yield dut.reset.eq(1) yield yield dut.reset.eq(0) yield yield dut.i_data_strobe.eq(1) yield dut.i_data_payload.eq(0x1a) yield yield dut.i_data_payload.eq(0x1b) yield yield dut.i_data_payload.eq(0x1c) yield yield dut.i_data_payload.eq(0x1d) yield yield dut.i_data_payload.eq(0x1f) yield yield dut.i_data_payload.eq(0x2a) yield yield dut.i_data_payload.eq(0x2b) yield yield dut.i_data_payload.eq(0x2c) yield dut=TxParallelCrcGenerator(data_width=32, crc_width=20, polynomial=0xc1acf,initial=0xfffff) run_simulation(dut,tb(dut), vcd_name="prueba_crc.vcd")
python
# -*- coding: utf-8 -*- """ DTSA-II Script - J. R. Minter - 2016-10-12 massFractionsTheEasyWay.py Date Who Comment ---------- --- ----------------------------------------------- 2016-10-12 JRM Mass fractions the easy way... Elapse: 0:00:00.0 ROCPW7ZC5C42 """ import sys sys.packageManager.makeJavaPackage("gov.nist.microanalysis.NISTMonte.Gen3", "CharacteristicXRayGeneration3, BremsstrahlungXRayGeneration3,FluorescenceXRayGeneration3, XRayTransport3", None) import os import glob import shutil import time import math import csv import gov.nist.microanalysis.NISTMonte as nm import gov.nist.microanalysis.NISTMonte.Gen3 as nm3 import gov.nist.microanalysis.EPQLibrary as epq import gov.nist.microanalysis.EPQLibrary.Detector as epd import gov.nist.microanalysis.Utility as epu import gov.nist.microanalysis.EPQTools as ept import dtsa2 as dt2 import dtsa2.mcSimulate3 as mc3 gitDir = os.environ['GIT_HOME'] relPrj = "/dtsa2Scripts/utility" prjDir = gitDir + relPrj rptDir = prjDir + '/massFractionsTheEasyWay Results/' azo = material("Al2Zn98O100", density=5.61) wfAl = round(azo.weightFractionU(epq.Element.Al, True).doubleValue(), 5) wfZn = round(azo.weightFractionU(epq.Element.Zn, True).doubleValue(), 5) wfO = round(azo.weightFractionU(epq.Element.O, True).doubleValue(), 5) AZO = {"Al" : wfAl, "Zn": wfZn, "O" : wfO} print(AZO) es = azo.getElementSet() # print(dir(es)) # clean up cruft shutil.rmtree(rptDir) print "Done!"
python
# Import Dependencies import numpy as np import pandas as pd import datetime as dt import sqlalchemy from sqlalchemy import desc from sqlalchemy.ext.automap import automap_base from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func, inspect from sqlalchemy import Column, Integer, String, Float, Date from flask import Flask, jsonify # Create and connect Engine engine = create_engine("sqlite:///Resources/hawaii.sqlite") conn = engine.connect() # reflect an existing database into a new model Base = automap_base() # reflect the tables Base.prepare(engine, reflect=True) Measurement = Base.classes.measurement Stations = Base.classes.station # connect session with engine session = Session(engine) app = Flask(__name__) # Homepage @app.route("/") def main(): # Gives the user the options of where they can go return ( f"Please copy and paste one of the follwing to the end of the above URL:<br/>" f"<br/>" f"/api/v1.0/precipitation<br/>" f"<br/>" f"/api/v1.0/stations<br/>" f"<br/>" f"/api/v1.0/tobs<br/>" f"<br/>" f"/api/v1.0/<start><br/>" f"or<br/>" f"/api/v1.0/<start>/<end><br/>" ) @app.route("/api/v1.0/precipitation") def precipitation(): # Creates a query that looks for the most recent date stripped from the year, month, day most_recent_date = session.query( func.max(func.strftime("%Y-%m-%d", Measurement.date))).all() # Transforms SQl query into a string string_version = most_recent_date[0][0] end_date = dt.datetime.strptime(string_version, "%Y-%m-%d") # Subtracts one year from the most recent data row start_date = end_date - dt.timedelta(365) # Creates a query taht retrieves the data and percipitation from anything greater than # or equal to the start date, filtered by the date precip_data = session.query(func.strftime("%Y-%m-%d", Measurement.date), Measurement.prcp).\ filter(func.strftime("%Y-%m-%d", Measurement.date) >= start_date).all() # Creates a dictionary and put the resutls in said dictionary results = {} for result in precip_data: results[result[0]] = result[1] return jsonify(results) @app.route("/api/v1.0/stations") def stations(): # query stations list stations_ = session.query(Stations).all() # create a list of dictionaries stations_list = [] for station in stations_: station_dict = {} station_dict["id"] = station.id station_dict["station"] = station.station station_dict["name"] = station.name station_dict["latitude"] = station.latitude station_dict["longitude"] = station.longitude station_dict["elevation"] = station.elevation stations_list.append(station_dict) return jsonify(stations_list) # @app.route("/api/v1.0/<start>") # @api.route("/api/v1.0/<start>/<end>")
python
# Normals are for all puzzles where all 3 sides are the same
# 37 is Skewb
# 38 is Pyraminx main
# 39 is Pyraminx primary corners
# 40 is Pyraminx secondary corners
# 41 is Megaminx main
# 42 is Megaminx secondary
normals = {
    2: {'moves': (('U', 'D'), ('F', 'B'), ('R', 'L')), 'directions': ('', "'", '2')},
    3: {'moves': (('U', 'D'), ('F', 'B'), ('R', 'L')), 'directions': ('', "'", '2')},
    4: {'moves': (('U', 'D'), ('F', 'B'), ('R', 'L')), 'directions': ('', "'", '2', 'w2', 'w', "w'")},
    5: {'moves': (('U', 'D'), ('F', 'B'), ('R', 'L')), 'directions': ('', "'", '2', 'w2', 'w', "w'")},
    37: {'moves': (('U', 'D'), ('F', 'B'), ('R', 'L')), 'directions': ('', "'")},
    38: {'moves': (('U', 'D'), ('F', 'B'), ('R', 'L')), 'directions': ('', "'")},
    39: {'moves': ('r', 'l'), 'directions': ('', "'")},
    40: {'moves': ('u', 'd'), 'directions': ('', "'")},
    41: {'moves': ('R', "D"), 'directions': ('++', "--")},
    42: {'moves': 'U', 'directions': ('', "'")}
}

# Irregulars are for puzzles with different-sized sides.
# Use tuples as the key according to (sizeX, sizeY, sizeZ)
irregulars = {
    None
}

# Others are for other puzzles; these will vary case by case
others = {
    None
}
python
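As an illustration only (not part of the original module), a scramble generator driven by the normals table above for the entries whose 'moves' are pairs of opposite faces; the Pyraminx tip and Megaminx entries would need their own branch:

import random

def scramble(puzzle_id, length=20):
    """Build a scramble string from a cubic entry of `normals`."""
    spec = normals[puzzle_id]
    sequence = []
    last_axis = None
    for _ in range(length):
        # Choose an axis (pair of opposite faces) different from the last one
        # so the same face is never turned twice in a row.
        axis = random.choice([a for a in spec['moves'] if a is not last_axis])
        last_axis = axis
        sequence.append(random.choice(axis) + random.choice(spec['directions']))
    return ' '.join(sequence)

# e.g. scramble(3) -> "R U' F2 D R' ..." style output for a 3x3x3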
from setuptools import setup

setup(
    name="scripture-burrito",
    version="0.0.2",
    description="Python library for the Scripture Burrito data interchange format",
    url="http://github.com/bible-technology/scripture-burrito-python",
    author="BT Tech Consortium",
    author_email="[email protected]",
    license="MIT",
    packages=["scripture_burrito"],
    package_data={
        "scripture_burrito": ["schema"]
    },
    install_requires=["jsonschema"],
    entry_points={
        "console_scripts": [
            "validate-sb = scripture_burrito.validate:main",
        ],
    },
)
python
import pytest import numpy as np import pennylane as qml from pennylane_qiskit import AerDevice, BasicAerDevice from conftest import U, U2, A np.random.seed(42) THETA = np.linspace(0.11, 1, 3) PHI = np.linspace(0.32, 1, 3) VARPHI = np.linspace(0.02, 1, 3) @pytest.mark.parametrize("theta, phi", list(zip(THETA, PHI))) @pytest.mark.parametrize("shots", [None, 8192]) class TestVar: """Tests for the variance""" def test_var(self, theta, phi, device, shots, tol): """Tests for variance calculation""" dev = device(2) # test correct variance for <Z> of a rotated state observable = qml.PauliZ(wires=[0]) dev.apply( [ qml.RX(phi, wires=[0]), qml.RY(theta, wires=[0]), ], rotations=[*observable.diagonalizing_gates()], ) dev._samples = dev.generate_samples() var = dev.var(observable) expected = 0.25 * (3 - np.cos(2 * theta) - 2 * np.cos(theta) ** 2 * np.cos(2 * phi)) assert np.allclose(var, expected, **tol) def test_var_hermitian(self, theta, phi, device, shots, tol): """Tests for variance calculation using an arbitrary Hermitian observable""" dev = device(2) # test correct variance for <H> of a rotated state H = np.array([[4, -1 + 6j], [-1 - 6j, 2]]) observable = qml.Hermitian(H, wires=[0]) dev.apply( [ qml.RX(phi, wires=[0]), qml.RY(theta, wires=[0]), ], rotations=[*observable.diagonalizing_gates()], ) dev._samples = dev.generate_samples() var = dev.var(observable) expected = 0.5 * ( 2 * np.sin(2 * theta) * np.cos(phi) ** 2 + 24 * np.sin(phi) * np.cos(phi) * (np.sin(theta) - np.cos(theta)) + 35 * np.cos(2 * phi) + 39 ) assert np.allclose(var, expected, **tol) @pytest.mark.parametrize("theta, phi, varphi", list(zip(THETA, PHI, VARPHI))) @pytest.mark.parametrize("shots", [None, 8192]) class TestTensorVar: """Tests for variance of tensor observables""" def test_paulix_pauliy(self, theta, phi, varphi, device, shots, tol): """Test that a tensor product involving PauliX and PauliY works correctly""" dev = device(3) obs = qml.PauliX(0) @ qml.PauliY(2) dev.apply( [ qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.RX(varphi, wires=[2]), qml.CNOT(wires=[0, 1]), qml.CNOT(wires=[1, 2]), ], rotations=obs.diagonalizing_gates(), ) dev._samples = dev.generate_samples() res = dev.var(obs) expected = ( 8 * np.sin(theta) ** 2 * np.cos(2 * varphi) * np.sin(phi) ** 2 - np.cos(2 * (theta - phi)) - np.cos(2 * (theta + phi)) + 2 * np.cos(2 * theta) + 2 * np.cos(2 * phi) + 14 ) / 16 assert np.allclose(res, expected, **tol) def test_pauliz_hadamard_pauliy(self, theta, phi, varphi, device, shots, tol): """Test that a tensor product involving PauliZ and PauliY and hadamard works correctly""" dev = device(3) obs = qml.PauliZ(0) @ qml.Hadamard(1) @ qml.PauliY(2) dev.apply( [ qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.RX(varphi, wires=[2]), qml.CNOT(wires=[0, 1]), qml.CNOT(wires=[1, 2]), ], rotations=obs.diagonalizing_gates(), ) dev._samples = dev.generate_samples() res = dev.var(obs) expected = ( 3 + np.cos(2 * phi) * np.cos(varphi) ** 2 - np.cos(2 * theta) * np.sin(varphi) ** 2 - 2 * np.cos(theta) * np.sin(phi) * np.sin(2 * varphi) ) / 4 assert np.allclose(res, expected, **tol) def test_hermitian(self, theta, phi, varphi, device, shots, tol): """Test that a tensor product involving qml.Hermitian works correctly""" dev = device(3) A = np.array( [ [-6, 2 + 1j, -3, -5 + 2j], [2 - 1j, 0, 2 - 1j, -5 + 4j], [-3, 2 + 1j, 0, -4 + 3j], [-5 - 2j, -5 - 4j, -4 - 3j, -6], ] ) obs = qml.PauliZ(0) @ qml.Hermitian(A, wires=[1, 2]) dev.apply( [ qml.RX(theta, wires=[0]), qml.RX(phi, wires=[1]), qml.RX(varphi, wires=[2]), 
qml.CNOT(wires=[0, 1]), qml.CNOT(wires=[1, 2]), ], rotations=obs.diagonalizing_gates(), ) dev._samples = dev.generate_samples() res = dev.var(obs) expected = ( 1057 - np.cos(2 * phi) + 12 * (27 + np.cos(2 * phi)) * np.cos(varphi) - 2 * np.cos(2 * varphi) * np.sin(phi) * (16 * np.cos(phi) + 21 * np.sin(phi)) + 16 * np.sin(2 * phi) - 8 * (-17 + np.cos(2 * phi) + 2 * np.sin(2 * phi)) * np.sin(varphi) - 8 * np.cos(2 * theta) * (3 + 3 * np.cos(varphi) + np.sin(varphi)) ** 2 - 24 * np.cos(phi) * (np.cos(phi) + 2 * np.sin(phi)) * np.sin(2 * varphi) - 8 * np.cos(theta) * ( 4 * np.cos(phi) * ( 4 + 8 * np.cos(varphi) + np.cos(2 * varphi) - (1 + 6 * np.cos(varphi)) * np.sin(varphi) ) + np.sin(phi) * ( 15 + 8 * np.cos(varphi) - 11 * np.cos(2 * varphi) + 42 * np.sin(varphi) + 3 * np.sin(2 * varphi) ) ) ) / 16 assert np.allclose(res, expected, **tol)
python
import argparse import logging import os import sys sys.path.insert(0, os.path.join(sys.path[0], "..")) import development.configuration # pylint: disable = wrong-import-position import development.environment # pylint: disable = wrong-import-position logger = logging.getLogger("Main") def main(): current_directory = os.getcwd() script_path = os.path.realpath(__file__) workspace_directory = os.path.dirname(os.path.dirname(script_path)) os.chdir(workspace_directory) try: environment_instance = development.environment.load_environment() configuration_instance = development.configuration.load_configuration(environment_instance) command_list = development.configuration.load_commands() arguments = parse_arguments(environment_instance, configuration_instance, command_list) log_level = logging.getLevelName(arguments.verbosity.upper()) development.environment.configure_logging(log_level) if arguments.log_file is not None: development.environment.configure_log_file(log_level, arguments.log_file) show_project_information(configuration_instance, arguments.simulate) arguments.func(environment_instance, configuration_instance, arguments) finally: os.chdir(current_directory) def parse_arguments(environment_instance, configuration_instance, command_list): all_log_levels = [ "debug", "info", "warning", "error", "critical" ] main_parser = argparse.ArgumentParser() main_parser.add_argument("--verbosity", choices = all_log_levels, default = "info", metavar = "<level>", help = "set the logging level (%s)" % ", ".join(all_log_levels)) main_parser.add_argument("--simulate", action = "store_true", help = "perform a test run, without writing changes") main_parser.add_argument("--log-file", metavar = "<file_path>", help = "set the log file path") main_parser.add_argument("--results", metavar = "<file_path>", help = "set the file path where to store command results") subparsers = main_parser.add_subparsers(title = "commands", metavar = "<command>") subparsers.required = True for command in [ command for command in command_list if "module" in command ]: command_parser = command["module"].configure_argument_parser(environment_instance, configuration_instance, subparsers) command_parser.set_defaults(func = command["module"].run) return main_parser.parse_args() def show_project_information(configuration_instance, simulate): logger.info("%s %s", configuration_instance["project_name"], configuration_instance["project_version"]["full"]) logger.info("Script executing in %s %s", os.getcwd(), "(simulation)" if simulate else '') print("") if __name__ == "__main__": main()
python
import sys import os import json import hashlib import EVMfunction as EVMf import EVMcompiler as EVMc import EVMparse as EVMp print('Kam1n0 script for EVM is now running...') print('start persisting...') args = sys.argv sol_file_name = args[1].split('\\')[-1] abs_file_name = os.path.abspath(args[1]) json_file_name = abs_file_name #Compile sol -> bin, asm print('compile %s' % sol_file_name) (if_pragma, contract_name_list) = EVMc.compiler(abs_file_name) #Parse data = dict() data['ida_compiler'] = 'Unknown' data['name'] = abs_file_name data['md5'] = hashlib.md5(data['name'].encode('utf-8')).hexdigest() data['architecture'] = {} data['architecture']['type'] = 'metapc' #evm data['architecture']['size'] = 'b64' #?? data['architecture']['endian'] = "le" #?? data['vulnerabilities'] = list() data['contracts'] = list() (asm, bin, ast) = EVMp.get_code_information(contract_name_list) contract_count = len(contract_name_list) ordered_ast = EVMf.get_ordered_ast(ast, contract_count, if_pragma) # get ordered_ast['Function'] #parameter block_id = 0 current_address = 0 callee = dict() prev_cont = None prev_func = None prev_block = None for i in range(contract_count): if asm[i] != None: (prev_cont, prev_func, prev_block, current_address, block_id, bin) = EVMf.call_parse( data, asm[i], ordered_ast, contract_name_list[i], current_address, block_id, callee, bin, prev_cont, prev_func, prev_block, 0) end_address = current_address if prev_cont != None: prev_cont['see'] = end_address if prev_func != None: prev_func['see'] = end_address if prev_block != None: prev_block['see'] = end_address #bytes for c in data['contracts']: for f in c['functions']: for b in f['blocks']: begin = b['sea']*2 end = b['see']*2 b['bytes'] = bin[begin: end] #print(b['name'], ': ', begin, end, ': ', b['bytes']) #call EVMf.get_call(data, callee, end_address) #Labeling print('give labels to %s' % sol_file_name) label_file = '.\\Label.json' label = EVMf.get_json(label_file); EVMf.labeling(sol_file_name, data, label); #concate contracts and functions data['functions'] = list() for c in data['contracts']: for f in c['functions']: f['name'] = f['name'] + '.' + c['name'] data['functions'].append(f) #output with open('%s.tmp0.json' % json_file_name, 'w', encoding="utf-8") as outfile: json.dump(data, outfile, ensure_ascii=False)
python
#!/usr/bin/env python3
"""Bank account without synchronization, which causes a race condition."""


class UnsyncedBankAccount:
    """Bank account without synchronization"""

    balance: float

    def __init__(self, balance: float = 0):
        self.balance: float = balance

    def deposit(self, amount: float) -> None:
        if amount > 0:
            self.balance += amount
        else:
            raise ValueError("You can't deposit a negative amount of money")

    def withdraw(self, amount: float) -> None:
        if 0 < amount <= self.balance:
            self.balance -= amount
        else:
            raise ValueError("Account does not contain sufficient funds")
python
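A small driver (my addition, not part of the original file) that makes the missing synchronization visible: `self.balance += amount` is a read-modify-write, so concurrent deposits can interleave and the final balance is not guaranteed to match the expected total:

import threading

def demo_race(iterations: int = 100000, workers: int = 4) -> None:
    account = UnsyncedBankAccount()

    def deposit_many() -> None:
        for _ in range(iterations):
            account.deposit(1)

    threads = [threading.Thread(target=deposit_many) for _ in range(workers)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    # Without a lock around deposit(), lost updates may make this lower
    # than iterations * workers.
    print(f"expected {iterations * workers}, got {account.balance}")

if __name__ == "__main__":
    demo_race()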
# import setuptools # with open("README.md", "r") as fh: # long_description = fh.read() # setuptools.setup( # #Here is the module name. # name="TOPSIS-Shivansh-101803103", # #version of the module # version="0.0.1", # #Name of Author # author="Shivansh Kumar", # #your Email address # author_email="[email protected]", # #Small Description about module # description="TOPSIS in Python", # long_description=long_description, # #Specifying that we are using markdown file for description # long_description_content_type="text/markdown", # packages=setuptools.find_packages(), # #classifiers like program is suitable for python3, just leave as it is. # classifiers=[ # "Programming Language :: Python :: 3", # "License :: OSI Approved :: MIT License", # "Operating System :: OS Independent", # ], # ) from setuptools import setup with open("README.md","r") as fh: long_description = fh.read() setup( name = 'TOPSIS-Shivansh-101803103', version = '0.0.1', description = 'Find the Topsis Score Easily as well as preciously - Multiple Criteria Decision Making!', py_modules = ["topsis"], package_dir = {'':'TOPSIS-Shivansh-101803103'}, package_data={'':['LICENSE.txt']}, include_package_data=True, # url="https://github.com/manmeet-kaur18/Topsis", author="Shivansh Kumar", author_email="[email protected]", classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Topic :: Software Development :: Build Tools', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', ], long_description=long_description, long_description_content_type="text/markdown", extras_require={ "dev":[ "pytest>=3.7", ], }, )
python
from collections import defaultdict from contextlib import contextmanager from enum import Enum import redis_lock from passari_workflow.redis.connection import get_redis_connection from rq import Queue from rq.exceptions import NoSuchJobError from rq.job import Job from rq.registry import FailedJobRegistry, StartedJobRegistry class QueueType(Enum): """ Each queue type corresponds to a RQ queue """ DOWNLOAD_OBJECT = "download_object" CREATE_SIP = "create_sip" SUBMIT_SIP = "submit_sip" CONFIRM_SIP = "confirm_sip" ENQUEUE_OBJECTS = "enqueue_objects" class WorkflowQueue(Queue): # Workflow tasks have a default timeout of 4 hours DEFAULT_TIMEOUT = 14400 OBJECT_QUEUE_TYPES = [ QueueType.DOWNLOAD_OBJECT, QueueType.CREATE_SIP, QueueType.SUBMIT_SIP, QueueType.CONFIRM_SIP ] def job_id_to_object_id(job_id): """ Extract the object ID from a RQ job ID """ try: object_id = int(job_id.split("_")[-1]) return object_id except ValueError: return None def get_queue(queue_type): """ Get RQ queue according to its QueueType :param QueueType queue_type: Queue to return """ con = get_redis_connection() queue_type = QueueType(queue_type) queue = WorkflowQueue(queue_type.value, connection=con) return queue def delete_jobs_for_object_id(object_id): """ Delete all jobs for the given object ID """ object_id = int(object_id) redis = get_redis_connection() cancelled_count = 0 for queue_type in QueueType: job_id = f"{queue_type.value}_{object_id}" try: Job.fetch(job_id, connection=redis).delete() cancelled_count += 1 except NoSuchJobError: pass return cancelled_count def get_enqueued_object_ids(): """ Get object IDs from every object-related queue including every pending, executing and failed job. This can be used to determine which jobs can be enqueued without risk of duplicates """ object_ids = set() registry_types = (StartedJobRegistry, FailedJobRegistry) for queue_type in OBJECT_QUEUE_TYPES: queue = get_queue(queue_type) # Retrieve started and failed jobs for registry_type in registry_types: job_registry = registry_type(queue=queue) job_ids = job_registry.get_job_ids() for job_id in job_ids: object_id = job_id_to_object_id(job_id) if object_id is not None: object_ids.add(object_id) # Retrieve scheduled jobs for job_id in queue.get_job_ids(): object_id = job_id_to_object_id(job_id) if object_id is not None: object_ids.add(object_id) return object_ids def get_running_object_ids(): """ Get object IDs which are currently being executed in the workflow """ object_ids = set() for queue_type in OBJECT_QUEUE_TYPES: queue = get_queue(queue_type) job_registry = StartedJobRegistry(queue=queue) job_ids = job_registry.get_job_ids() for job_id in job_ids: object_id = job_id_to_object_id(job_id) if object_id is not None: object_ids.add(object_id) return object_ids def get_object_id2queue_map(object_ids): """ Get a {object_id: queue_names} dictionary of object IDs and the queues they currently belong to """ queue_object_ids = defaultdict(set) queue_map = {} for queue_type in OBJECT_QUEUE_TYPES: queue = get_queue(queue_type) started_registry = StartedJobRegistry(queue=queue) job_ids = started_registry.get_job_ids() + queue.get_job_ids() # Check pending or executing jobs for job_id in job_ids: object_id = job_id_to_object_id(job_id) if object_id is not None: queue_object_ids[queue_type.value].add(object_id) failed_registry = FailedJobRegistry(queue=get_queue(queue_type)) job_ids = failed_registry.get_job_ids() # Check failed jobs for job_id in job_ids: object_id = job_id_to_object_id(job_id) if object_id is not None: 
queue_object_ids[queue_type.value].add(object_id) queue_object_ids["failed"].add(object_id) # Check for all queues plus the catch-all failed queue queue_names = [ queue_type.value for queue_type in OBJECT_QUEUE_TYPES ] + ["failed"] for object_id in object_ids: queue_map[object_id] = [] for queue_name in queue_names: if object_id in queue_object_ids[queue_name]: queue_map[object_id].append(queue_name) return queue_map @contextmanager def lock_queues(): """ Context manager to lock all queues. This lock should be acquired when the workflow is affected directly (eg. enqueueing new jobs) or indirectly (eg. updating database so that changes an object's qualification to be enqueued or not) """ redis = get_redis_connection() lock = redis_lock.Lock(redis, "workflow-lock", expire=900) lock.acquire(blocking=True) try: yield lock finally: lock.release()
python
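A usage sketch for the helpers above: enqueue a download job while holding the workflow lock so the duplicate check stays consistent. `download_object` here is a placeholder task function, and the job ID follows the `<queue_name>_<object_id>` pattern that job_id_to_object_id parses:

def enqueue_download(object_id, download_object):
    """Enqueue a download for one object unless it is already in the workflow."""
    with lock_queues():
        if object_id in get_enqueued_object_ids():
            return None  # already pending, running or failed somewhere
        queue = get_queue(QueueType.DOWNLOAD_OBJECT)
        return queue.enqueue(
            download_object,
            object_id,
            job_id=f"{QueueType.DOWNLOAD_OBJECT.value}_{object_id}",
        )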
#!/usr/bin/env python # coding: utf-8 # # 2. Τιμές, τύποι και μεταβλητές. Συμβολοσειρές # ## Σταθερές (Constants) # # H Python δεν διαθέτει προκαθορισμένες *σταθερές* όπως άλλες γλώσσες προγραμματισμού. # Όμως κατά σύμβαση και όχι κατά κανόνα έχει συμφωνηθεί οι *σταθερές* να ονοματίζονται με κεφαλαίους χαρακτήρες. # Η αδυναμία της Python στην περίπτωση της δήλωσης *σταθερών* είναι ότι επιτρέπεται η αλλαγή των τιμών τους # Παρακάτω παρατίθεται ένα παράδειγμα δήλωσης *σταθερών*. # In[1]: RATIO_FEET_TO_METERS = 3.281 RATIO_LB_TO_KG = 2.205 PI = 3.14 # ## Κυριολεκτικές σταθερές (literal constants) # # Η κυριολεκτική *σταθερά* ή τιμή είναι ένας αριθμός, ή χαρακτήρας ή μιά συμβολοσειρά. Για παράδειγμα τα παρακάτω # αποτελούν τιμές: *3.25* (στην python η υποδιαστολή ορίζεται με . και όχι ,), *"ένα τυχαίο κείμενο"*, *5.25e-1*. # Αυτές οι τιμές δεν μεταβάλλονται κατά τη διάρκεια εκτέλεσης του προγράμματος γι' αυτό και λέγονται σταθερές. Μπορούν να εκχωρηθούν σε μεταβλητές # και να χρησιμοποιηθούν σαν τελεστέοι σε λογικές εκφράσεις ή σαν παραμέτροι σε συναρτήσεις. # # ## Τύποι δεδομένων # # Οι τιμές ανήκουν σε τρεις τύπους δεδομένων (data types) ή κλάσσεις (class): # - τους ακέραιους αριθμούς (integer) π.χ. το 15 # - τους αριθμούς κινητής υποδιαστολής (floating point) π.χ. το 201.25) # - τις συμβολοσειρές (string) π.χ. το "Time is money" # Με την εντολή `type` ο διερμηνευτής μας απαντάει με τον τύπο της τιμής, όπως παρακάτω: # In[2]: type("No news, good news.") # Η Python είναι *Dynamic typing* δηλαδή δεν ο τύπος των μεταβλητών δεν προκαθορίζεται κατά την συγγραφή αλλά κατά την εκτέλεση. # ## Κανόνες ονοματοδοσίας μεταβλητών # # Τα ονόματα των μεταβλητών στην Python υπακούουν στους παρακάτω κανόνες: # - Το όνομα μίας μεταβλητής μπορεί να ξεκινά από ένα γράμμα ή από κάτω πάυλα. # - Το όνομα μίας μεταβλητής δεν μπορεί με αριθμό. # - Το όνομα μίας μεταβλητής μπορεί να περιέχει μόνο αλφαριθμητικούς χαρακτήρες. # - Στα ονόματα των μεταβλήτών γίνεται διάκριση ανάμεσα σε πεζά και κεφαλαία (case sensitive). # - Οι δεσμευμένες λέξεις της Python (keywords) δεν μπορούν να χρησιμοποιηθούν σε ονόματα μεταβλητών. # ## Συμβολοσειρές (Strings) # Μια συμβολοσειρά είναι μια ακολουθία από χαρακτήρες όπως το `"Το πεπρωμένον φυγείν αδύνατον."`. # Μπορεί να είναι σε κάθε γλώσσα που υποστηρίζεται από το πρώτυπου Unicode. Οι συμβολοσειρές περικλείονται σε μονά, διπλά ή τριπλά εισαγωγικά. # Με τριπλά εισαγωγικά μπορούν να ενσωματωθούν με ευκολία συμβολοσειρές σε πολλές γραμμές και πολλαπλά εισαγωγικά εντός αυτόν. # Ακολουθούν παραδείγματα συμβολοσειρά. "My name is Bond, James Bond." 'There is no smoke without fire' ''' No bees no honey, no work no money. A little is better than none What’s done cannot be undone ''' # ## Χαρακτήρες διαφυγής,κενά, νέες γραμμές # Μπορούμε να σπάσουμε μια συμβολοσειρά κατά την συγγραφή σε νέα γραμμή με τον χαρακτήρα `\` και κατά την εκτέλεση με τον χαρακτήρα `\n` π.χ. # In[3]: message = 'There is no smoke without fire' print(message) # In[4]: message = 'There is no smoke \nwithout fire' print(message) # Ή να ορίσουμε κενά με το `\t` # In[5]: message = 'There is no smoke \twithout fire' print(message) # Ο χαρακτήρας `\` είναι χαρακτήρας διαφυγής που απενεργοποιεί την ειδική λειτουργία των παραπάνω ή την παράθεση εισαγωγικών μεσα σε εισαγωγικά. 
# In[6]: print('There is no smoke \\n without fire') # In[7]: print('Where there\'s a will, there\'s a way') # ## Ανεπεξέργαστες συμβολοσειρές (Raw Strings) # Παρόμοιο αποτέλεσμα με τα παραπάνω πετυχαίνουμε τις ανεπεξέργαστες συμβολοσειρές οι οποίες ορίζονται με ένα r σαν πρόθεμα # In[8]: print(r"It was made by \n συνέχεια") # ## Αφαίρεση κενών # Σε αρκετές περιπτώσεις οι συμβολοσειρές περιέχουν κενά είτε στην αρχή είτε στο τέλος. # Για παράδειγμα οι παρακάτω συμβολοσειρές δεν είναι το ίδιες για την Python. Και επιβεβαιώνεται σε μέσω ελέγχου ισότητας. # In[9]: departmentA='ΤΜΧΠΑ' departmentB = ' ΤΜΧΠΑ ' print(departmentA == departmentB) #not equal # Για την αφαίρεση των κένων αριστερά, δεξιά ή ταυτόχρονα και στις δύο πλευρές της συμβολοσειρας χρησιμοποιούμε την μέθοδο `strip` και τις παραλλαγές της `rstrip` και `lstrip` # In[10]: print(departmentB.rstrip()) print(departmentB.lstrip()) print(departmentB.strip()) # ## Συνένωση (Concatenation) συμβολοσειρών # # Η απλή παράθεση συμβολοσειρών οδηγεί στην συνενωσή τους δηλ. # In[11]: message = "Curiosity " "killed " 'the ' '''cat''' print(message) # ## Συνένωση συμβολοσειρών και μεταβλητών # # Η συνένωση μεταβλητών και συμβολοσειρών γίνεται με τον τελεστη `+`. # In[12]: city='Βόλος' perifereia='Θεσσαλία' print('O '+city+' είναι πόλη της Ελλάδα στην ' +perifereia) # ## Η μέθοδος format # Άλλη μια πιο πρακτική μέθοδος κατά την συννένωση μεταβλητών και συμβολοσειρών είναι η μέθοδος format. # # In[13]: print('O {0} έχει υψόμετρο {1} μέτρα'.format("Όλυμπος", 2918)) print('O {} έχει υψόμετρο {} μέτρα'.format("Όλυμπος", 2918)) print('O {name} έχει υψόμετρο {height} μέτρα'.format(name="Σμόλικας", height= 2637 )) # ## Δεσμευμένες λέξεις (reserved words) # Ορισμένες λέξεις έχουν ιδιαίτερη σημασία για την python και δεν μπορούν να χρησιμοποιηθούν σαν ονόματα μεταβλητών. Τα παρακάτω κομμάτια κώδικα θα εκδηλώσουν σφάλμα μεταγλώττισης. class="Πρώτο εξάμηνο"break='Πότε θα κάνουμε διάλειμμα;' # Πρόκειται για 33 λέξεις στην τρέχουσα έκδοση της Python. # Μπορούμε να δούμε ποιές είναι αυτές οι δεσμεύνες λέξεις με την παρακάτω εντολή: # In[14]: help("keywords") # ## Η εντολή help # Γενικά με την εντολή `help` καλούμε για βοήθεια και πληροφορίες την Python: # In[15]: help(print) # In[16]: help(abs) # In[17]: help(max) # ## Αλλαγή Πεζών Κεφαλαίων (Convert case) # Μπορούμε να κάνουμε αλλαγή ανάμεσα σε κεφαλαία και πεζά με τις παρακάτω μεθόδους συμβολοσειρών:`upper()`, `title()`, `lower()`. # Αξίζει να σημειώσουμε ότι οι μέθοδοι αυτές δεν έχουν επίδραση στην μεταβλητή που τις καλούμε αλλά πρέπει να επαναεκχωρήσουμε το αποτέλεσμα της μεθόδου στην μεταβλητή με το ίδιο όνομα. # In[18]: agios="άγιος νικόλαος" print(agios.upper()) print(agios) # ο agios παραμένει "άγιος νικόλαος" print(agios.title()) print('ΑΓΊΑ ΕΛΈΝΗ'.lower()) agios = agios.upper() print(agios) # ο agios μετά την εκχώρηση στην ίδια μεταβλητή γινεται ΆΓΙΟΣ ΝΙΚΌΛΑΟΣ # ## Οι συμβολοσειρές είναι μη μεταβαλλόμενη δομή δεδομένων # Οι συμβολοσειρές αποτελούνται από ακολουθίες χαρακτήρων με σταθερό μέγεθος # και μη μεταβαλλόμενα περιεχόμενα. Αυτό σημαίνει ότι δεν είναι δυνατόν να προστίθενται ή να αφαιρούνται # χαρακτήρες, ούτε να τροποποιούνται τα περιεχόμενα του αλφαριθμητικού. # Πρόκειται για μια μη μεταβαλλόμενη (immutable) δομή της Python. # Η αρίθμηση των χαρακτήρων σε ένα αλφαριθμητικό ξεκινάει από το 0. 
# # Έτσι στην συμβολοσειρά `country = Ελλάδα` έχουμε: # # `country[0]` → Ε (η αρίθμηση ξεκινά από το 0) # # `country[1]` → λ # # `country[2]` → λ # # `country[3]` → ά # # `country[4]` → δ # # `country[5]` → α # # Η παραπάνω συμβολοσειρά έχει μήκος 6 χαρακτήρες. # ## Μήκος συμβολοσειράς # Μέσω της συνάρτησης `len` η Python μας επιστρέφει το μήκος συμβολοσειράς δηλαδή το πλήθος των χαρακτήρων (μαζί με τα κενά) από τους οποιούς αποτελείται. # In[19]: message = 'Ή τώρα ή ποτέ.' len(message) # ## Η μέθοδος find # Η μέθοδος `find` μας επιτρέπει να αναζητήσουμε μια συμβολοσειρά μέσα σε μια άλλη συμβολοσειρά. # Η μέθοδος μας επιστρέφει την τοποθεσία από την ξεκινάει η αναζητούμενη συμβολοσειρά δηλαδή τον δείκτη (index) στην οποία εντοπίζεται # ο πρώτος χαρακτηρας της αναζητούμενης συμβολοσειράς μέσα στα περιεχόμενα της αρχικής συμβολοσειράς. # Στην παρακάτω συμβολοσειρά θα αναζητήσουμε την λέξη `ποτέ`. # In[20]: stixos = 'Η Ελλάδα ποτέ δεν πεθαίνει' index = stixos.find('ποτέ') # Κανονικά αν πάμε στον χαρακτήρα με ευρετηρίο (index) 9 πρέπει να εντοπίσουμε τον πρώτο χαρακτήρα της συμβολοσειράς που είναι το `π`. # Πράγματι: # In[21]: stixos[index] # Αν δεν εντοπιστεί η λέξη που αναζητούμε στην συμβολοσειρά η Python θα επιστρέψει: `-1` # In[22]: stixos.find('πάντα') # Η αναζήτηση είναι case sensitive δηλαδή γίνεται διάκριση ανάμεσα σε πεζά και κεφαλαία. # In[23]: stixos.find('Ελλάδα') # επιστρέφει τον δείκτη 2 γιατί εντοπίστηκε η λέξη κλειδί # In[24]: stixos.find('ΕΛΛΆΔΑ') # επιστρέφει -1 γιατί δεν εντοπίστηκε η λέξη κλειδί # Μια άλλη σημαντική μέθοδος των συμβολοσειρών είναι η μέθοδος `replace` κατά την οποία μπορούμε να αντικαταστήσουμε τα περιεχόμενα μιας συμβολοσειράς. Στην πρώτη παράμετρο ορίζουμε την συμβολοσειρά που θέλουμε να αντικαταστήσουμε με την δεύτερη παράμετρο. # In[25]: stixos.replace('ποτέ', 'πάντα')
python
import os,sys import numpy as np import torch import torch.nn.functional as F import h5py, time, itertools, datetime from scipy.ndimage import label from scipy.ndimage.morphology import binary_erosion from torch_connectomics.utils.net import * from torch_connectomics.utils.vis import visualize_aff def test(args, test_loader, model, device, model_io_size, volume_shape, pad_size, initial_seg=None): # switch to eval mode model.eval() volume_id = 0 ww = blend(model_io_size) NUM_OUT = args.out_channel sel_cpu = np.ones((3, 3, 3), dtype=bool) sel = torch.ones((1, 1, 3, 3, 3), dtype=torch.float32, device=device) if initial_seg is not None: result = [np.expand_dims(initial_seg, axis=0)] else: result = [np.stack([np.zeros(x, dtype=bool) for _ in range(NUM_OUT)]) for x in volume_shape] result_raw = [np.stack([np.zeros(x, dtype=np.float32) for _ in range(NUM_OUT)]) for x in volume_shape] prediction_points = [] weight = [np.zeros(x, dtype=np.float32) for x in volume_shape] print(result[0].shape, weight[0].shape) if args.test_augmentation: print("Will augment (Rotate and Flip) data during inference.") test_loader.set_out_array(result[0][0]) sz = tuple([NUM_OUT] + list(model_io_size)) start = time.time() with torch.no_grad(): itr_num = 0 while test_loader.remaining_pos() > 0 : itr_num += 1 if args.out_channel == 2: pos, volume, past_pred = test_loader.get_input_data() volume = volume.to(device) past_pred = past_pred.to(device) output_raw = model(torch.cat((volume, past_pred), 1)) else: pos, volume = test_loader.get_input_data() volume = volume.to(device) output_raw = model(volume) output = output_raw > 0.95 output_raw = output_raw.cpu().detach().numpy() for idx in range(output.shape[0]): st = pos[idx] out_mask = output[idx][0].cpu().detach().numpy().astype(bool) if out_mask[tuple(test_loader.dataset.half_input_sz)]: cc_out_mask, _ = label(out_mask) out_mask = (cc_out_mask == cc_out_mask[tuple(test_loader.dataset.half_input_sz)]) result[st[0]][0, st[1]:st[1]+sz[1], st[2]:st[2]+sz[2], st[3]:st[3]+sz[3]] |= out_mask result_raw[st[0]][:, st[1]:st[1] + sz[1], st[2]:st[2] + sz[2], st[3]:st[3] + sz[3]] \ = np.maximum(result[st[0]][:, st[1]:st[1] + sz[1], st[2]:st[2] + sz[2], st[3]:st[3] + sz[3]], \ output_raw[idx].reshape(sz)) prediction_points.append(st[1:] - test_loader.dataset.seed_points_offset) # appending center points wrt the unpadded volume if itr_num < 200: print('Iteration: ', itr_num) out_mask = torch.from_numpy(binary_erosion(out_mask, sel_cpu).astype(np.float32)).to(device) out_mask = out_mask.unsqueeze(0).unsqueeze(0) edge = (F.conv3d(out_mask, sel, padding=1))[0, 0] edge = (edge > 0) * (edge < 9) edge = F.interpolate(edge.unsqueeze(0).unsqueeze(0).float(), scale_factor=1 / 4, mode='trilinear') edge = edge > .50 edge_pos = (torch.nonzero(edge[0, 0])*4).cpu().detach().numpy().astype(np.uint32) test_loader.compute_new_pos(out_mask, edge_pos, st[1:]) end = time.time() print("prediction time:", (end-start)) for vol_id in range(len(result)): data = result[vol_id] data = data[:, pad_size[0]:-pad_size[0], pad_size[1]:-pad_size[1], pad_size[2]:-pad_size[2]] print('Output shape: ', data.shape) hf = h5py.File(args.output + '/mask_' + str(vol_id) + '.h5', 'w') hf.create_dataset('main', data=data, compression='gzip') hf.close() hf = h5py.File(args.output + '/prediction_points' + str(vol_id) + '.h5', 'w') hf.create_dataset('main', data=np.array(prediction_points), compression='gzip') hf.close() data = result_raw[vol_id] data = data[:, pad_size[0]:-pad_size[0], pad_size[1]:-pad_size[1], 
pad_size[2]:-pad_size[2]] data = (data*255).astype(np.uint8) hf = h5py.File(args.output + '/mask_raw' + str(vol_id) + '.h5', 'w') hf.create_dataset('main', data=data, compression='gzip') hf.close() def get_augmented(volume): # perform 16 Augmentations as mentioned in Kisuks thesis vol0 = volume vol90 = torch.rot90(vol0, 1, [3, 4]) vol180 = torch.rot90(vol90, 1, [3, 4]) vol270 = torch.rot90(vol180, 1, [3, 4]) vol0f = torch.flip(vol0, [3]) vol90f = torch.flip(vol90, [3]) vol180f = torch.flip(vol180, [3]) vol270f = torch.flip(vol270, [3]) vol0z = torch.flip(vol0, [2]) vol90z = torch.flip(vol90, [2]) vol180z = torch.flip(vol180, [2]) vol270z = torch.flip(vol270, [2]) vol0fz = torch.flip(vol0f, [2]) vol90fz = torch.flip(vol90f, [2]) vol180fz = torch.flip(vol180f, [2]) vol270fz = torch.flip(vol270f, [2]) augmented_volumes = [vol0, vol90, vol180, vol270, vol0f, vol90f, vol180f, vol270f, vol0z, vol90z, vol180z, vol270z, vol0fz, vol90fz, vol180fz, vol270fz] return augmented_volumes def combine_augmented(outputs): assert len(outputs) == 16 for i in range(8, 16): outputs[i] = torch.flip(outputs[i], [2]) for i in range(4, 8): outputs[i] = torch.flip(outputs[i], [3]) for i in range(12, 16): outputs[i] = torch.flip(outputs[i], [3]) for i in range(1, 16, 4): outputs[i] = torch.rot90(outputs[i], -1, [3, 4]) for i in range(2, 16, 4): outputs[i] = torch.rot90(outputs[i], -1, [3, 4]) outputs[i] = torch.rot90(outputs[i], -1, [3, 4]) for i in range(3, 16, 4): outputs[i] = torch.rot90(outputs[i], 1, [3, 4]) # output = torch.zeros_like(outputs[0], dtype=torch.float64) # for i in range(len(outputs)): # output += outputs[i].double() # output = output / 16.0 for i in range(len(outputs)): outputs[i] = outputs[i].unsqueeze(0) output = torch.min(torch.cat(outputs, 0), 0)[0] return output, outputs def main(): args = get_args(mode='test') print('0. initial setup') model_io_size, device = init(args) print('model I/O size:', model_io_size) print('1. setup data') test_loader, volume_shape, pad_size, initial_seg = get_input(args, model_io_size, 'test') print('2. setup model') model, _ = setup_model(args, device, exact=True, model_io_size=model_io_size) print('3. start testing') test(args, test_loader, model, device, model_io_size, volume_shape, pad_size, initial_seg) print('4. finish testing') if __name__ == "__main__": main()
python
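A quick way to sanity-check that `combine_augmented` really inverts `get_augmented` is to feed the augmented inputs back in as if they were model outputs: after the inverse flips/rotations all 16 tensors coincide, so their element-wise minimum must equal the original volume. A small sketch, assuming both helpers are importable from this module, that PyTorch is installed, and that the H/W dimensions are square (rot90 over dims 3 and 4 needs that for shapes to match):

import torch
# from <this module> import get_augmented, combine_augmented  # assumed importable

vol = torch.rand(1, 1, 8, 16, 16)             # (N, C, D, H, W) with H == W
augmented = get_augmented(vol)                 # 16 rotated/flipped copies
combined, _ = combine_augmented(list(augmented))
assert torch.allclose(combined, vol)           # every inverse-transformed copy equals vol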
import ML
import function.saveNewsList
import hook.hooks
import job
import json
from nose.tools import with_setup
from ML import Server


def setup_func():
    ML.init(
        "57f9edc887d4a7e337b8c231",
        master_key="elhmazJfd29ZTFBhR0M3SmJ0R2N6UQ",
    )


@with_setup(setup_func)
def test_saveNewsDetail():
    fileName = "/Users/mac/Downloads/newslist.json"
    # Read the fixture file; fall back to an empty payload if it cannot be read.
    try:
        with open(fileName) as fileObject:
            news = fileObject.read()
    except IOError:
        news = ""
    response = Server.callFunction('saveNewsList', data=json.dumps({"content": news}))
    print("response: " + response.data)
python
# -*- coding: utf-8 -*- # Generated by Django 1.10 on 2016-08-11 15:42 from django.db import migrations import utilities.fields class Migration(migrations.Migration): dependencies = [ ('dcim', '0017_rack_add_role'), ] operations = [ migrations.AddField( model_name='device', name='asset_tag', field=utilities.fields.NullableCharField(blank=True, help_text=b'A unique tag used to identify this device', max_length=50, null=True, unique=True, verbose_name=b'Asset tag'), ), ]
python
import numpy as np import sqlalchemy from sqlalchemy.ext.automap import automap_base from sqlalchemy.orm import Session from sqlalchemy import create_engine, func from flask import Flask, jsonify import datetime from dateutil.relativedelta import relativedelta from datetime import datetime from sqlalchemy import desc import pandas as pd from flask import Response ################################################# # Database Setup ################################################# engine = create_engine("sqlite:///./Resources/hawaii.sqlite") # reflect an existing database into a new model Base = automap_base() # reflect the tables Base.prepare(engine, reflect=True) # Save reference to the table Measurement = Base.classes.measurement Station = Base.classes.station ################################################# # Flask Setup ################################################# app = Flask(__name__) ################################################# # Flask Routes ################################################# @app.route("/") def welcome(): """List all available api routes.""" return ( f"Available Routes:<br/>" f"/api/v1.0/precipitation<br/>" f"/api/v1.0/stations<br/>" f"/api/v1.0/tobs<br/>" f"/api/v1.0/start/end" ) @app.route("/api/v1.0/precipitation") def precipitation(): # Create our session (link) from Python to the DB session = Session(engine) """Return a list of all dates and precipitation""" # Query all dates and precipitation most_recent_date = '2017-08-23' last_year_date = (datetime.strptime(most_recent_date, "%Y-%m-%d") + relativedelta(years=-1)).date() results = session.query(Measurement.date, Measurement.prcp).\ filter(Measurement.date >= last_year_date).\ filter(Measurement.prcp != None).\ order_by(Measurement.date).all() session.close() all_dates_prcp=[] date_dict = {date : prcp for date, prcp in results} return jsonify(date_dict) @app.route("/api/v1.0/stations") def stations(): # Create our session (link) from Python to the DB session = Session(engine) """Return a list of all stations""" # Query all stations results = session.query(Station.name, Station.station).order_by(Station.name).all() session.close() all_stations=[] # station = list(np.ravel(results)) for name, station in results: station_dict = {} station_dict['name']=name station_dict['station']=station all_stations.append(station_dict) return jsonify(all_stations) @app.route("/api/v1.0/tobs") def tobs(): # Create our session (link) from Python to the DB session = Session(engine) # Find the most recent date in the data set. most_recent_date = session.query(func.max(Measurement.date)).one() most_recent_date = most_recent_date[0] # Calculate the date one year from the last date in data set. 
last_year_date = datetime.strptime(most_recent_date, "%Y-%m-%d") + relativedelta(years=-1) last_year_date # Query for the dates and temperature observations of the most active station for the last year of data station_list = session.query(Measurement.station,func.count(Measurement.station).label('station count')).\ group_by(Measurement.station).\ order_by(desc('station count')).all() most_active_station_id = station_list[0][0] temp_observation_data = session.query(Measurement.date, Measurement.tobs).\ filter(Measurement.station == most_active_station_id).\ filter(Measurement.date <= most_recent_date).\ filter(Measurement.date >= last_year_date).order_by(Measurement.date) session.close() all_tobs_for_last_year=[] for date, tobs in temp_observation_data: tobs_dict = {} tobs_dict['date']=date tobs_dict['tobs']=tobs all_tobs_for_last_year.append(tobs_dict) return jsonify(all_tobs_for_last_year) @app.route("/api/v1.0/<start>") @app.route("/api/v1.0/<start>/<end>") def temperature_date(start=None, end=None): # Create our session (link) from Python to the DB session = Session(engine) # Find the most recent date in the data set. most_recent_date = session.query(func.max(Measurement.date)).one() most_recent_date = most_recent_date[0] # If start date is greater than most last date in data if(start > most_recent_date): return "Start Date is out of the range." # If end is is not given by user then assign last available date in data if(end is None): end = most_recent_date if(start <= end): temp_stats = session.query(func.min(Measurement.tobs),func.max(Measurement.tobs),func.avg(Measurement.tobs)).\ filter(Measurement.date >= start).\ filter(Measurement.date <= end).all() temp_dict = [] temp_dict.append(f'Minimum Temperature: {temp_stats[0][0]}') temp_dict.append(f'Maximum Temperature: {temp_stats[0][1]}') temp_dict.append(f'Average Temperature: {round(temp_stats[0][2],1)}') return jsonify(temp_dict) else: return "Start Date should not be greater than End Date" if __name__ == '__main__': app.run(debug=True)
python
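With the app running locally (the Flask development server defaults to http://127.0.0.1:5000), the routes above can be exercised with `requests`. A short usage sketch, assuming the default host/port and that the chosen dates fall inside the dataset's range:

import requests

base = 'http://127.0.0.1:5000'  # assumed default Flask dev-server address

# Last year of precipitation readings, keyed by date
prcp = requests.get(base + '/api/v1.0/precipitation').json()

# Min/avg/max temperature between two dates (the <end> part is optional)
stats = requests.get(base + '/api/v1.0/2016-08-23/2017-08-23').json()
print(stats)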
#!/usr/bin/env python import networkx as nx import sys with open('soc-sign-bitcoinotc.csv', 'r') as fin: with open('bitcoinotc.in', 'a') as fout: G = nx.DiGraph() fout.write("Relation Edges" + '\n') fout.write("From To" + '\n') for line in fin: n1, n2, rating, time = line.split(',') fout.write(str(n1) + ' ' + str(n2) + ' ' + str(10 - int(rating)) + '\n') G.add_edge(int(n1), int(n2)) fout.write("End of Edges" + '\n') print "===== BitcoinOTC ====" print "Number of nodes = " + str(nx.number_of_nodes(G)) print "Number of edges = " + str(nx.number_of_edges(G)) degrees = nx.degree(G) degree_list = [d for (_, d) in degrees] print "Max degree = " + str(max(degree_list)) avg_degree = sum(degree_list) * 1.0 / len(degree_list) print "Average degree = " + str(avg_degree)
python
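The same degree statistics can be computed for any directed graph; a small self-contained sketch on a toy DiGraph (not the bitcoinotc data):

import networkx as nx

G = nx.DiGraph()
G.add_edges_from([(1, 2), (2, 3), (3, 1), (1, 3)])

degree_list = [d for _, d in nx.degree(G)]   # in-degree + out-degree per node
print("nodes:", nx.number_of_nodes(G))
print("edges:", nx.number_of_edges(G))
print("max degree:", max(degree_list))
print("avg degree:", sum(degree_list) / float(len(degree_list)))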
from __future__ import division import copy from Game.deck import * from Game.column import * from Game.heap import * NB_CARDS_SUBDECK = 24 NB_HEAPS = 4 NB_COLUMNS = 7 IN_HEAP = 'heap' IN_DECK = 'deck' IN_COL = 'col' UNDRAWN = 'undraw' ACTIONS = ['draw', 'deck-heap', 'deck-col', 'col-heap', 'heap-col', 'col-col'] class Solitaire_Engine: def __init__(self): # State values dictionary self.state_dict = dict() for i in range(33): if i <= 6: self.state_dict[IN_COL + str(i)] = i elif i == 7: self.state_dict[IN_HEAP] = i elif i == 8: self.state_dict[UNDRAWN] = i else: self.state_dict[IN_DECK + str(i-9)] = i # State information for each card self.cards_state = dict() # Main deck where all cards are drawn self.main_deck = Deck() # Upper-left sub-deck self.sub_deck = [] self.sub_deck_index = [] for i in range(NB_CARDS_SUBDECK): card = self.main_deck.draw() self.cards_state[(card.rank, card.color)] = self.state_dict[IN_DECK + str(i)] self.sub_deck.append(card) # Heaps init self.heaps = [] for i in range(NB_HEAPS): self.heaps.append(Heap()) # Columns init self.columns = [] for i in range(NB_COLUMNS): self.columns.append(Column(i)) card = self.main_deck.draw() self.cards_state[(card.rank, card.color)] = self.state_dict[IN_COL + str(i)] self.columns[i].reveal_card(card) # Reward for scoring self.reward = 0 self.time = 0 self.score = 0 # States stack for avoiding cycles _, state = self.get_state() self.states_stack = [state] # Actions dictionary self.actions_dict = dict() self.legal_actions() self.action_index_name = dict() self.index_action_to_name() def is_over(self): return self.is_won() or self.is_lost() def is_won(self): """ Checks if the game is won. """ for heap in self.heaps: if not heap.is_complete(): return False return True def is_lost(self): """ Checks if a game is lost. 
""" if len(self.actions_dict) == 0: return True else: return False def get_reward(self): """ Retrieves the result of the game """ return self.reward def can_draw(self): """ Checks a if a draw can be performed """ return len(self.sub_deck) > 0 def draw(self): """ Draw cards from the sub-deck """ if not self.can_draw(): return False if not self.sub_deck_index: start = 0 self.sub_deck_index = [i for i in range(min(3, len(self.sub_deck)))] else: start = self.sub_deck_index[-1] + 1 if start >= len(self.sub_deck): self.sub_deck_index = [i for i in range(min(3, len(self.sub_deck)))] else: end = min(start+3, len(self.sub_deck)) self.sub_deck_index = [i for i in range(start, end)] self.reward = 0 def can_deck_to_heap(self, heap, nb_draw): """ Checks if the card from the deck can be added to a heap :param heap: (int) Number of the heap where to move the card :param nb_draw: (int) Number of times to draw before actually playing """ game_copy = copy.deepcopy(self) if not game_copy.can_draw(): return False for i in range(nb_draw): game_copy.draw() if not game_copy.sub_deck_index: return False index = game_copy.sub_deck_index[-1] card = game_copy.sub_deck[index] if not game_copy.heaps[heap].can_add(card): return False return True def deck_to_heap(self, heap, nb_draw): """ Moves the top drawn of the deck to a given heap :param heap: (int) number of the heap where to move the card :param nb_draw: (int) Number of times to draw before actually playing """ if not self.can_deck_to_heap(heap, nb_draw): return False for i in range(nb_draw): self.draw() index_heap = self.sub_deck_index.pop() card = self.sub_deck.pop(index_heap) self.heaps[heap].add_card(card) # State update for moved card self.cards_state[(card.rank, card.color)] = self.state_dict[IN_HEAP] while index_heap < len(self.sub_deck): card = self.sub_deck[index_heap] self.cards_state[(card.rank, card.color)] = self.state_dict[IN_DECK + str(index_heap)] index_heap += 1 # Reward update self.reward = 10 def can_deck_to_column(self, col, nb_draw): """ Checks if the card from the deck can be added to a column :param heap: (int) Number of the column where to move the card :param nb_draw: (int) Number of times to draw before actually playing """ game_copy = copy.deepcopy(self) if not game_copy.can_draw(): return False for i in range(nb_draw): game_copy.draw() if not game_copy.sub_deck_index: return False index = game_copy.sub_deck_index[-1] card = game_copy.sub_deck[index] if not game_copy.columns[col].can_add(card): return False return True def deck_to_column(self, col, nb_draw): """ Moves the top drawn of the deck to a given column :param col: (int) index of the column where to move the card :param nb_draw: (int) Number of times to draw before actually playing """ if not self.can_deck_to_column(col, nb_draw): return for i in range(nb_draw): self.draw() index_heap = self.sub_deck_index.pop() card = self.sub_deck.pop(index_heap) self.columns[col].add_cards([card]) # State update for moved card self.cards_state[(card.rank, card.color)] = self.state_dict[IN_COL + str(col)] while index_heap < len(self.sub_deck): card = self.sub_deck[index_heap] self.cards_state[(card.rank, card.color)] = self.state_dict[IN_DECK + str(index_heap)] index_heap += 1 # Reward update self.reward = 5 def can_column_to_heap(self, col, heap): """ Checks if the top card of a column can be added to a heap :param col: (int) index of the column :param heap: (int) index of the heap """ if not self.columns[col].can_remove(1): return False card = self.columns[col].cards[-1] if not 
self.heaps[heap].can_add(card): return False return True def column_to_heap(self, col, heap): """ Moves the top card of a column to a heap :param col: (int) index of the column :param heap: (int) index of the heap """ column = self.columns[col] if not self.can_column_to_heap(col, heap): return cards = column.remove_cards(1) self.heaps[heap].add_card(cards[0]) # State update for moved card self.cards_state[(cards[0].rank, cards[0].color)] = self.state_dict[IN_HEAP] # Reward update self.reward = 10 # Check if a new card has to be revealed if column.need_reveal(): card = self.main_deck.draw(True) self.cards_state[(card.rank, card.color)] = self.state_dict[IN_COL + str(col)] column.reveal_card(card) self.reward += 5 def can_heap_to_column(self, heap, col): """ Checks if a card can be moved from a heap to a column :param heap: (int) index of the heap :param col: (int) index of the column """ if not self.heaps[heap].can_remove(): return False card = self.heaps[heap].cards[-1] # Useless to remove an Ace from the heap if card.rank == ACE: return False if not self.columns[col].can_add(card): return False return True def heap_to_column(self, heap, col): """ Moves a card from a heap to a column :param heap: (int) index of the heap :param col: (int) index of the column """ card = self.heaps[heap].remove_card() self.columns[col].add_cards([card]) # State update for moved card self.cards_state[(card.rank, card.color)] = self.state_dict[IN_COL + str(col)] # Reward update self.reward = -15 def can_column_to_column(self, col1, col2, nb_cards): if not self.columns[col1].can_remove(nb_cards): return False column_copy = copy.deepcopy(self.columns[col1]) cards = column_copy.remove_cards(nb_cards) if not self.columns[col2].can_add(cards[0]): return False return True def column_to_column(self, col1, col2, nb_cards): """ Moves a number of card from a column to another :param col1: (int) index of the first column :param col2: (int) index of the second column :param nb_cards: (int) number of cards to move """ column = self.columns[col1] cards = column.remove_cards(nb_cards) self.columns[col2].add_cards(cards) # State update for moved cards for card in cards: self.cards_state[(card.rank, card.color)] = self.state_dict[IN_COL + str(col2)] self.reward = 0 # Check if a new card has to be revealed if column.need_reveal(): card = self.main_deck.draw(True) self.cards_state[(card.rank, card.color)] = self.state_dict[IN_COL + str(col1)] column.reveal_card(card) self.reward += 5 def legal_actions(self): """ Retrieves all the legal actions of the current states. 
""" index_action = 0 self.actions_dict.clear() table = self.actions_dict cards_list = [] nb_draw = 0 game_copy = copy.deepcopy(self) if game_copy.sub_deck_index: card = game_copy.sub_deck[game_copy.sub_deck_index[-1]] cards_list.append(card) # For each configuration of the sub-deck, check possible actions if game_copy.sub_deck: while True: # Check deck-heap for heap in range(NB_HEAPS): if game_copy.can_deck_to_heap(heap, 0): table[index_action] = (ACTIONS[1], [heap, nb_draw]) index_action += 1 # Check deck-column for col in range(NB_COLUMNS): if game_copy.can_deck_to_column(col, 0): table[index_action] = (ACTIONS[2], [col, nb_draw]) index_action += 1 # Update sub-deck game_copy.draw() card = game_copy.sub_deck[game_copy.sub_deck_index[-1]] if card in cards_list: break cards_list.append(card) nb_draw += 1 index_action = int(NB_CARDS_SUBDECK/3 +1) * NB_HEAPS + int(NB_CARDS_SUBDECK/3 +1) * NB_COLUMNS # Check col-heap for heap in range(NB_HEAPS): for col1 in range(NB_COLUMNS): if self.can_column_to_heap(col1, heap): table[index_action] = (ACTIONS[3], [col1, heap]) index_action += 1 # Check heap-col for heap in range(NB_HEAPS): for col1 in range(NB_COLUMNS): if self.can_heap_to_column(heap, col1): game_copy = copy.deepcopy(self) game_copy.heap_to_column(heap, col1) _, state = game_copy.get_state() if state not in self.states_stack: table[index_action] = (ACTIONS[4], [heap, col1]) index_action += 1 # Check col-col for col1 in range(NB_COLUMNS): for col2 in range(NB_COLUMNS): if col1 != col2: max_cards = len(self.columns[col1].cards) max_nb_cards = -1 # Choose action with the most cards for nb_cards in range(1, max_cards+1): if self.can_column_to_column(col1, col2, nb_cards): game_copy = copy.deepcopy(self) game_copy.column_to_column(col1, col2, nb_cards) _, state = game_copy.get_state() if state not in self.states_stack: max_nb_cards = nb_cards if max_nb_cards > -1: table[index_action] = (ACTIONS[5], [col1, col2, max_nb_cards]) index_action += 1 return list(self.actions_dict.keys()) def index_action_to_name(self): """ Set the dictionary for the action names """ index = 0 for nb_draw in range(int(NB_CARDS_SUBDECK/3)+1): for i in range(NB_HEAPS): self.action_index_name[index] = 'deck-to-heap'+ str(i) + "-"+ str(nb_draw) + "draws" index += 1 for i in range(NB_COLUMNS): self.action_index_name[index] = 'deck-to-col' + str(i) + "-"+ str(nb_draw) + "draws" index += 1 for i in range(NB_HEAPS): for j in range(NB_COLUMNS): self.action_index_name[index] = 'col'+ str(j) + '-to-heap' + str(i) index += 1 for i in range(NB_HEAPS): for j in range(NB_COLUMNS): self.action_index_name[index] = 'heap' + str(i) + '-to-col' + str(j) index += 1 for i in range(NB_COLUMNS): for j in range(NB_COLUMNS): if i != j: self.action_index_name[index] = 'col' + str(i) +\ '-to-col' + str(j) index += 1 def get_header(self): """ Retrieves the header of the data file of a Solitaire game """ header = [] for color in COLORS: for rank in CARDS: header.append(str(rank)+color) for action in list(self.action_index_name.values()): header.append(action) header.append('reward') return header def play(self, action): """ Plays an action :param action: (int) key of the action to play """ value = self.actions_dict[action] name = value[0] args = value[1] if name == ACTIONS[0]: self.draw() elif name == ACTIONS[1]: self.deck_to_heap(args[0], args[1]) elif name == ACTIONS[2]: self.deck_to_column(args[0], args[1]) elif name == ACTIONS[3]: self.column_to_heap(args[0], args[1]) elif name == ACTIONS[4]: self.heap_to_column(args[0], args[1]) else: 
self.column_to_column(args[0], args[1], args[2]) self.legal_actions() # Score updates self.score += self.reward self.time += 1 # Stack update if name != ACTIONS[0]: _, state = self.get_state() self.states_stack.append(state) def chance_action(self, action): """ Checks if an action involves randomness. :param action: The action to perform """ game = copy.deepcopy(self) cards_before = len(game.main_deck.cards) game.play(action) cards_after = len(game.main_deck.cards) if cards_after >= cards_before: return False return True def get_state(self): """ Provides an aggregation of the state of the game. """ state = [] for color in COLORS: for rank in CARDS: if (rank, color) in self.cards_state: state.append(self.cards_state[(rank, color)]) else: state.append(self.state_dict[UNDRAWN]) return state, ''.join(str(x) for x in state) def render(self): """ Renders game in the console """ print("State:") print("Draw deck:") string = str(len(self.sub_deck))+" cards - "+\ " ".join(str(x) for x in self.sub_deck_index) if self.sub_deck_index: card = self.sub_deck[self.sub_deck_index[-1]] string += " - "+str(card.rank)+card.color print(string) print("Heaps:") string = "" for heap in self.heaps: if heap.cards: card = heap.cards[-1] string += str(card.rank)+card.color+" " else: string += "-1 " print(string) print("Columns:") for column in self.columns: string = "" string += ("? " * column.nb_todraw) for card in column.cards: string += str(card.rank)+card.color+" " print(string) print("Score") print(str(self.score))
python
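The engine's intended loop is: call `legal_actions()` to get the keys of `actions_dict`, pick one, and hand it to `play()` until `is_over()` is true. A minimal random-playout sketch, assuming `Solitaire_Engine` and its `Game` package (Deck/Column/Heap) are importable; an unlucky deal may take many iterations before the game is lost:

import random
# from <this module> import Solitaire_Engine  # assumed importable

game = Solitaire_Engine()
while not game.is_over():
    actions = game.legal_actions()     # keys into game.actions_dict
    if not actions:
        break                          # no legal action left -> game is lost
    game.play(random.choice(actions))
print("won" if game.is_won() else "lost", "score:", game.score)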
from .transformer_factory import function_transformer # noqa
python
import sys import requests import mysql.connector import datetime from mysql.connector import errorcode from bs4 import BeautifulSoup def get_link(argv): hari = str(datetime.datetime.now().day) bulan = str(datetime.datetime.now().month) tahun = str(datetime.datetime.now().year) url = "http://www.viva.co.id/indeks/berita/sainstek/"+tahun+"/"+bulan+"/"+hari url = "http://www.viva.co.id/indeks/berita/sainstek/2015/6/3" r = requests.get(url) soup = BeautifulSoup(r.content) if r.status_code == 400 or r.status_code == 408 or r.status_code == 302: print("Error! Halaman gagal dimuat") else: g_data = soup.find("ul", {"class": "indexlist"}) if g_data.li is None: print("Tidak ada berita terbaru") else: for list in g_data.findAll("li"): alamat = list.a.get("href") string = str(alamat) if url_check(string): get_content(string) pass def url_check(url): r = requests.get(url, allow_redirects=False) #print(r.status_code, r.history) if r.status_code == 302: return False else: return True def get_content(url): r = requests.get(url, allow_redirects=False) soup = BeautifulSoup(r.content) title = soup.title.text date = soup.find("div", {"class": "date"}) content = soup.find(id="article-content") image = soup.find("div", {"class": "thumbcontainer"}) news_title = title news_img = image.img.get("src") news_date = date.contents[0] for tag in content.find_all('aside'): tag.replaceWith('') for tag in content.find_all('script'): tag.replaceWith('') for tag in content.findAll("div", {"class": ['portlet', 'sideskycrapper']}): tag.replaceWith('') news_content = content.text news_content = news_content.strip('\t\r\n') if not title: print("Data tidak ada") else: insert_data(news_title, news_date, news_content, news_img) pass def insert_data(title, date, content, img): try: cnx = mysql.connector.connect(user='root', password='root', database='web') cursor = cnx.cursor() title = (title.replace("'", "\\\'")).replace('"', '\\\"') content = (content.replace("'", "\\\'")).replace('"', '\\\"') query = "INSERT INTO berita (news_title, news_date, news_content, news_img) VALUES ('"+title+"','"+date+"','"+content+"','"+img+"')" cursor.execute(query) #print(query) cnx.commit() except mysql.connector.Error as err: if err.errno == errorcode.ER_ACCESS_DENIED_ERROR: print("Something is wrong with your user name or password") elif err.errno == errorcode.ER_BAD_DB_ERROR: print("Database does not exist") else: print(err) else: cursor.close() cnx.close() pass if __name__ == "__main__": get_link(sys.argv)
python
from ..registry_tools import iso_register from .core import UnitedStates @iso_register('US-NY') class NewYork(UnitedStates): """New York""" include_lincoln_birthday = True include_election_day_every_year = True
python
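`UnitedStates` here comes from a workalendar-style `core` module, so the subclass is used like any other calendar. A short usage sketch, assuming the usual workalendar API (`holidays`, `is_working_day`) and an import path local to this package:

import datetime
# from <this package> import NewYork  # assumed import path

cal = NewYork()
for day, label in cal.holidays(2021):   # list of (date, holiday name) pairs
    print(day, label)
# Expected False if Lincoln's Birthday (Feb 12) is observed as a holiday here
print(cal.is_working_day(datetime.date(2021, 2, 12)))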
"""jc - JSON CLI output utility `sfdisk` command output parser Supports the following `sfdisk` options: - `-l` - `-F` - `-d` (deprecated - only for older versions of util-linux) - `-uM` (deprecated - only for older versions of util-linux) - `-uC` (deprecated - only for older versions of util-linux) - `-uS` (deprecated - only for older versions of util-linux) - `-uB` (deprecated - only for older versions of util-linux) Usage (cli): # sfdisk -l | jc --sfdisk or # jc sfdisk -l Usage (module): import jc result = jc.parse('sfdisk', sfdisk_command_output) or import jc.parsers.sfdisk result = jc.parsers.sfdisk.parse(sfdisk_command_output) Schema: [ { "disk": string, "disk_size": string, "free_disk_size": string, "bytes": integer, "free_bytes": integer, "sectors": integer, "free_sectors": integer, "cylinders": integer, "heads": integer, "sectors_per_track": integer, "units": string, "logical_sector_size": integer, "physical_sector_size": integer, "min_io_size": integer, "optimal_io_size": integer, "disk_label_type": string, "disk_identifier": string, "disk_model": string, "partitions": [ { "device": string, "boot": boolean, "start": integer, "end": integer, "size": string, # [0] "cyls": integer, "mib": integer, "blocks": integer, "sectors": integer, "id": string, "system": string, "type": string } ] } ] [0] will be integer when using deprecated -d sfdisk option Examples: # sfdisk -l | jc --sfdisk -p [ { "disk": "/dev/sda", "cylinders": 2610, "heads": 255, "sectors_per_track": 63, "units": "cylinders of 8225280 bytes, blocks of 1024 bytes, ...", "partitions": [ { "device": "/dev/sda1", "boot": true, "start": 0, "end": 130, "cyls": 131, "blocks": 1048576, "id": "83", "system": "Linux" }, { "device": "/dev/sda2", "boot": false, "start": 130, "end": 2610, "cyls": 2481, "blocks": 19921920, "id": "8e", "system": "Linux LVM" }, { "device": "/dev/sda3", "boot": false, "start": 0, "end": null, "cyls": 0, "blocks": 0, "id": "0", "system": "Empty" }, { "device": "/dev/sda4", "boot": false, "start": 0, "end": null, "cyls": 0, "blocks": 0, "id": "0", "system": "Empty" } ] }, { "disk": "/dev/mapper/centos-root", "cylinders": 2218, "heads": 255, "sectors_per_track": 63 }, { "disk": "/dev/mapper/centos-swap", "cylinders": 261, "heads": 255, "sectors_per_track": 63 } ] # sfdisk -l | jc --sfdisk -p -r [ { "disk": "/dev/sda", "cylinders": "2610", "heads": "255", "sectors_per_track": "63", "units": "cylinders of 8225280 bytes, blocks of 1024 bytes, co...", "partitions": [ { "device": "/dev/sda1", "boot": "*", "start": "0+", "end": "130-", "cyls": "131-", "blocks": "1048576", "id": "83", "system": "Linux" }, { "device": "/dev/sda2", "boot": null, "start": "130+", "end": "2610-", "cyls": "2481-", "blocks": "19921920", "id": "8e", "system": "Linux LVM" }, { "device": "/dev/sda3", "boot": null, "start": "0", "end": "-", "cyls": "0", "blocks": "0", "id": "0", "system": "Empty" }, { "device": "/dev/sda4", "boot": null, "start": "0", "end": "-", "cyls": "0", "blocks": "0", "id": "0", "system": "Empty" } ] }, { "disk": "/dev/mapper/centos-root", "cylinders": "2218", "heads": "255", "sectors_per_track": "63" }, { "disk": "/dev/mapper/centos-swap", "cylinders": "261", "heads": "255", "sectors_per_track": "63" } ] """ import jc.utils import jc.parsers.universal class info(): """Provides parser metadata (version, author, etc.)""" version = '1.2' description = '`sfdisk` command parser' author = 'Kelly Brazil' author_email = '[email protected]' compatible = ['linux'] magic_commands = ['sfdisk'] __version__ = info.version def 
_process(proc_data): """ Final processing to conform to the schema. Parameters: proc_data: (List of Dictionaries) raw structured data to process Returns: List of Dictionaries. Structured to conform to the schema. """ int_list = [ 'cylinders', 'heads', 'sectors_per_track', 'start', 'end', 'cyls', 'mib', 'blocks', 'sectors', 'bytes', 'logical_sector_size', 'physical_sector_size', 'min_io_size', 'optimal_io_size', 'free_bytes', 'free_sectors' ] bool_list = ['boot'] for entry in proc_data: for key in entry: if key in int_list: entry[key] = jc.utils.convert_to_int(entry[key].replace('-', '')) if 'partitions' in entry: for p in entry['partitions']: for key in p: # legacy conversion for -d option if key == 'size': if p[key].isnumeric(): p[key] = jc.utils.convert_to_int(p[key]) # normal conversions if key in int_list: p[key] = jc.utils.convert_to_int(p[key].replace('-', '')) if key in bool_list: p[key] = jc.utils.convert_to_bool(p[key]) return proc_data def parse(data, raw=False, quiet=False): """ Main text parsing function Parameters: data: (string) text data to parse raw: (boolean) unprocessed output if True quiet: (boolean) suppress warning messages if True Returns: List of Dictionaries. Raw or processed structured data. """ jc.utils.compatibility(__name__, info.compatible, quiet) jc.utils.input_type_check(data) raw_output = [] item = {} partitions = [] option = '' section = '' if jc.utils.has_data(data): for line in data.splitlines(): # deprecated - only for older versions of util-linux if line.startswith('# partition table of'): if item: raw_output.append(item) item = {} partitions = [] option = 'd' item['disk'] = line.split()[4] continue # deprecated - only for older versions of util-linux if option == 'd': if line.startswith('unit: '): item['units'] = line.split()[1] section = 'partitions' continue if section == 'partitions' and line: part = {} part['device'] = line.split()[0] line = line.replace(',', ' ').replace('=', ' ') part['start'] = line.split()[3] part['size'] = line.split()[5] part['id'] = line.split()[7] part['boot'] = '*' if 'bootable' in line else None partitions.append(part) item['partitions'] = partitions continue else: # older versions of util-linux # Disk /dev/sda: 2610 cylinders, 255 heads, 63 sectors/track if line.startswith('Disk ') and 'sectors/track' in line: if item: raw_output.append(item) item = {} partitions = [] line = line.replace(':', '').replace(',', '') fields = line.split() item['disk'] = fields[1] item['cylinders'] = fields[2] item['heads'] = fields[4] item['sectors_per_track'] = fields[6] continue # util-linux v2.32.0+ (?) 
# Disk /dev/sda: 20 GiB, 21474836480 bytes, 41943040 sectors if line.startswith('Disk ') and line.endswith('sectors'): if item: raw_output.append(item) item = {} partitions = [] line = line.replace(':', '').replace(',', '') fields = line.split() item['disk'] = fields[1] item['disk_size'] = ' '.join(fields[2:4]) item['bytes'] = fields[4] item['sectors'] = fields[6] continue if line.startswith('Disk model: '): item['disk_model'] = line.split(':', maxsplit=1)[1].strip() continue if line.startswith('Sector size (logical/physical)'): fields = line.split() item['logical_sector_size'] = fields[3] item['physical_sector_size'] = fields[6] continue if line.startswith('I/O size (minimum/optimal)'): fields = line.split() item['min_io_size'] = fields[3] item['optimal_io_size'] = fields[6] continue if line.startswith('Disklabel type'): item['disk_label_type'] = line.split(':', maxsplit=1)[1].strip() continue if line.startswith('Disk identifier'): item['disk_identifier'] = line.split(':', maxsplit=1)[1].strip() continue if line.startswith('Units: '): item['units'] = line.split(':')[1].strip() continue # sfdisk -F if line.startswith('Unpartitioned space'): line = line.replace(':', '').replace(',', '') fields = line.split() item['disk'] = fields[2] item['free_disk_size'] = ' '.join(fields[3:5]) item['free_bytes'] = fields[5] item['free_sectors'] = fields[7] continue # partition lines if 'Start' in line and 'End' in line and ('Sectors' in line or 'Device' in line): section = 'partitions' partitions.append(line.lower().replace('#', ' ')) continue if section == 'partitions' and line: partitions.append(line) continue if section == 'partitions' and line == '': item['partitions'] = jc.parsers.universal.sparse_table_parse(partitions) section = '' partitions = [] continue # get final partitions if there are any left over if section == 'partitions' and option != 'd' and partitions: item['partitions'] = jc.parsers.universal.sparse_table_parse(partitions) if item: raw_output.append(item) if raw: return raw_output else: return _process(raw_output)
python
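As the module docstring shows, the parser is normally reached through the high-level `jc.parse` entry point. A short sketch that captures real `sfdisk -l` output and parses it (assumes `jc` is installed, `sfdisk` is available, and the command is run with sufficient privileges):

import subprocess
import jc

# Capture `sfdisk -l` output (usually requires root) and parse it.
cmd_output = subprocess.run(
    ['sfdisk', '-l'], capture_output=True, text=True
).stdout
disks = jc.parse('sfdisk', cmd_output)

for disk in disks:
    print(disk['disk'], len(disk.get('partitions', [])), 'partitions')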
from __future__ import print_function
# vim: set fileencoding= UTF-8
#!usr/bin/python
"""Word Spot

ENG: Read a string and print every word in the order it first appeared; if a
word appears more than once, print the index of its last occurrence in
(parentheses).

input:  qwe sdf tyu qwe sdf try sdf qwe sdf rty sdf wer sdf wer
output: qwe(7) sdf(12) tyu try rty wer(13)

lecture 7 task 3.
http://uneex.ru/LecturesCMC/PythonIntro2014/07_LanguageExtensions
"""
__author__ = "JayIvhen"

from collections import OrderedDict

words_dict = OrderedDict()
words = raw_input().strip(" ").split(" ")

# Add each word to the dict as a key; store the index of its latest repetition,
# or 0 if it has only been seen once so far.
for index in xrange(len(words)):
    if words[index] in words_dict:
        words_dict[words[index]] = index
    else:
        words_dict[words[index]] = 0

# Print words in the order they first appeared. If the stored value is 0 print
# just the word, otherwise print word(index of its last occurrence).
for word in words_dict:
    if words_dict[word]:
        print("{}({})".format(word, words_dict[word]), end=' ')
    else:
        print(word, end=' ')
print()
python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
from typing import Dict, Tuple, Any, Set

from tabun_stat import utils
from tabun_stat.processors.base import BaseProcessor


class BirthdaysProcessor(BaseProcessor):
    def __init__(self) -> None:
        super().__init__()
        # {(day, month): ids of the users born on that day}
        self._birthdays = {}  # type: Dict[Tuple[int, int], Set[int]]

    def process_user(self, user: Dict[str, Any]) -> None:
        if not user['birthday']:
            return
        day = (user['birthday'].day, user['birthday'].month)
        if day not in self._birthdays:
            self._birthdays[day] = set()
        self._birthdays[day].add(user['user_id'])

    def end_users(self, stat: Dict[str, Any]) -> None:
        assert self.stat
        with open(os.path.join(self.stat.destination, 'birthdays.csv'), 'w', encoding='utf-8') as fp:
            fp.write(utils.csvline('День рождения', 'Число пользователей'))
            for day, user_ids in sorted(self._birthdays.items(), key=lambda x: len(x[1]), reverse=True):
                fp.write(utils.csvline('{:02d}.{:02d}'.format(*day), len(user_ids)))
python
#!/usr/bin/python """ This script provides a set of tools to parse the vector and scalar files dumped out of omnet simulation and indexed data and stats for use of other modules. """ from numpy import * from glob import glob from optparse import OptionParser from pprint import pprint from functools import partial from xml.dom import minidom import math import os import subprocess import random import re import sys import warnings class AttrDict(dict): """A mapping with string keys that aliases x.y syntax to x['y'] syntax. The attribute syntax is easier to read and type than the item syntax. """ def __getattr__(self, name): if name not in self: self[name] = AttrDict() return self[name] def __setattr__(self, name, value): self[name] = value def __delattr__(self, name): del self[name] def assign(self, path, value): """ Given a hierarchical path such as 'x.y.z' and a value, perform an assignment as if the statement self.x.y.z had been invoked. """ names = path.split('.') container = self for name in names[0:-1]: if name not in container: container[name] = AttrDict() container = container[name] container[names[-1]] = value def access(self, path): """ Given a hierarchical path such as 'x.y.z' returns the value as if the statement self.x.y.z had been invoked. """ names = path.split('.') container = self for name in names[0:-1]: if name not in container: raise Exception, 'path does not exist: {0}'.format(path) container = container[name] return container[names[-1]] class VectorParser(): """ open vector and index files and provide interface for seeking and reading through them """ def __init__(self, vciFile, vecFile): self.vciFile = vciFile self.vecFile = vecFile self.paramDic = AttrDict() self.vecDesc = AttrDict() self.vecBlockInfo = AttrDict() self.parseIndex() def parseIndex(self): vciFd = open(self.vciFile) vciFd.seek(0) paramListEnd = False net="" vecId = -1 vecPath = "" for line in vciFd: if not(paramListEnd): # At the top of the index file, the general information about # parameter list of the simulation is expected. This list is # expected to be separated by a blank line from the rest of the # file if not line or line.isspace(): paramListEnd = True match = re.match('attr\s+(\S+)\s+(".+"|\S+)', line) if match: self.paramDic.assign(match.group(1), match.group(2)) if match.group(1) == 'network': net = match.group(2) else: # After the parameter list, we expect only one of the three # types of line in the index file: # # 1. Lines that starts with 'vector' and contain information # about id number, source, and name of the vector # 2. Each line that starts with vector, will be followed by few # other lines that start with 'attr' # 3. 
lines that start with a vector id number of a vector and # stores the location and stats of blocks for that vector in # the vectror result file match1 = re.match( '{0}\s+(\d+)\s+{1}\.(\S+)\s+("(.+)"|\S+)\s*(\S*)'.format( 'vector', net), line) match2 = re.match('attr\s+(\S+)\s+("(.+)"|\S+)', line) match3 = re.match('(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+' '(\d+\.?\d*)\s+(\d+\.?\d*)\s+(\d+)\s+(\d+\.?\d*)\s+' '(\d+\.?\d*)\s+(\d+\.?\d*)\s+(\d+\.?\d*e?\+?\d*)', line) if match1: vecId = int(match1.group(1)) moduleName = match1.group(2) vecName = match1.group(3) vecPath = moduleName+'.'+vecName self.vecDesc.assign(vecPath, AttrDict()) self.vecDesc.access(vecPath).vecId = vecId elif match2: if match2.group(1) == 'source': self.vecDesc.access(vecPath).srcSignal = match2.group(2) elif match2.group(1) == 'title': if match2.group(3) == None: self.vecDesc.access(vecPath).title = match2.group(2) else: self.vecDesc.access(vecPath).title = match2.group(3) elif match3: vecId = int(match3.group(1)) try: self.vecBlockInfo[vecId] except KeyError: self.vecBlockInfo[vecId] = AttrDict() self.vecBlockInfo[vecId].offsets = [] self.vecBlockInfo[vecId].bytelens = [] self.vecBlockInfo[vecId].firstEventNos = [] self.vecBlockInfo[vecId].lastEventNos = [] self.vecBlockInfo[vecId].firstSimTimes = [] self.vecBlockInfo[vecId].lastSimTimes = [] self.vecBlockInfo[vecId].counts = [] self.vecBlockInfo[vecId].mins = [] self.vecBlockInfo[vecId].maxs = [] self.vecBlockInfo[vecId].sums = [] self.vecBlockInfo[vecId].sqrsums = [] self.vecBlockInfo[vecId].offsets.append(int(match3.group(2))) self.vecBlockInfo[vecId].bytelens.append(int(match3.group(3))) self.vecBlockInfo[vecId].firstEventNos.append(int(match3.group(4))) self.vecBlockInfo[vecId].lastEventNos.append(int(match3.group(5))) self.vecBlockInfo[vecId].firstSimTimes.append(float(match3.group(6))) self.vecBlockInfo[vecId].lastSimTimes.append(float(match3.group(7))) self.vecBlockInfo[vecId].counts.append(int(match3.group(8))) self.vecBlockInfo[vecId].mins.append(float(match3.group(9))) self.vecBlockInfo[vecId].maxs.append(float(match3.group(10))) self.vecBlockInfo[vecId].sums.append(float(match3.group(11))) self.vecBlockInfo[vecId].sqrsums.append(float(match3.group(12))) else: raise Exception, "No match pattern found for line:\n{0}".format(line) vciFd.close() def getVecDicRecursive(self, attrdict, vecName): """ returns a list of tuples (t1, t2) where for each module having vector vecName, t1 is the module path in the network and t2 is a dictionary containing vecId, source signal and title of the vector correspoding to vecName in that module. """ retList = list() for key in attrdict: if key == vecName: retList.append(('', attrdict.access(key))) else: nextLevelDict = attrdict.access(key) if not(type(nextLevelDict) == type(AttrDict())): nextLevelDict = AttrDict() recList = self.getVecDicRecursive(nextLevelDict, vecName) for pair in recList: if not(pair[1] == AttrDict()): if pair[0] == '': retList.append((key, pair[1])) else: retList.append((key + '.' 
+ pair[0], pair[1])) if retList == list(): return [('', AttrDict())] else: return retList def getIdsByNames(self, vecNames, moduleNames = ''): if moduleNames == '': # Returns all of the instances of the vecNames in vecDesc for all # possible modules that record results from vector 'vecName' vecIdDic = AttrDict() for vecName in vecNames: vecIds = [] vecIdTupleList = self.getVecDicRecursive(self.vecDesc, vecName) for vecIdPair in vecIdTupleList: if not(vecIdPair[0] == ''): vecIds.append(vecIdPair[1].vecId) vecIdDic.assign(vecName, vecIds) return vecIdDic else: vecIdDic = AttrDict() for vecName in vecNames: vecIds = [] for moduleName in moduleNames: try: vecDic = self.vecDesc.access(moduleName) vecIds.append(vecDic.access(vecName).vecId) except KeyError: pass vecIdDic.assign(vecName, vecIds) return vecIdDic def getVecDataByIds(self, vecId): """ Returns a list of values recorded for the 'vecId' sorted by recording time. Performs relavent checks to make sure the results are valid based on the information provided in vecBlockInfo for that vecId """ vecFd = open(self.vecFile) blockInfo = self.vecBlockInfo[vecId] vecData = [] prevEvenNo = 0 prevSimTime = 0 for firstEventNo in blockInfo.firstEventNos: currentBlockData = [] i = blockInfo.firstEventNos.index(firstEventNo) offset = blockInfo.offsets[i] bytelen = blockInfo.bytelens[i] lastEventNo = blockInfo.lastEventNos[i] firstSimTime = blockInfo.firstSimTimes[i] lastSimTime = blockInfo.lastSimTimes[i] vecFd.seek(offset) for line in vecFd.read(bytelen).splitlines(): recordedVals = line.split() assert int(recordedVals[0]) == vecId eventNo = int(recordedVals[1]) simTime = float(recordedVals[2]) val = recordedVals[3] currentBlockData.append((eventNo, simTime, val)) assert eventNo >= prevEvenNo and simTime >= prevSimTime prevEvenNo = eventNo prevSimTime = simTime assert currentBlockData[0][0] == firstEventNo assert currentBlockData[0][1] == firstSimTime assert currentBlockData[-1][0] == lastEventNo assert currentBlockData[-1][1] == lastSimTime vecData += currentBlockData vecFd.close() return vecData class ScalarParser(): """ Scan a result file containing scalar statistics for omnet simulation, and returns a list of AttrDicts, one containing the metrics for each server. 
""" def __init__(self, scaFile): self.scaFile = scaFile self.hosts = AttrDict() self.tors = AttrDict() self.aggrs = AttrDict() self.cores = AttrDict() self.apps = AttrDict() self.generalInfo = AttrDict() self.globalListener = AttrDict() self.parse() def parse(self): self.hosts = AttrDict() self.tors = AttrDict() self.aggrs = AttrDict() self.cores = AttrDict() self.generalInfo = AttrDict() self.globalListener = AttrDict() net = "" scaFd = open(self.scaFile) for line in scaFd: if not line or line.isspace(): break; match = re.match('attr\s+(\S+)\s+(".+"|\S+)', line) if match: self.generalInfo.assign(match.group(1), match.group(2)) if match.group(1) == 'network': net = match.group(2) if net == "": raise Exception, 'no network name in file: {0}'.format(scaFd.name) currDict = AttrDict() for line in scaFd: match = re.match('(\S+)\s+{0}\.(([a-zA-Z]+).+\.\S+)\s+(".+"|\S+)\s*(\S*)'.format(net), line) if match: topLevelModule = match.group(3) if topLevelModule == 'tor': currDict = self.tors elif topLevelModule == 'nic': currDict = self.hosts elif topLevelModule == 'aggRouter': currDict = self.aggrs elif topLevelModule == 'core': currDict = self.cores elif topLevelModule == 'app': currDict = self.apps else: raise Exception, 'no such module defined for parser: {0}'.format(topLevelModule) entryType = match.group(1) if entryType == 'statistic': var = match.group(2)+'.'+match.group(4) currDict.assign(var+'.bins', []) elif entryType == 'scalar': var = match.group(2)+'.'+match.group(4) subVar = var + '.value' value = float(match.group(5)) currDict.assign(subVar, value) else: raise Exception, '{0}: not defined for this parser'.format(match.group(1)) continue match = re.match('(\S+)\s+{0}\.([a-zA-Z]+)\s+(".+"|\S+)\s*(\S*)'.format(net), line) if match: topLevelModule = match.group(2) if topLevelModule == 'globalListener': currDict = self.globalListener else: raise Exception, 'no such module defined for parser: {0}'.format(topLevelModule) entryType = match.group(1) if entryType == 'statistic': var = match.group(3) currDict.assign(var+'.bins', []) elif entryType == 'scalar': var = match.group(3) subVar = var + '.value' value = float(match.group(4)) currDict.assign(subVar, value) else: raise Exception, '{0}: not defined for this parser'.format(match.group(1)) continue match = re.match('(\S+)\s+(".+"|\S+)\s+(".+"|\S+)', line) if not match and not line.isspace(): warnings.warn('Parser cant find a match for line: {0}'.format(line), RuntimeWarning) if currDict: entryType = match.group(1) subVar = var + '.' + match.group(2) value = match.group(3) if entryType == 'field': currDict.assign(subVar, float(value)) elif entryType == 'attr': currDict.assign(subVar, value) elif entryType == 'bin': subVar = var + '.bins' valuePair = (float(match.group(2)), float(match.group(3))) currDict.access(subVar).append(valuePair) else: warnings.warn('Entry type not known to parser: {0}'.format(entryType), RuntimeWarning) scaFd.close()
python
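A typical read path for these classes is: point `VectorParser` at the .vci/.vec pair, resolve vector ids by name, then pull the time series block by block. A short sketch; the file paths and the vector name `queueLength` are purely illustrative, and the module uses Python 2 syntax, so the sketch sticks to constructs that run there as well:

# from <this module> import VectorParser, ScalarParser  # assumed importable

vp = VectorParser('results/run0.vci', 'results/run0.vec')   # hypothetical paths

# All vector ids recorded under the (hypothetical) vector name 'queueLength'
ids = vp.getIdsByNames(['queueLength'])
for vec_id in ids['queueLength']:
    data = vp.getVecDataByIds(vec_id)    # list of (event_no, sim_time, value) tuples
    print('%d: %d samples' % (vec_id, len(data)))

sp = ScalarParser('results/run0.sca')    # hypothetical path
print(sp.generalInfo)                     # run-level attributes parsed from the header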
from Proxy import ProxyFactory, Proxy from LRUCache import LRUCache from utils import distance, stress, MAX_DISTANCE, LOWEST_STRESS from typing import Hashable, List, Tuple, Dict, Any class Origin: def __init__(self, database, max_size_of_LRUCache : int, max_age_of_LRUCache: int, load_balancing_interval : int): self.database = database # the repository storing the centralized version of the data that is used by the proxy servers self.proxyFactory = ProxyFactory(max_size_of_LRUCache, max_age_of_LRUCache) # The following two instance variables are dicts of the form {coordinates: proxy, ...}, where coordinates is a tuple of type Tuple[float, float], and proxy is a Proxy instance self.proxies = dict() # a one-to-many association between Origin and Proxy. The association is stored as a dictionary of proxies indexed by their coordinates for O(1) lookup time in some scenarios self.failed_proxies = dict() # keep track of reported proxies (presumably having suffered a network failure or crash as detected by one of the proxy's assigned client(s)) for logging and maintenance purposes self.potential_servers = dict() # an abstract dictionary of potential Server instances (kept abstract for simplicity, outside assignment scope) indexed by the physical coordinates of the server # used to deploy a Proxy instance onto a physical server on which it will be hosted, either in the context of Admin simply creating a new proxy, or Admin calling the load balance method to automatically add a proxy server to alleviate the load of the most stressed proxy server. def _set_potential_servers(self, potential_servers : Dict[Tuple[float, float], Any]): self.potential_servers = potential_servers def get(self, key : Hashable): ''' Called by a LRUCache instance when a cache miss occurs while its owner Proxy instance is handling a get request it received from a client, in order to retrieve the value by key as argument from the origin server's central database. The returned value is forwarded to the requesting client via the Proxy instance assigned to that client (the Proxy instance that owns the LRUCache instance that called this method), and it is put in the calling LRUCache instance as the MRU item. ''' return self.database.get(key) def put(self, key : Hashable, value : Any): ''' Called by a Proxy instance upon handling every put request it receives from clients, in order to ensure that data updated (i.e. put) in the cache of a client's assigned proxy is progressively reflected in the other proxy instances. Each request to put data in a proxy server's cache triggers a request to put that same data in the central database, to ensure that the data changes are reflected in other proxies. The data changes are not immediately reflected in other proxies, since other proxies may still hold a cached version of the value that is yet to expire that they might continue serving to their client (this is the behavior intended by having cache expiry). However, since all Proxy instances have an LRUCache instance with the same max age, it is guaranteed that after max age time has elapsed after an arbitrary proxy handled a put request updating some value, at least that updated version of that value (if not an even newer version by a more recent put request by some other arbitrary proxy) will be served to every client requesting that value. 
That is, it is impossible for one proxy to receive a put request to update or create some value (thus updating or creating a value in the central database), then max age time later, for any proxy to serve a client a version of that data that is older (i.e. an older version of the data that does not reflect the update made by the first proxy, or a missing value if the first proxy was creating a value) than the version of the value put by the first proxy. ''' self.database.put(key, value) def _get_coordinates_of_nearest_proxy(self, request_coordinates : Tuple[float, float]): least_distance = MAX_DISTANCE for coordinates, proxy in self.proxies.items(): distance_request_to_proxy = distance(request_coordinates, coordinates) if (distance_request_to_proxy < least_distance): least_distance = distance_request_to_proxy # update coordinates_of_nearest_proxy = coordinates # keep track of return coordinates_of_nearest_proxy def get_nearest_proxy(self, request_coordinates : Tuple[float, float]): ''' Called by Client to receive a Proxy instance with which it will interact with until the Proxy instance suffers a network failure or crash. ''' return self.proxies[self._get_coordinates_of_nearest_proxy(request_coordinates)] def _get_coordinates_of_stressed_proxy(self): highest_stress = LOWEST_STRESS if (len(self.proxies.keys() == 0)): raise ValueError(self.proxies) for coordinates, proxy in self.proxies.items(): stress_of_proxy = stress(proxy.LRUCache.cache_info()) if (stress_of_proxy > highest_stress): highest_stress = stress_of_proxy coordinates_of_stressed_proxy = coordinates return coordinates_of_stressed_proxy def _get_stressed_proxy(self): return self.proxies[self._get_coordinates_of_stressed_proxy()] def _deploy_proxy(self, proxy): server_hosting_proxy = self.potential_servers.pop(proxy.coordinates) # deploy ... def _add_proxy(self, coordinates : Tuple[float, float]): return self.proxyFactory.produce(coordinates) def balance_load(self): ''' Called by Admin to add a proxy to the network to balance the load of the most stressed proxy server. This is acheived by adding a single proxy at the coordinates of the potential server (from the list of coordinates supplied as argument) that is nearest to the most stressed proxy server, so that the added proxy server may take on some of the most stressed proxy server's load. ''' coordinates_of_stressed_proxy = self._get_coordinates_of_stressed_proxy() least_distance = MAX_DISTANCE potential_servers_coordinates = self.potential_servers.keys() if (len(potential_servers_coordinates) == 0): raise ValueError(potential_servers_coordinates) for potential_server_coordinates in potential_servers_coordinates: distance_stressed_to_potential = distance(coordinates_of_stressed_proxy, potential_server_coordinates) if (distance_stressed_to_potential < least_distance): least_distance = distance_stressed_to_potential # update coordinates_of_nearest_potential = potential_server_coordinates # keep track of self._add_proxy(coordinates_of_nearest_potential) def report_failure(self, failed_coordinates : Tuple[float, float]): ''' Called by client to report a failed proxy server. Ensures that the proxy instance reported by its coordinates is no longer assigned to other clients until it is manually added back to the origin's proxies references. Reported proxies (presumably having suffered a network failure or crash as detected by one of the proxy's assigned client(s)) are kept in the failed proxies for logging and maintenance purposes. 
''' failed_proxy = self.proxies.pop(failed_coordinates) self.failed_proxies[failed_coordinates] = failed_proxy
python
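The nearest-proxy lookup above is an argmin over the distance between the request coordinates and each proxy's coordinates. A self-contained toy version of just that selection step, independent of the Proxy/LRUCache classes and assuming a plain Euclidean distance (which may differ from what utils.distance actually computes):

import math

proxies = {(0.0, 0.0): 'proxy-A', (10.0, 10.0): 'proxy-B', (3.0, 4.0): 'proxy-C'}

def nearest(request_xy):
    # argmin over straight-line distance, mirroring _get_coordinates_of_nearest_proxy
    return min(proxies, key=lambda xy: math.dist(xy, request_xy))

print(proxies[nearest((2.0, 2.0))])   # proxy-C (~2.24 away vs ~2.83 for proxy-A)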
from __future__ import print_function
from .client import Client

__title__ = 'Steamfront'
__author__ = 'Callum Bartlett'
__license__ = 'MIT'
__copyright__ = 'Copyright 2017 Callum Bartlett'
__version__ = '0.1.0'

if __name__ == '__main__':
    from sys import argv, exit
    if len(argv) < 2:
        print('Please give the name of a game you want to find the information of.')
        exit(1)
    gameName = ' '.join(argv[1:])
    c = Client()
    g = c.getApp(name=gameName)
    i = '{0.name} :: {0.appid} :: {0.type}'
    print(i.format(g))
python
import sys from ctypes import * sys.path.append('../') #import vscp.udp as udp #from vscp.vscp_class import * #from vscp.vscp_type import * from vscp import * def makeClass2StrMeasurement( vscpclass, vscptype, strval ): ex = vscp.vscpEventEx() return ex e = vscp.vscpEvent() ex = vscp.vscpEventEx() print(type(e), type(ex)) ex.head = 0 # Measurement Temperature str ex.vscpclass = vscp_class.VSCP_CLASS2_MEASUREMENT_STR ex.vscptype = vscp_type.VSCP_TYPE_MEASUREMENT_TEMPERATURE ex.sizedata = 2 ex.dump() # Temperature temperature = "27.235" #temperature = -22.872 b = bytearray() #b.extend(temperature) #print(int(temperature[0].encode("hex")), len(b)) ex = makeClass2StrMeasurement( 1, 2, temperature ) # must use vscpEventEx not vscpEvent frame = udp.makeVscpFrame( 0, ex )
python
""" Test cases for linear programming based on CVXOPT This case is an economic dispatch code """ from cvxopt import matrix from cvxopt import solvers class LinearProgramming(): def run(self, PG_MAX, PG_MIN, CG, PD): """ :param PG_MAX: maximal generator output :param PG_MIN: minimal generator output :param PD: total demand :return: """ ## Step 1: Physical model ## # 1) PG_MIN <= pg, \forall g # 2) pg <= PG_MAX, \forall g # 3) sum(pg) = PD # obj: sum_{g} CG_{g}*pg_{g} ## Step 2: Compact Model ## ## min_{x} c^{T}x ## s.t. Gx <= h ## Ax = b nx = len(PG_MAX) ng = len(PG_MIN) assert len(PG_MAX) == len(PG_MIN) # 1) PG_MIN <= pg, \forall g # -pg_{g} <= -PG_MIN_{g}, \forall g G = matrix([0.0] * ng * nx, (ng, nx)) h = matrix([0.0] * ng, (ng, 1)) for i in range(ng): G[i, i] = -1 h[i, 0] = -PG_MIN[i] # 2) pg <= PG_MAX, \forall g # pg_{g} <= PG_MAX_{g}, \forall g G_temp = matrix([0.0] * ng * nx, (ng, nx)) h_temp = matrix([0.0] * ng, (ng, 1)) for i in range(ng): G_temp[i, i] = 1 h_temp[i, 0] = PG_MAX[i] G = matrix([G, G_temp]) h = matrix([h, h_temp]) # 3) sum(pg) = PD A = matrix([1.0] * ng, (1, nx)) b = matrix([PD]) # 4) Objective function c = matrix(CG, (nx, 1)) sol = solvers.lp(c=c, G=G, h=h, A=A, b=b) pg = sol['x'] return pg if __name__ == "__main__": linear_programming = LinearProgramming() linear_programming.run(PG_MAX=[10., 10., 10.], PG_MIN=[0., 0., 0.], CG=[1., 2., 3.], PD=24.)
python
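The same three-generator dispatch can be cross-checked with SciPy's linprog, which takes the identical cost vector, bounds and equality constraint (SciPy is not used by the class above; this is only an added sanity check, assuming scipy is available):

from scipy.optimize import linprog

PG_MAX = [10., 10., 10.]
PG_MIN = [0., 0., 0.]
CG = [1., 2., 3.]
PD = 24.

res = linprog(
    c=CG,
    A_eq=[[1., 1., 1.]], b_eq=[PD],        # total generation must meet demand
    bounds=list(zip(PG_MIN, PG_MAX)),      # PG_MIN <= pg <= PG_MAX
)
print(res.x)   # expected: [10, 10, 4] -- cheapest generators are loaded first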
import secrets import redis from app.core.config import settings ok_status = {'detail': 'Ok'} def generate_id(): return secrets.token_urlsafe(8) class SingletonMeta(type): _instances = {} def __call__(cls, *args, **kwargs): if cls not in cls._instances: instance = super().__call__(*args, **kwargs) cls._instances[cls] = instance return cls._instances[cls] class RedisManager(metaclass=SingletonMeta): def __init__(self, host, port, password): self.__r = redis.Redis( host=host, port=port, password=password, decode_responses=True ) def get_item(self, key: str): return self.__r.get(key) def set_item(self, key, value): return self.__r.set(key, value) def set_dict(self, key: str, d: dict): return self.__r.hmset(key, d) def get_dict(self, key: str): return self.__r.hgetall(key) def delete(self, key: str): return self.__r.delete(key) def list_keys(self, pattern="*"): return self.__r.keys(pattern) def exists(self, item_id: str): return self.__r.exists(item_id) redis_manager = RedisManager( host=settings.REDIS_HOST, port=settings.REDIS_PORT, password=settings.REDIS_PASSWORD )
python
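`SingletonMeta` is what makes every `RedisManager(...)` call return the same object. A tiny self-contained demonstration of the same metaclass with a dummy class (the metaclass is repeated here so the snippet runs on its own, without a Redis connection):

class SingletonMeta(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

class Config(metaclass=SingletonMeta):
    def __init__(self, name):
        self.name = name

a = Config('first')
b = Config('second')      # __init__ is not run again; the first instance is returned
print(a is b, a.name)     # True first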
import json import logging import os import pytest from fhir2dataset.query import Query from tests.tools import create_resource_test log_format = "[%(asctime)s] [%(levelname)s] - %(message)s" logging.basicConfig(level=logging.INFO, format=log_format) @pytest.mark.parametrize( "dirname, fhir_api_url", [ ("tests/1", "http://hapi.fhir.org/baseR4/"), # ("tests/2", "http://hapi.fhir.org/baseR4/"), ("tests/3", "http://hapi.fhir.org/baseR4/"), # ("tests/4", "http://hapi.fhir.org/baseR4/"), ("tests/5", "http://hapi.fhir.org/baseR4/"), ], ) def test_resources_in_dataframe(dirname, fhir_api_url): create_resource_test(dirname, fhir_api_url) with open(os.path.join(dirname, "config.json")) as json_file: config = json.load(json_file) with open(os.path.join(dirname, "infos_test", "config_checks.json")) as json_file: checks = json.load(json_file) with open(os.path.join(dirname, "infos_test", "info_hapi.json")) as json_file: info_hapi = json.load(json_file) query = Query(fhir_api_url) query.from_config(config) query.execute(debug=True) df = query.main_dataframe lines = checks["line"] for line in lines: cols = [] conditions = [] for alias, filename in line: cols.append(f"{alias}:from_id") conditions.append(info_hapi[filename]) condition = " & ".join( [f"(df['{col}'].str.contains('{cond}'))" for col, cond in zip(cols, conditions)] ) logging.info(condition) result = df[eval(condition)] logging.info(result) assert len(result.index) >= 1, f"{dirname} failed"
python
'''
Test cases for the get_names method
'''

import unittest

from spydrnet import ir
from spydrnet_physical.util import get_names
from spydrnet_physical.util import get_attr


class TestGetNames(unittest.TestCase):
    '''
    Test case class
    '''

    def setUp(self):
        '''
        Basic element setup
        '''
        self.definition = ir.Definition(name="Definition0")
        self.cable = self.definition.create_cable(name="Cable0")
        self.port = self.definition.create_port(name="Port0")
        self.instance = ir.Instance(name="Instance0")

    def test_get_names(self):
        '''
        Test correctness of the returned strings
        '''
        # Single object
        self.assertEqual(["Cable0", ], get_names(self.cable))
        # Iterable objects
        self.assertEqual(["Cable0", "Port0", "Definition0", "Instance0"],
                         get_names([self.cable, self.port,
                                    self.definition, self.instance]))
        # Generator object
        self.assertEqual(["Port0", ],
                         get_names(self.definition.get_ports()))

    def test_get_attr(self):
        '''
        Test correctness of the returned attribute values
        '''
        # Single object
        self.assertEqual(["Cable0", ], get_names(self.cable))
        # Iterable objects
        self.assertEqual(["Cable0", "Port0", "Definition0", "Instance0"],
                         get_attr([self.cable, self.port,
                                   self.definition, self.instance], 'name'))
python
import random
from hashlib import sha256


def gen_password(user_password):
    '''Generate a salted password hash'''
    salt = '%x' % random.randint(0x10000000, 0xffffffff)    # generate an 8-character random salt
    salted = (salt + user_password).encode('utf8')           # mix the salt into the password bytes
    hash_value = sha256(salted).hexdigest()                  # hash the salted password
    safe_password = salt + hash_value                        # store salt in front of the hash
    return safe_password


def check_password(user_password, safe_password):
    '''Check whether the user password is correct'''
    salt = safe_password[:8]                                 # recover the salt stored in front of the hash
    salted = (salt + user_password).encode('utf8')
    hash_value = sha256(salted).hexdigest()                  # hash the candidate password with the same salt
    return hash_value == safe_password[8:]
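

# Illustrative round-trip check for the helpers above (not part of the original module):
if __name__ == '__main__':
    stored = gen_password('correct horse battery staple')
    assert check_password('correct horse battery staple', stored)
    assert not check_password('wrong password', stored)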
python
__author__ = 'Eric Weast' __copyright__ = "Copyright 2014, Eric Weast" __license__ = "GPL v2" from .exceptions import EncodingError from .exceptions import DecodingError from .decoder import decode from .decoder import decode_from_file from .encode import encode
python
from copy import copy from meerk40t.core.cutcode import RasterCut from meerk40t.core.element_types import * from meerk40t.core.node.node import Node from meerk40t.core.parameters import Parameters from meerk40t.core.units import Length from meerk40t.image.actualize import actualize from meerk40t.svgelements import Color, Path, Polygon MILS_IN_MM = 39.3701 class ImageOpNode(Node, Parameters): """ Default object defining any operation done on the laser. This is a Node of type "op image". """ def __init__(self, *args, **kwargs): if "setting" in kwargs: kwargs = kwargs["settings"] if "type" in kwargs: del kwargs["type"] Node.__init__(self, type="op image", **kwargs) Parameters.__init__(self, None, **kwargs) self.settings.update(kwargs) if len(args) == 1: obj = args[0] if hasattr(obj, "settings"): self.settings = dict(obj.settings) elif isinstance(obj, dict): self.settings.update(obj) def __repr__(self): return "ImageOpNode()" def __str__(self): parts = list() if not self.output: parts.append("(Disabled)") if self.default: parts.append("✓") if self.passes_custom and self.passes != 1: parts.append("%dX" % self.passes) parts.append("Image") if self.speed is not None: parts.append("%gmm/s" % float(self.speed)) if self.frequency is not None: parts.append("%gkHz" % float(self.frequency)) if self.raster_swing: raster_dir = "-" else: raster_dir = "=" if self.raster_direction == 0: raster_dir += "T2B" elif self.raster_direction == 1: raster_dir += "B2T" elif self.raster_direction == 2: raster_dir += "R2L" elif self.raster_direction == 3: raster_dir += "L2R" elif self.raster_direction == 4: raster_dir += "X" else: raster_dir += "%d" % self.raster_direction parts.append(raster_dir) if self.power is not None: parts.append("%gppi" % float(self.power)) parts.append("±{overscan}".format(overscan=self.overscan)) parts.append("%s" % self.color.hex) if self.acceleration_custom: parts.append("a:%d" % self.acceleration) return " ".join(parts) def __copy__(self): return ImageOpNode(self) @property def bounds(self): if self._bounds_dirty: self._bounds = Node.union_bounds(self.flat(types=elem_ref_nodes)) self._bounds_dirty = False return self._bounds def default_map(self, default_map=None): default_map = super(ImageOpNode, self).default_map(default_map=default_map) default_map["element_type"] = "Image" default_map["enabled"] = "(Disabled) " if not self.output else "" default_map["pass"] = ( f"{self.passes}X " if self.passes_custom and self.passes != 1 else "" ) default_map["penpass"] = ( f"(p:{self.penbox_pass}) " if self.penbox_pass else "" ) default_map["penvalue"] = ( f"(v:{self.penbox_value}) " if self.penbox_value else "" ) if self.raster_swing: raster_swing = "-" else: raster_swing = "=" if self.raster_direction == 0: raster_dir = "T2B" elif self.raster_direction == 1: raster_dir = "B2T" elif self.raster_direction == 2: raster_dir = "R2L" elif self.raster_direction == 3: raster_dir = "L2R" elif self.raster_direction == 4: raster_dir = "X" else: raster_dir = str(self.raster_direction) default_map["direction"] = f"{raster_swing}{raster_dir} " default_map["speed"] = "default" default_map["power"] = "default" default_map["frequency"] = "default" default_map.update(self.settings) return default_map def drop(self, drag_node): if drag_node.type.startswith("elem"): if drag_node.type != "elem image": return False # Dragging element onto operation adds that element to the op. self.add_reference(drag_node, pos=0) return True elif drag_node.type == "reference": # Disallow drop of image refelems onto a Dot op. 
if drag_node.type == "elem image": return False # Move a refelem to end of op. self.append_child(drag_node) return True elif drag_node.type in op_nodes: # Move operation to a different position. self.insert_sibling(drag_node) return True elif drag_node.type in ("file", "group"): some_nodes = False for e in drag_node.flat("elem"): # Add element to operation self.add_reference(e) some_nodes = True return some_nodes return False def load(self, settings, section): settings.read_persistent_attributes(section, self) update_dict = settings.read_persistent_string_dict(section, suffix=True) self.settings.update(update_dict) self.validate() hexa = self.settings.get("hex_color") if hexa is not None: self.color = Color(hexa) self.notify_update() def save(self, settings, section): settings.write_persistent_attributes(section, self) settings.write_persistent(section, "hex_color", self.color.hexa) settings.write_persistent_dict(section, self.settings) def copy_children(self, obj): for element in obj.children: self.add_reference(element) def copy_children_as_real(self, copy_node): for node in copy_node.children: self.add_node(copy(node.node)) def time_estimate(self): estimate = 0 for node in self.children: if node.type == "reference": node = node.node try: e = node.image except AttributeError: continue step = node.step_x estimate += (e.image_width * e.image_height * step) / ( MILS_IN_MM * self.speed ) hours, remainder = divmod(estimate, 3600) minutes, seconds = divmod(remainder, 60) return "%s:%s:%s" % ( int(hours), str(int(minutes)).zfill(2), str(int(seconds)).zfill(2), ) def preprocess(self, context, matrix, commands): """ Process the scale to native resolution done with the given matrix. In the case of image ops we are scaling the overscan length into usable native units. @param matrix: @return: """ overscan = float(Length(self.settings.get("overscan", "1mm"))) transformed_vector = matrix.transform_vector([0, overscan]) self.overscan = abs(complex(transformed_vector[0], transformed_vector[1])) for node in self.children: dpi = node.dpi oneinch_x = context.device.physical_to_device_length("1in", 0)[0] oneinch_y = context.device.physical_to_device_length(0, "1in")[1] step_x = float(oneinch_x / dpi) step_y = float(oneinch_y / dpi) node.step_x = step_x node.step_y = step_y m1 = node.matrix # Transformation must be uniform to permit native rastering. if m1.a != step_x or m1.b != 0.0 or m1.c != 0.0 or m1.d != step_y: def actual(image_node, s_x, s_y): def actualize_images(): image_node.image, image_node.matrix = actualize( image_node.image, image_node.matrix, step_x=s_x, step_y=s_y ) image_node.cache = None return actualize_images commands.append(actual(node, step_x, step_y)) break def as_cutobjects(self, closed_distance=15, passes=1): """ Generator of cutobjects for the image operation. This takes any image node children and converts them into rastercut cutobjects. """ for image_node in self.children: # Process each child. All settings are different for each child. 
if image_node.type != "elem image": continue settings = self.derive() # Set overscan overscan = self.overscan if not isinstance(overscan, float): overscan = float(Length(overscan)) # Set steps step_x = image_node.step_x step_y = image_node.step_y # Set variables by direction if image_node.direction is not None: direction = image_node.direction else: direction = self.raster_direction horizontal = False start_on_left = False start_on_top = False if direction == 0 or direction == 4: horizontal = True start_on_top = True elif direction == 1: horizontal = True start_on_top = False elif direction == 2: horizontal = False start_on_left = False elif direction == 3: horizontal = False start_on_left = True bidirectional = bool(self.raster_swing) # Perform correct actualization if image_node.needs_actualization(): image_node.make_actual() # Set variables matrix = image_node.matrix pil_image = image_node.image offset_x = matrix.value_trans_x() offset_y = matrix.value_trans_y() # Establish path min_x = offset_x min_y = offset_y max_x = offset_x + pil_image.width * step_x max_y = offset_y + pil_image.height * step_y path = Path( Polygon( (min_x, min_y), (min_x, max_y), (max_x, max_y), (max_x, min_y), ) ) # Create Cut Object cut = RasterCut( image=pil_image, offset_x=offset_x, offset_y=offset_y, step_x=step_x, step_y=step_y, inverted=False, bidirectional=bidirectional, horizontal=horizontal, start_on_top=start_on_top, start_on_left=start_on_left, overscan=overscan, settings=settings, passes=passes, ) cut.path = path cut.original_op = self.type yield cut if direction == 4: # Create optional crosshatch cut horizontal = False start_on_left = False cut = RasterCut( image=pil_image, offset_x=offset_x, offset_y=offset_y, step_x=step_x, step_y=step_y, inverted=False, bidirectional=bidirectional, horizontal=horizontal, start_on_top=start_on_top, start_on_left=start_on_left, overscan=overscan, settings=settings, passes=passes, ) cut.path = path cut.original_op = self.type yield cut
python
""" training script date: 10/4 author: arabian9ts """ # escape matplotlib error import matplotlib matplotlib.use('Agg') # escape tensorflow warning import os os.environ['TF_CPP_MIN_LOG_LEVEL']='2' import datetime import tensorflow as tf import numpy as np import pickle import threading import matplotlib.pyplot as plt from util.util import * from tqdm import trange from model.ssd300 import * # ====================== Training Parameters ====================== # BATCH_SIZE = 10 EPOCH = 200 EPOCH_LOSSES = [] SHUFFLED_INDECES = [] USE_MANGA109 = True # ============================== END ============================== # if __name__ == '__main__': sess = tf.Session() buff = [] if USE_MANGA109: pickle_file = 'Manga109.pkl' folder_name = 'Manga109/' else: pickle_file = 'VOC2007.pkl' folder_name = 'voc2007/' # load pickle data set annotation with open(pickle_file, 'rb') as f: data = pickle.load(f) keys = sorted(data.keys()) BATCH = int(len(keys) / BATCH_SIZE) def next_batch(): global buff, BATCH_SIZE ,SHUFFLED_INDECES mini_batch = [] actual_data = [] if 0 == len(SHUFFLED_INDECES): SHUFFLED_INDECES = list(np.random.permutation(len(keys))) indices = SHUFFLED_INDECES[:min(BATCH_SIZE, len(SHUFFLED_INDECES))] del SHUFFLED_INDECES[:min(BATCH_SIZE, len(SHUFFLED_INDECES))] for idx in indices: # make images mini batch img, _, _, _, = preprocess(folder_name + keys[idx]) actual_data.append(data[keys[idx]]) mini_batch.append(img) buff.append((mini_batch, actual_data)) # tensorflow session ssd = SSD300(sess) sess.run(tf.global_variables_initializer()) # parameter saver saver = tf.train.Saver() # saver.restore(sess, './checkpoints/params.ckpt') SHUFFLED_INDECES = list(np.random.permutation(len(keys))) print('\nSTART LEARNING') print('==================== '+str(datetime.datetime.now())+' ====================') for _ in range(5): next_batch() for ep in range(EPOCH): BATCH_LOSSES = [] for ba in trange(BATCH): batch, actual = buff.pop(0) threading.Thread(name='load', target=next_batch).start() _, _, batch_loc, batch_conf, batch_loss = ssd.train(batch, actual) BATCH_LOSSES.append(batch_loss) # print('BATCH: {0} / EPOCH: {1}, LOSS: {2}'.format(ba+1, ep+1, batch_loss)) EPOCH_LOSSES.append(np.mean(BATCH_LOSSES)) print('\n*** AVERAGE: '+str(EPOCH_LOSSES[-1])+' ***') saver.save(sess, './checkpoints/params.ckpt') print('\n========== EPOCH: '+str(ep+1)+' END ==========') print('\nEND LEARNING') plt.xlabel('Epoch') plt.ylabel('Loss') plt.plot(np.array(range(EPOCH)), EPOCH_LOSSES) plt.grid() plt.savefig("loss.png") plt.show() print('==================== '+str(datetime.datetime.now())+' ====================')
python
import unittest from contiguous.structures import DataSection, Group from contiguous.types import String class TestContiguous(unittest.TestCase): """ Test basic functionality with native Python types in schemas.""" def test_lengths(self): data = DataSection( String("name", 25), String("surname", 25), Group("address", String("house_number", 3), String("street", 15) ) ) length = data.length self.assertEqual(length, 68) def test_get_member(self): data = DataSection( String("name", 25), String("surname", 25), Group("address", String("house_number", 3), String("street", 15) ) ) street = data.get_member("street") pass def test_set(self): data = DataSection( String("name", 25), String("surname", 25), Group("address", String("house_number", 3), String("street", 15) ) ) data.set("name", "Ben") data.set("surname", "Collier") self.assertEqual(data._data[0:3].decode(), "Ben") self.assertEqual(data._data[25:32].decode(), "Collier") def test_set_nested(self): data = DataSection( String("name", 25), String("surname", 25), Group("address", String("house_number", 3), String("street", 15) ) ) data.set("street", "Verney Close") pass def test_get_nested(self): data = DataSection( String("name", 25), String("surname", 25), Group("address", String("house_number", 3), String("street", 15) ) ) data.set("address", "14 Verney Close") house = data.get("house_number") self.assertEqual(int(house), 14) def test_basic_structure(self): """ :return: """ data = DataSection( String("name", 25), String("surname", 25), Group("address", String("house_number", 3), String("street", 15) ) ) data.set("address", "14 Verney Close") self.assertEqual(data._data[50:65].decode(), "14 Verney Close")
python
from typing import List class ListNode: def __init__(self, val=0): self.val = val self.next = None def insert_at_tail(nodes: List[int]) -> ListNode: head = ListNode(nodes[0]) for node in nodes[1:]: insert_helper(head, node) return head def insert_helper(head: ListNode, node: int) -> None: while head.next: head = head.next head.next = ListNode(node) def print_list(head: ListNode) -> None: while head: print(head.val, end=" -> ") head = head.next print() class Solution: @staticmethod def delete_nodes(head: ListNode, m: int, n: int) -> ListNode: current = head i = 0 while current: if i < m - 1: i += 1 else: j = 0 while j < n and current.next: current.next = current.next.next j += 1 i = 0 current = current.next return head list_nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] list_head = insert_at_tail(list_nodes) print_list(list_head) s = Solution() d_nodes = s.delete_nodes(list_head, 2, 3) print_list(d_nodes)
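
# Worked example (illustrative): with m=2, n=3 the loop above keeps two nodes,
# drops three, and repeats, so the second print_list call should show
# 1 -> 2 -> 6 -> 7 -> 11 -> 12 ->
expected = [1, 2, 6, 7, 11, 12]
survivors = []
node = d_nodes
while node:
    survivors.append(node.val)
    node = node.next
assert survivors == expected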
python
import ast import rest_framework.serializers as module_to_patch from drf_jsonpatch.patcher import Patcher from drf_jsonpatch.nodes.serialiazers import ( import_jsonpatch, import_apply_json_patch, if_apply_jsonpatch, ) patch = Patcher(module_to_patch) # ---- import jsonpatch ---- # added between django imports and drf (just for the beauty) patch.tree.body.insert(13, import_jsonpatch) # ---- from drf_jsonpatch import apply_json_patch ---- # added between django imports and drf (just for the beauty) patch.tree.body.insert(14, import_apply_json_patch) # ---- if isinstance(data, jsonpatch.JsonPatch): ---- # Must be added in BaseSerializer.__init__ # before `self.initial_data`'s assignment # Get class's node BaseSerializer = None for branch in patch.tree.body: if isinstance(branch, ast.ClassDef) and branch.name == "BaseSerializer": BaseSerializer = branch break else: raise ValueError("Can't find BaseSerialiser") # Get __init__'s node init_method = None for branch in BaseSerializer.body: if isinstance(branch, ast.FunctionDef) and branch.name == "__init__": init_method = branch break else: raise ValueError("Can't find BaseSerialiser.__init__") # Add the if condition at the begining of the __init__'s body init_method.body.insert(0, if_apply_jsonpatch) patch.apply()
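

# A minimal, self-contained sketch of the AST-patching idea used above
# (illustrative only; it does not touch rest_framework or drf_jsonpatch):
# parse some source, insert an import node at the top of the module body,
# then compile and exec the modified tree.
def _ast_patch_sketch():
    source = "def greet():\n    return 'hello ' + json.dumps([1, 2])\n"
    tree = ast.parse(source)

    # Equivalent of inserting `import json` before the function definition.
    tree.body.insert(0, ast.Import(names=[ast.alias(name="json", asname=None)]))
    ast.fix_missing_locations(tree)

    namespace = {}
    exec(compile(tree, filename="<patched>", mode="exec"), namespace)
    return namespace["greet"]()  # -> "hello [1, 2]"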
python
begin_unit comment|'# Copyright 2012 Red Hat, Inc.' nl|'\n' comment|'# All Rights Reserved.' nl|'\n' comment|'#' nl|'\n' comment|'# Licensed under the Apache License, Version 2.0 (the "License"); you may' nl|'\n' comment|'# not use this file except in compliance with the License. You may obtain' nl|'\n' comment|'# a copy of the License at' nl|'\n' comment|'#' nl|'\n' comment|'# http://www.apache.org/licenses/LICENSE-2.0' nl|'\n' comment|'#' nl|'\n' comment|'# Unless required by applicable law or agreed to in writing, software' nl|'\n' comment|'# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT' nl|'\n' comment|'# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the' nl|'\n' comment|'# License for the specific language governing permissions and limitations' nl|'\n' comment|'# under the License.' nl|'\n' nl|'\n' string|'"""Tests for network API."""' newline|'\n' nl|'\n' name|'import' name|'itertools' newline|'\n' name|'import' name|'uuid' newline|'\n' nl|'\n' name|'import' name|'mock' newline|'\n' name|'from' name|'oslo_policy' name|'import' name|'policy' name|'as' name|'oslo_policy' newline|'\n' nl|'\n' name|'from' name|'nova' op|'.' name|'compute' name|'import' name|'flavors' newline|'\n' name|'from' name|'nova' name|'import' name|'context' newline|'\n' name|'from' name|'nova' name|'import' name|'exception' newline|'\n' name|'from' name|'nova' name|'import' name|'network' newline|'\n' name|'from' name|'nova' op|'.' name|'network' name|'import' name|'api' newline|'\n' name|'from' name|'nova' op|'.' name|'network' name|'import' name|'base_api' newline|'\n' name|'from' name|'nova' op|'.' name|'network' name|'import' name|'floating_ips' newline|'\n' name|'from' name|'nova' op|'.' name|'network' name|'import' name|'model' name|'as' name|'network_model' newline|'\n' name|'from' name|'nova' name|'import' name|'objects' newline|'\n' name|'from' name|'nova' op|'.' name|'objects' name|'import' name|'fields' newline|'\n' name|'from' name|'nova' name|'import' name|'policy' newline|'\n' name|'from' name|'nova' name|'import' name|'test' newline|'\n' name|'from' name|'nova' op|'.' name|'tests' op|'.' name|'unit' op|'.' name|'api' op|'.' name|'openstack' name|'import' name|'fakes' newline|'\n' name|'from' name|'nova' op|'.' name|'tests' op|'.' name|'unit' name|'import' name|'fake_instance' newline|'\n' name|'from' name|'nova' op|'.' name|'tests' op|'.' name|'unit' op|'.' name|'objects' name|'import' name|'test_fixed_ip' newline|'\n' name|'from' name|'nova' op|'.' name|'tests' op|'.' name|'unit' op|'.' name|'objects' name|'import' name|'test_virtual_interface' newline|'\n' name|'from' name|'nova' op|'.' name|'tests' name|'import' name|'uuidsentinel' name|'as' name|'uuids' newline|'\n' nl|'\n' DECL|variable|FAKE_UUID name|'FAKE_UUID' op|'=' string|"'a47ae74e-ab08-547f-9eee-ffd23fc46c16'" newline|'\n' nl|'\n' DECL|variable|fake_info_cache name|'fake_info_cache' op|'=' op|'{' nl|'\n' string|"'created_at'" op|':' name|'None' op|',' nl|'\n' string|"'updated_at'" op|':' name|'None' op|',' nl|'\n' string|"'deleted_at'" op|':' name|'None' op|',' nl|'\n' string|"'deleted'" op|':' name|'False' op|',' nl|'\n' string|"'instance_uuid'" op|':' name|'uuids' op|'.' name|'instance' op|',' nl|'\n' string|"'network_info'" op|':' string|"'[]'" op|',' nl|'\n' op|'}' newline|'\n' nl|'\n' nl|'\n' DECL|class|NetworkPolicyTestCase name|'class' name|'NetworkPolicyTestCase' op|'(' name|'test' op|'.' 
name|'TestCase' op|')' op|':' newline|'\n' DECL|member|setUp indent|' ' name|'def' name|'setUp' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'super' op|'(' name|'NetworkPolicyTestCase' op|',' name|'self' op|')' op|'.' name|'setUp' op|'(' op|')' newline|'\n' nl|'\n' name|'policy' op|'.' name|'reset' op|'(' op|')' newline|'\n' name|'policy' op|'.' name|'init' op|'(' op|')' newline|'\n' nl|'\n' name|'self' op|'.' name|'context' op|'=' name|'context' op|'.' name|'get_admin_context' op|'(' op|')' newline|'\n' nl|'\n' DECL|member|tearDown dedent|'' name|'def' name|'tearDown' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'super' op|'(' name|'NetworkPolicyTestCase' op|',' name|'self' op|')' op|'.' name|'tearDown' op|'(' op|')' newline|'\n' name|'policy' op|'.' name|'reset' op|'(' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'.' name|'object' op|'(' name|'policy' op|',' string|"'enforce'" op|')' newline|'\n' DECL|member|test_check_policy name|'def' name|'test_check_policy' op|'(' name|'self' op|',' name|'mock_enforce' op|')' op|':' newline|'\n' indent|' ' name|'target' op|'=' op|'{' nl|'\n' string|"'project_id'" op|':' name|'self' op|'.' name|'context' op|'.' name|'project_id' op|',' nl|'\n' string|"'user_id'" op|':' name|'self' op|'.' name|'context' op|'.' name|'user_id' op|',' nl|'\n' op|'}' newline|'\n' name|'api' op|'.' name|'check_policy' op|'(' name|'self' op|'.' name|'context' op|',' string|"'get_all'" op|')' newline|'\n' name|'mock_enforce' op|'.' name|'assert_called_once_with' op|'(' nl|'\n' name|'self' op|'.' name|'context' op|',' string|"'network:get_all'" op|',' name|'target' op|')' newline|'\n' nl|'\n' DECL|member|test_skip_policy dedent|'' name|'def' name|'test_skip_policy' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'policy' op|'.' name|'reset' op|'(' op|')' newline|'\n' name|'rules' op|'=' op|'{' string|"'network:get_all'" op|':' string|"'!'" op|'}' newline|'\n' name|'policy' op|'.' name|'set_rules' op|'(' name|'oslo_policy' op|'.' name|'Rules' op|'.' name|'from_dict' op|'(' name|'rules' op|')' op|')' newline|'\n' name|'api' op|'=' name|'network' op|'.' name|'API' op|'(' op|')' newline|'\n' name|'self' op|'.' name|'assertRaises' op|'(' name|'exception' op|'.' name|'PolicyNotAuthorized' op|',' nl|'\n' name|'api' op|'.' name|'get_all' op|',' name|'self' op|'.' name|'context' op|')' newline|'\n' name|'api' op|'=' name|'network' op|'.' name|'API' op|'(' name|'skip_policy_check' op|'=' name|'True' op|')' newline|'\n' name|'api' op|'.' name|'get_all' op|'(' name|'self' op|'.' name|'context' op|')' newline|'\n' nl|'\n' nl|'\n' DECL|class|ApiTestCase dedent|'' dedent|'' name|'class' name|'ApiTestCase' op|'(' name|'test' op|'.' name|'TestCase' op|')' op|':' newline|'\n' DECL|member|setUp indent|' ' name|'def' name|'setUp' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'super' op|'(' name|'ApiTestCase' op|',' name|'self' op|')' op|'.' name|'setUp' op|'(' op|')' newline|'\n' name|'self' op|'.' name|'network_api' op|'=' name|'network' op|'.' name|'API' op|'(' op|')' newline|'\n' name|'self' op|'.' name|'context' op|'=' name|'context' op|'.' name|'RequestContext' op|'(' string|"'fake-user'" op|',' nl|'\n' name|'fakes' op|'.' name|'FAKE_PROJECT_ID' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' 
name|'patch' op|'(' string|"'nova.objects.NetworkList.get_all'" op|')' newline|'\n' DECL|member|test_get_all name|'def' name|'test_get_all' op|'(' name|'self' op|',' name|'mock_get_all' op|')' op|':' newline|'\n' indent|' ' name|'mock_get_all' op|'.' name|'return_value' op|'=' name|'mock' op|'.' name|'sentinel' op|'.' name|'get_all' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'mock' op|'.' name|'sentinel' op|'.' name|'get_all' op|',' nl|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'get_all' op|'(' name|'self' op|'.' name|'context' op|')' op|')' newline|'\n' name|'mock_get_all' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'project_only' op|'=' name|'True' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.NetworkList.get_all'" op|')' newline|'\n' DECL|member|test_get_all_liberal name|'def' name|'test_get_all_liberal' op|'(' name|'self' op|',' name|'mock_get_all' op|')' op|':' newline|'\n' indent|' ' name|'self' op|'.' name|'flags' op|'(' name|'network_manager' op|'=' string|"'nova.network.manager.FlatDHCPManaager'" op|')' newline|'\n' name|'mock_get_all' op|'.' name|'return_value' op|'=' name|'mock' op|'.' name|'sentinel' op|'.' name|'get_all' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'mock' op|'.' name|'sentinel' op|'.' name|'get_all' op|',' nl|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'get_all' op|'(' name|'self' op|'.' name|'context' op|')' op|')' newline|'\n' name|'mock_get_all' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'project_only' op|'=' string|'"allow_none"' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.NetworkList.get_all'" op|')' newline|'\n' DECL|member|test_get_all_no_networks name|'def' name|'test_get_all_no_networks' op|'(' name|'self' op|',' name|'mock_get_all' op|')' op|':' newline|'\n' indent|' ' name|'mock_get_all' op|'.' name|'side_effect' op|'=' name|'exception' op|'.' name|'NoNetworksFound' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' op|'[' op|']' op|',' name|'self' op|'.' name|'network_api' op|'.' name|'get_all' op|'(' name|'self' op|'.' name|'context' op|')' op|')' newline|'\n' name|'mock_get_all' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'project_only' op|'=' name|'True' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.Network.get_by_uuid'" op|')' newline|'\n' DECL|member|test_get name|'def' name|'test_get' op|'(' name|'self' op|',' name|'mock_get' op|')' op|':' newline|'\n' indent|' ' name|'mock_get' op|'.' name|'return_value' op|'=' name|'mock' op|'.' name|'sentinel' op|'.' name|'get_by_uuid' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'mock' op|'.' name|'sentinel' op|'.' name|'get_by_uuid' op|',' nl|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'get' op|'(' name|'self' op|'.' name|'context' op|',' name|'uuids' op|'.' name|'instance' op|')' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.Network.get_by_id'" op|')' newline|'\n' op|'@' name|'mock' op|'.' 
name|'patch' op|'(' string|"'nova.db.virtual_interface_get_by_instance'" op|')' newline|'\n' DECL|member|test_get_vifs_by_instance name|'def' name|'test_get_vifs_by_instance' op|'(' name|'self' op|',' name|'mock_get_by_instance' op|',' nl|'\n' name|'mock_get_by_id' op|')' op|':' newline|'\n' indent|' ' name|'mock_get_by_instance' op|'.' name|'return_value' op|'=' op|'[' nl|'\n' name|'dict' op|'(' name|'test_virtual_interface' op|'.' name|'fake_vif' op|',' nl|'\n' name|'network_id' op|'=' number|'123' op|')' op|']' newline|'\n' name|'mock_get_by_id' op|'.' name|'return_value' op|'=' name|'objects' op|'.' name|'Network' op|'(' op|')' newline|'\n' name|'mock_get_by_id' op|'.' name|'return_value' op|'.' name|'uuid' op|'=' name|'uuids' op|'.' name|'network_1' newline|'\n' name|'instance' op|'=' name|'objects' op|'.' name|'Instance' op|'(' name|'uuid' op|'=' name|'uuids' op|'.' name|'instance' op|')' newline|'\n' name|'vifs' op|'=' name|'self' op|'.' name|'network_api' op|'.' name|'get_vifs_by_instance' op|'(' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'instance' op|')' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' number|'1' op|',' name|'len' op|'(' name|'vifs' op|')' op|')' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' number|'123' op|',' name|'vifs' op|'[' number|'0' op|']' op|'.' name|'network_id' op|')' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'uuids' op|'.' name|'network_1' op|',' name|'vifs' op|'[' number|'0' op|']' op|'.' name|'net_uuid' op|')' newline|'\n' name|'mock_get_by_instance' op|'.' name|'assert_called_once_with' op|'(' nl|'\n' name|'self' op|'.' name|'context' op|',' name|'uuids' op|'.' name|'instance' op|')' newline|'\n' name|'mock_get_by_id' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' number|'123' op|',' nl|'\n' name|'project_only' op|'=' string|"'allow_none'" op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.Network.get_by_id'" op|')' newline|'\n' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.db.virtual_interface_get_by_address'" op|')' newline|'\n' DECL|member|test_get_vif_by_mac_address name|'def' name|'test_get_vif_by_mac_address' op|'(' name|'self' op|',' name|'mock_get_by_address' op|',' nl|'\n' name|'mock_get_by_id' op|')' op|':' newline|'\n' indent|' ' name|'mock_get_by_address' op|'.' name|'return_value' op|'=' name|'dict' op|'(' nl|'\n' name|'test_virtual_interface' op|'.' name|'fake_vif' op|',' name|'network_id' op|'=' number|'123' op|')' newline|'\n' name|'mock_get_by_id' op|'.' name|'return_value' op|'=' name|'objects' op|'.' name|'Network' op|'(' nl|'\n' name|'uuid' op|'=' name|'uuids' op|'.' name|'network_1' op|')' newline|'\n' name|'vif' op|'=' name|'self' op|'.' name|'network_api' op|'.' name|'get_vif_by_mac_address' op|'(' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'mock' op|'.' name|'sentinel' op|'.' name|'mac' op|')' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' number|'123' op|',' name|'vif' op|'.' name|'network_id' op|')' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'uuids' op|'.' name|'network_1' op|',' name|'vif' op|'.' name|'net_uuid' op|')' newline|'\n' name|'mock_get_by_address' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'mock' op|'.' name|'sentinel' op|'.' name|'mac' op|')' newline|'\n' name|'mock_get_by_id' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' 
name|'context' op|',' number|'123' op|',' nl|'\n' name|'project_only' op|'=' string|"'allow_none'" op|')' newline|'\n' nl|'\n' DECL|member|test_allocate_for_instance_handles_macs_passed dedent|'' name|'def' name|'test_allocate_for_instance_handles_macs_passed' op|'(' name|'self' op|')' op|':' newline|'\n' comment|"# If a macs argument is supplied to the 'nova-network' API, it is just" nl|'\n' comment|'# ignored. This test checks that the call down to the rpcapi layer' nl|'\n' comment|"# doesn't pass macs down: nova-network doesn't support hypervisor" nl|'\n' comment|'# mac address limits (today anyhow).' nl|'\n' indent|' ' name|'macs' op|'=' name|'set' op|'(' op|'[' string|"'ab:cd:ef:01:23:34'" op|']' op|')' newline|'\n' name|'with' name|'mock' op|'.' name|'patch' op|'.' name|'object' op|'(' name|'self' op|'.' name|'network_api' op|'.' name|'network_rpcapi' op|',' nl|'\n' string|'"allocate_for_instance"' op|')' name|'as' name|'mock_alloc' op|':' newline|'\n' indent|' ' name|'kwargs' op|'=' name|'dict' op|'(' name|'zip' op|'(' op|'[' string|"'host'" op|',' string|"'instance_id'" op|',' string|"'project_id'" op|',' nl|'\n' string|"'requested_networks'" op|',' string|"'rxtx_factor'" op|',' string|"'vpn'" op|',' nl|'\n' string|"'macs'" op|',' string|"'dhcp_options'" op|']' op|',' nl|'\n' name|'itertools' op|'.' name|'repeat' op|'(' name|'mock' op|'.' name|'ANY' op|')' op|')' op|')' newline|'\n' name|'mock_alloc' op|'.' name|'return_value' op|'=' op|'[' op|']' newline|'\n' name|'flavor' op|'=' name|'flavors' op|'.' name|'get_default_flavor' op|'(' op|')' newline|'\n' name|'flavor' op|'[' string|"'rxtx_factor'" op|']' op|'=' number|'0' newline|'\n' name|'instance' op|'=' name|'objects' op|'.' name|'Instance' op|'(' name|'id' op|'=' number|'1' op|',' name|'uuid' op|'=' name|'uuids' op|'.' name|'instance' op|',' nl|'\n' name|'project_id' op|'=' string|"'project_id'" op|',' nl|'\n' name|'host' op|'=' string|"'host'" op|',' name|'system_metadata' op|'=' op|'{' op|'}' op|',' nl|'\n' name|'flavor' op|'=' name|'flavor' op|')' newline|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'allocate_for_instance' op|'(' nl|'\n' name|'self' op|'.' name|'context' op|',' name|'instance' op|',' string|"'vpn'" op|',' string|"'requested_networks'" op|',' name|'macs' op|'=' name|'macs' op|')' newline|'\n' name|'mock_alloc' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' op|'**' name|'kwargs' op|')' newline|'\n' nl|'\n' DECL|member|_do_test_associate_floating_ip dedent|'' dedent|'' name|'def' name|'_do_test_associate_floating_ip' op|'(' name|'self' op|',' name|'orig_instance_uuid' op|')' op|':' newline|'\n' indent|' ' string|'"""Test post-association logic."""' newline|'\n' nl|'\n' name|'new_instance' op|'=' name|'objects' op|'.' name|'Instance' op|'(' name|'uuid' op|'=' name|'FAKE_UUID' op|')' newline|'\n' nl|'\n' DECL|function|fake_associate name|'def' name|'fake_associate' op|'(' op|'*' name|'args' op|',' op|'**' name|'kwargs' op|')' op|':' newline|'\n' indent|' ' name|'return' name|'orig_instance_uuid' newline|'\n' nl|'\n' DECL|function|fake_instance_get_by_uuid dedent|'' name|'def' name|'fake_instance_get_by_uuid' op|'(' name|'context' op|',' name|'instance_uuid' op|',' nl|'\n' name|'columns_to_join' op|'=' name|'None' op|',' nl|'\n' name|'use_slave' op|'=' name|'None' op|')' op|':' newline|'\n' indent|' ' name|'if' name|'instance_uuid' op|'==' name|'orig_instance_uuid' op|':' newline|'\n' indent|' ' name|'self' op|'.' 
name|'assertIn' op|'(' string|"'extra.flavor'" op|',' name|'columns_to_join' op|')' newline|'\n' dedent|'' name|'return' name|'fake_instance' op|'.' name|'fake_db_instance' op|'(' name|'uuid' op|'=' name|'instance_uuid' op|')' newline|'\n' nl|'\n' DECL|function|fake_get_nw_info dedent|'' name|'def' name|'fake_get_nw_info' op|'(' name|'ctxt' op|',' name|'instance' op|')' op|':' newline|'\n' DECL|class|FakeNWInfo indent|' ' name|'class' name|'FakeNWInfo' op|'(' name|'object' op|')' op|':' newline|'\n' DECL|member|json indent|' ' name|'def' name|'json' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'pass' newline|'\n' dedent|'' dedent|'' name|'return' name|'FakeNWInfo' op|'(' op|')' newline|'\n' nl|'\n' dedent|'' name|'if' name|'orig_instance_uuid' op|':' newline|'\n' indent|' ' name|'expected_updated_instances' op|'=' op|'[' name|'new_instance' op|'.' name|'uuid' op|',' nl|'\n' name|'orig_instance_uuid' op|']' newline|'\n' dedent|'' name|'else' op|':' newline|'\n' indent|' ' name|'expected_updated_instances' op|'=' op|'[' name|'new_instance' op|'.' name|'uuid' op|']' newline|'\n' nl|'\n' DECL|function|fake_instance_info_cache_update dedent|'' name|'def' name|'fake_instance_info_cache_update' op|'(' name|'context' op|',' name|'instance_uuid' op|',' name|'cache' op|')' op|':' newline|'\n' indent|' ' name|'self' op|'.' name|'assertEqual' op|'(' name|'instance_uuid' op|',' nl|'\n' name|'expected_updated_instances' op|'.' name|'pop' op|'(' op|')' op|')' newline|'\n' name|'return' name|'fake_info_cache' newline|'\n' nl|'\n' DECL|function|fake_update_instance_cache_with_nw_info dedent|'' name|'def' name|'fake_update_instance_cache_with_nw_info' op|'(' name|'api' op|',' name|'context' op|',' name|'instance' op|',' nl|'\n' name|'nw_info' op|'=' name|'None' op|',' nl|'\n' name|'update_cells' op|'=' name|'True' op|')' op|':' newline|'\n' indent|' ' name|'return' newline|'\n' nl|'\n' dedent|'' name|'with' name|'test' op|'.' name|'nested' op|'(' nl|'\n' name|'mock' op|'.' name|'patch' op|'.' name|'object' op|'(' name|'floating_ips' op|'.' name|'FloatingIP' op|',' string|"'associate_floating_ip'" op|',' nl|'\n' name|'fake_associate' op|')' op|',' nl|'\n' name|'mock' op|'.' name|'patch' op|'.' name|'object' op|'(' name|'self' op|'.' name|'network_api' op|'.' name|'db' op|',' string|"'instance_get_by_uuid'" op|',' nl|'\n' name|'fake_instance_get_by_uuid' op|')' op|',' nl|'\n' name|'mock' op|'.' name|'patch' op|'.' name|'object' op|'(' name|'self' op|'.' name|'network_api' op|',' string|"'_get_instance_nw_info'" op|',' nl|'\n' name|'fake_get_nw_info' op|')' op|',' nl|'\n' name|'mock' op|'.' name|'patch' op|'.' name|'object' op|'(' name|'self' op|'.' name|'network_api' op|'.' name|'db' op|',' nl|'\n' string|"'instance_info_cache_update'" op|',' nl|'\n' name|'fake_instance_info_cache_update' op|')' op|',' nl|'\n' name|'mock' op|'.' name|'patch' op|'.' name|'object' op|'(' name|'base_api' op|',' string|'"update_instance_cache_with_nw_info"' op|',' nl|'\n' name|'fake_update_instance_cache_with_nw_info' op|')' nl|'\n' op|')' op|':' newline|'\n' indent|' ' name|'self' op|'.' name|'network_api' op|'.' name|'associate_floating_ip' op|'(' name|'self' op|'.' 
name|'context' op|',' nl|'\n' name|'new_instance' op|',' nl|'\n' string|"'172.24.4.225'" op|',' nl|'\n' string|"'10.0.0.2'" op|')' newline|'\n' nl|'\n' DECL|member|test_associate_preassociated_floating_ip dedent|'' dedent|'' name|'def' name|'test_associate_preassociated_floating_ip' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'self' op|'.' name|'_do_test_associate_floating_ip' op|'(' name|'uuids' op|'.' name|'orig_uuid' op|')' newline|'\n' nl|'\n' DECL|member|test_associate_unassociated_floating_ip dedent|'' name|'def' name|'test_associate_unassociated_floating_ip' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'self' op|'.' name|'_do_test_associate_floating_ip' op|'(' name|'None' op|')' newline|'\n' nl|'\n' DECL|member|test_get_floating_ip_invalid_id dedent|'' name|'def' name|'test_get_floating_ip_invalid_id' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'self' op|'.' name|'assertRaises' op|'(' name|'exception' op|'.' name|'InvalidID' op|',' nl|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'get_floating_ip' op|',' nl|'\n' name|'self' op|'.' name|'context' op|',' string|"'123zzz'" op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.FloatingIP.get_by_id'" op|')' newline|'\n' DECL|member|test_get_floating_ip name|'def' name|'test_get_floating_ip' op|'(' name|'self' op|',' name|'mock_get' op|')' op|':' newline|'\n' indent|' ' name|'floating' op|'=' name|'mock' op|'.' name|'sentinel' op|'.' name|'floating' newline|'\n' name|'mock_get' op|'.' name|'return_value' op|'=' name|'floating' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'floating' op|',' nl|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'get_floating_ip' op|'(' name|'self' op|'.' name|'context' op|',' number|'123' op|')' op|')' newline|'\n' name|'mock_get' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' number|'123' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.FloatingIP.get_pool_names'" op|')' newline|'\n' DECL|member|test_get_floating_ip_pools name|'def' name|'test_get_floating_ip_pools' op|'(' name|'self' op|',' name|'mock_get' op|')' op|':' newline|'\n' indent|' ' name|'pools' op|'=' op|'[' string|"'foo'" op|',' string|"'bar'" op|']' newline|'\n' name|'mock_get' op|'.' name|'return_value' op|'=' name|'pools' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'pools' op|',' nl|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'get_floating_ip_pools' op|'(' nl|'\n' name|'self' op|'.' name|'context' op|')' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.FloatingIP.get_by_address'" op|')' newline|'\n' DECL|member|test_get_floating_ip_by_address name|'def' name|'test_get_floating_ip_by_address' op|'(' name|'self' op|',' name|'mock_get' op|')' op|':' newline|'\n' indent|' ' name|'floating' op|'=' name|'mock' op|'.' name|'sentinel' op|'.' name|'floating' newline|'\n' name|'mock_get' op|'.' name|'return_value' op|'=' name|'floating' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'floating' op|',' nl|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'get_floating_ip_by_address' op|'(' nl|'\n' name|'self' op|'.' name|'context' op|',' name|'mock' op|'.' name|'sentinel' op|'.' name|'address' op|')' op|')' newline|'\n' name|'mock_get' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' 
name|'context' op|',' nl|'\n' name|'mock' op|'.' name|'sentinel' op|'.' name|'address' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.FloatingIPList.get_by_project'" op|')' newline|'\n' DECL|member|test_get_floating_ips_by_project name|'def' name|'test_get_floating_ips_by_project' op|'(' name|'self' op|',' name|'mock_get' op|')' op|':' newline|'\n' indent|' ' name|'floatings' op|'=' name|'mock' op|'.' name|'sentinel' op|'.' name|'floating_ips' newline|'\n' name|'mock_get' op|'.' name|'return_value' op|'=' name|'floatings' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'floatings' op|',' nl|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'get_floating_ips_by_project' op|'(' nl|'\n' name|'self' op|'.' name|'context' op|')' op|')' newline|'\n' name|'mock_get' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'self' op|'.' name|'context' op|'.' name|'project_id' op|')' newline|'\n' nl|'\n' DECL|member|_stub_migrate_instance_calls dedent|'' name|'def' name|'_stub_migrate_instance_calls' op|'(' name|'self' op|',' name|'method' op|',' name|'multi_host' op|',' name|'info' op|')' op|':' newline|'\n' indent|' ' name|'fake_flavor' op|'=' name|'flavors' op|'.' name|'get_default_flavor' op|'(' op|')' newline|'\n' name|'fake_flavor' op|'[' string|"'rxtx_factor'" op|']' op|'=' number|'1.21' newline|'\n' name|'fake_instance' op|'=' name|'objects' op|'.' name|'Instance' op|'(' nl|'\n' name|'uuid' op|'=' name|'uuid' op|'.' name|'uuid4' op|'(' op|')' op|'.' name|'hex' op|',' nl|'\n' name|'project_id' op|'=' string|"'fake_project_id'" op|',' nl|'\n' name|'instance_type_id' op|'=' name|'fake_flavor' op|'[' string|"'id'" op|']' op|',' nl|'\n' name|'flavor' op|'=' name|'fake_flavor' op|',' nl|'\n' name|'system_metadata' op|'=' op|'{' op|'}' op|')' newline|'\n' name|'fake_migration' op|'=' op|'{' string|"'source_compute'" op|':' string|"'fake_compute_source'" op|',' nl|'\n' string|"'dest_compute'" op|':' string|"'fake_compute_dest'" op|'}' newline|'\n' nl|'\n' DECL|function|fake_mig_inst_method name|'def' name|'fake_mig_inst_method' op|'(' op|'*' name|'args' op|',' op|'**' name|'kwargs' op|')' op|':' newline|'\n' indent|' ' name|'info' op|'[' string|"'kwargs'" op|']' op|'=' name|'kwargs' newline|'\n' nl|'\n' DECL|function|fake_get_multi_addresses dedent|'' name|'def' name|'fake_get_multi_addresses' op|'(' op|'*' name|'args' op|',' op|'**' name|'kwargs' op|')' op|':' newline|'\n' indent|' ' name|'return' name|'multi_host' op|',' op|'[' string|"'fake_float1'" op|',' string|"'fake_float2'" op|']' newline|'\n' nl|'\n' dedent|'' name|'self' op|'.' name|'stub_out' op|'(' string|"'nova.network.rpcapi.NetworkAPI.'" op|'+' name|'method' op|',' nl|'\n' name|'fake_mig_inst_method' op|')' newline|'\n' name|'self' op|'.' name|'stub_out' op|'(' string|"'nova.network.api.API._get_multi_addresses'" op|',' nl|'\n' name|'fake_get_multi_addresses' op|')' newline|'\n' nl|'\n' name|'expected' op|'=' op|'{' string|"'instance_uuid'" op|':' name|'fake_instance' op|'.' 
name|'uuid' op|',' nl|'\n' string|"'source_compute'" op|':' string|"'fake_compute_source'" op|',' nl|'\n' string|"'dest_compute'" op|':' string|"'fake_compute_dest'" op|',' nl|'\n' string|"'rxtx_factor'" op|':' number|'1.21' op|',' nl|'\n' string|"'project_id'" op|':' string|"'fake_project_id'" op|',' nl|'\n' string|"'floating_addresses'" op|':' name|'None' op|'}' newline|'\n' name|'if' name|'multi_host' op|':' newline|'\n' indent|' ' name|'expected' op|'[' string|"'floating_addresses'" op|']' op|'=' op|'[' string|"'fake_float1'" op|',' string|"'fake_float2'" op|']' newline|'\n' dedent|'' name|'return' name|'fake_instance' op|',' name|'fake_migration' op|',' name|'expected' newline|'\n' nl|'\n' DECL|member|test_migrate_instance_start_with_multhost dedent|'' name|'def' name|'test_migrate_instance_start_with_multhost' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'info' op|'=' op|'{' string|"'kwargs'" op|':' op|'{' op|'}' op|'}' newline|'\n' name|'arg1' op|',' name|'arg2' op|',' name|'expected' op|'=' name|'self' op|'.' name|'_stub_migrate_instance_calls' op|'(' nl|'\n' string|"'migrate_instance_start'" op|',' name|'True' op|',' name|'info' op|')' newline|'\n' name|'expected' op|'[' string|"'host'" op|']' op|'=' string|"'fake_compute_source'" newline|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'migrate_instance_start' op|'(' name|'self' op|'.' name|'context' op|',' name|'arg1' op|',' name|'arg2' op|')' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'info' op|'[' string|"'kwargs'" op|']' op|',' name|'expected' op|')' newline|'\n' nl|'\n' DECL|member|test_migrate_instance_start_without_multhost dedent|'' name|'def' name|'test_migrate_instance_start_without_multhost' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'info' op|'=' op|'{' string|"'kwargs'" op|':' op|'{' op|'}' op|'}' newline|'\n' name|'arg1' op|',' name|'arg2' op|',' name|'expected' op|'=' name|'self' op|'.' name|'_stub_migrate_instance_calls' op|'(' nl|'\n' string|"'migrate_instance_start'" op|',' name|'False' op|',' name|'info' op|')' newline|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'migrate_instance_start' op|'(' name|'self' op|'.' name|'context' op|',' name|'arg1' op|',' name|'arg2' op|')' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'info' op|'[' string|"'kwargs'" op|']' op|',' name|'expected' op|')' newline|'\n' nl|'\n' DECL|member|test_migrate_instance_finish_with_multhost dedent|'' name|'def' name|'test_migrate_instance_finish_with_multhost' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'info' op|'=' op|'{' string|"'kwargs'" op|':' op|'{' op|'}' op|'}' newline|'\n' name|'arg1' op|',' name|'arg2' op|',' name|'expected' op|'=' name|'self' op|'.' name|'_stub_migrate_instance_calls' op|'(' nl|'\n' string|"'migrate_instance_finish'" op|',' name|'True' op|',' name|'info' op|')' newline|'\n' name|'expected' op|'[' string|"'host'" op|']' op|'=' string|"'fake_compute_dest'" newline|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'migrate_instance_finish' op|'(' name|'self' op|'.' name|'context' op|',' name|'arg1' op|',' name|'arg2' op|')' newline|'\n' name|'self' op|'.' 
name|'assertEqual' op|'(' name|'info' op|'[' string|"'kwargs'" op|']' op|',' name|'expected' op|')' newline|'\n' nl|'\n' DECL|member|test_migrate_instance_finish_without_multhost dedent|'' name|'def' name|'test_migrate_instance_finish_without_multhost' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'info' op|'=' op|'{' string|"'kwargs'" op|':' op|'{' op|'}' op|'}' newline|'\n' name|'arg1' op|',' name|'arg2' op|',' name|'expected' op|'=' name|'self' op|'.' name|'_stub_migrate_instance_calls' op|'(' nl|'\n' string|"'migrate_instance_finish'" op|',' name|'False' op|',' name|'info' op|')' newline|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'migrate_instance_finish' op|'(' name|'self' op|'.' name|'context' op|',' name|'arg1' op|',' name|'arg2' op|')' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'info' op|'[' string|"'kwargs'" op|']' op|',' name|'expected' op|')' newline|'\n' nl|'\n' DECL|member|test_is_multi_host_instance_has_no_fixed_ip dedent|'' name|'def' name|'test_is_multi_host_instance_has_no_fixed_ip' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'with' name|'mock' op|'.' name|'patch' op|'.' name|'object' op|'(' name|'self' op|'.' name|'network_api' op|'.' name|'db' op|',' string|"'fixed_ip_get_by_instance'" op|',' nl|'\n' name|'side_effect' op|'=' name|'exception' op|'.' name|'FixedIpNotFoundForInstance' op|'(' nl|'\n' name|'instance_uuid' op|'=' name|'uuid' op|')' op|')' op|':' newline|'\n' indent|' ' name|'instance' op|'=' name|'objects' op|'.' name|'Instance' op|'(' name|'uuid' op|'=' name|'FAKE_UUID' op|')' newline|'\n' name|'result' op|',' name|'floats' op|'=' op|'(' nl|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'_get_multi_addresses' op|'(' name|'self' op|'.' name|'context' op|',' name|'instance' op|')' op|')' newline|'\n' name|'self' op|'.' name|'assertFalse' op|'(' name|'result' op|')' newline|'\n' nl|'\n' dedent|'' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid'" op|')' newline|'\n' DECL|member|_test_is_multi_host_network_has_no_project_id name|'def' name|'_test_is_multi_host_network_has_no_project_id' op|'(' name|'self' op|',' name|'is_multi_host' op|',' nl|'\n' name|'fip_get' op|')' op|':' newline|'\n' indent|' ' name|'network' op|'=' name|'objects' op|'.' name|'Network' op|'(' nl|'\n' name|'id' op|'=' number|'123' op|',' name|'project_id' op|'=' name|'None' op|',' nl|'\n' name|'multi_host' op|'=' name|'is_multi_host' op|')' newline|'\n' name|'fip_get' op|'.' name|'return_value' op|'=' op|'[' nl|'\n' name|'objects' op|'.' name|'FixedIP' op|'(' name|'instance_uuid' op|'=' name|'FAKE_UUID' op|',' name|'network' op|'=' name|'network' op|',' nl|'\n' name|'floating_ips' op|'=' name|'objects' op|'.' name|'FloatingIPList' op|'(' op|')' op|')' op|']' newline|'\n' name|'instance' op|'=' name|'objects' op|'.' name|'Instance' op|'(' name|'uuid' op|'=' name|'FAKE_UUID' op|')' newline|'\n' name|'result' op|',' name|'floats' op|'=' name|'self' op|'.' name|'network_api' op|'.' name|'_get_multi_addresses' op|'(' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'instance' op|')' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'is_multi_host' op|',' name|'result' op|')' newline|'\n' nl|'\n' DECL|member|test_is_multi_host_network_has_no_project_id_multi dedent|'' name|'def' name|'test_is_multi_host_network_has_no_project_id_multi' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'self' op|'.' 
name|'_test_is_multi_host_network_has_no_project_id' op|'(' name|'True' op|')' newline|'\n' nl|'\n' DECL|member|test_is_multi_host_network_has_no_project_id_non_multi dedent|'' name|'def' name|'test_is_multi_host_network_has_no_project_id_non_multi' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'self' op|'.' name|'_test_is_multi_host_network_has_no_project_id' op|'(' name|'False' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid'" op|')' newline|'\n' DECL|member|_test_is_multi_host_network_has_project_id name|'def' name|'_test_is_multi_host_network_has_project_id' op|'(' name|'self' op|',' name|'is_multi_host' op|',' nl|'\n' name|'fip_get' op|')' op|':' newline|'\n' indent|' ' name|'network' op|'=' name|'objects' op|'.' name|'Network' op|'(' nl|'\n' name|'id' op|'=' number|'123' op|',' name|'project_id' op|'=' name|'self' op|'.' name|'context' op|'.' name|'project_id' op|',' nl|'\n' name|'multi_host' op|'=' name|'is_multi_host' op|')' newline|'\n' name|'fip_get' op|'.' name|'return_value' op|'=' op|'[' nl|'\n' name|'objects' op|'.' name|'FixedIP' op|'(' name|'instance_uuid' op|'=' name|'FAKE_UUID' op|',' name|'network' op|'=' name|'network' op|',' nl|'\n' name|'floating_ips' op|'=' name|'objects' op|'.' name|'FloatingIPList' op|'(' op|')' op|')' op|']' newline|'\n' name|'instance' op|'=' name|'objects' op|'.' name|'Instance' op|'(' name|'uuid' op|'=' name|'FAKE_UUID' op|')' newline|'\n' name|'result' op|',' name|'floats' op|'=' name|'self' op|'.' name|'network_api' op|'.' name|'_get_multi_addresses' op|'(' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'instance' op|')' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'is_multi_host' op|',' name|'result' op|')' newline|'\n' nl|'\n' DECL|member|test_is_multi_host_network_has_project_id_multi dedent|'' name|'def' name|'test_is_multi_host_network_has_project_id_multi' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'self' op|'.' name|'_test_is_multi_host_network_has_project_id' op|'(' name|'True' op|')' newline|'\n' nl|'\n' DECL|member|test_is_multi_host_network_has_project_id_non_multi dedent|'' name|'def' name|'test_is_multi_host_network_has_project_id_non_multi' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'self' op|'.' name|'_test_is_multi_host_network_has_project_id' op|'(' name|'False' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.Network.get_by_uuid'" op|')' newline|'\n' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.Network.disassociate'" op|')' newline|'\n' DECL|member|test_network_disassociate_project name|'def' name|'test_network_disassociate_project' op|'(' name|'self' op|',' name|'mock_disassociate' op|',' name|'mock_get' op|')' op|':' newline|'\n' indent|' ' name|'net_obj' op|'=' name|'objects' op|'.' name|'Network' op|'(' name|'context' op|'=' name|'self' op|'.' name|'context' op|',' name|'id' op|'=' number|'1' op|')' newline|'\n' name|'mock_get' op|'.' name|'return_value' op|'=' name|'net_obj' newline|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'associate' op|'(' name|'self' op|'.' name|'context' op|',' name|'FAKE_UUID' op|',' name|'project' op|'=' name|'None' op|')' newline|'\n' name|'mock_disassociate' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' name|'net_obj' op|'.' 
name|'id' op|',' nl|'\n' name|'host' op|'=' name|'False' op|',' name|'project' op|'=' name|'True' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.Network.get_by_uuid'" op|')' newline|'\n' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.Network.disassociate'" op|')' newline|'\n' DECL|member|test_network_disassociate_host name|'def' name|'test_network_disassociate_host' op|'(' name|'self' op|',' name|'mock_disassociate' op|',' name|'mock_get' op|')' op|':' newline|'\n' indent|' ' name|'net_obj' op|'=' name|'objects' op|'.' name|'Network' op|'(' name|'context' op|'=' name|'self' op|'.' name|'context' op|',' name|'id' op|'=' number|'1' op|')' newline|'\n' name|'mock_get' op|'.' name|'return_value' op|'=' name|'net_obj' newline|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'associate' op|'(' name|'self' op|'.' name|'context' op|',' name|'FAKE_UUID' op|',' name|'host' op|'=' name|'None' op|')' newline|'\n' name|'mock_disassociate' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' name|'net_obj' op|'.' name|'id' op|',' nl|'\n' name|'host' op|'=' name|'True' op|',' name|'project' op|'=' name|'False' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.Network.get_by_uuid'" op|')' newline|'\n' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.Network.associate'" op|')' newline|'\n' DECL|member|test_network_associate_project name|'def' name|'test_network_associate_project' op|'(' name|'self' op|',' name|'mock_associate' op|',' name|'mock_get' op|')' op|':' newline|'\n' indent|' ' name|'net_obj' op|'=' name|'objects' op|'.' name|'Network' op|'(' name|'context' op|'=' name|'self' op|'.' name|'context' op|',' name|'id' op|'=' number|'1' op|')' newline|'\n' name|'mock_get' op|'.' name|'return_value' op|'=' name|'net_obj' newline|'\n' name|'project' op|'=' name|'mock' op|'.' name|'sentinel' op|'.' name|'project' newline|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'associate' op|'(' name|'self' op|'.' name|'context' op|',' name|'FAKE_UUID' op|',' name|'project' op|'=' name|'project' op|')' newline|'\n' name|'mock_associate' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' name|'project' op|',' nl|'\n' name|'network_id' op|'=' name|'net_obj' op|'.' name|'id' op|',' nl|'\n' name|'force' op|'=' name|'True' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.Network.get_by_uuid'" op|')' newline|'\n' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.Network.save'" op|')' newline|'\n' DECL|member|test_network_associate_host name|'def' name|'test_network_associate_host' op|'(' name|'self' op|',' name|'mock_save' op|',' name|'mock_get' op|')' op|':' newline|'\n' indent|' ' name|'net_obj' op|'=' name|'objects' op|'.' name|'Network' op|'(' name|'context' op|'=' name|'self' op|'.' name|'context' op|',' name|'id' op|'=' number|'1' op|')' newline|'\n' name|'mock_get' op|'.' name|'return_value' op|'=' name|'net_obj' newline|'\n' name|'host' op|'=' name|'str' op|'(' name|'mock' op|'.' name|'sentinel' op|'.' name|'host' op|')' newline|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'associate' op|'(' name|'self' op|'.' name|'context' op|',' name|'FAKE_UUID' op|',' name|'host' op|'=' name|'host' op|')' newline|'\n' name|'mock_save' op|'.' 
name|'assert_called_once_with' op|'(' op|')' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'host' op|',' name|'net_obj' op|'.' name|'host' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.Network.get_by_uuid'" op|')' newline|'\n' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.Network.disassociate'" op|')' newline|'\n' DECL|member|test_network_disassociate name|'def' name|'test_network_disassociate' op|'(' name|'self' op|',' name|'mock_disassociate' op|',' name|'mock_get' op|')' op|':' newline|'\n' indent|' ' name|'mock_get' op|'.' name|'return_value' op|'=' name|'objects' op|'.' name|'Network' op|'(' name|'context' op|'=' name|'self' op|'.' name|'context' op|',' name|'id' op|'=' number|'123' op|')' newline|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'disassociate' op|'(' name|'self' op|'.' name|'context' op|',' name|'FAKE_UUID' op|')' newline|'\n' name|'mock_disassociate' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' number|'123' op|',' nl|'\n' name|'project' op|'=' name|'True' op|',' name|'host' op|'=' name|'True' op|')' newline|'\n' nl|'\n' DECL|member|_test_refresh_cache dedent|'' name|'def' name|'_test_refresh_cache' op|'(' name|'self' op|',' name|'method' op|',' op|'*' name|'args' op|',' op|'**' name|'kwargs' op|')' op|':' newline|'\n' comment|'# This test verifies that no call to get_instance_nw_info() is made' nl|'\n' comment|'# from the @refresh_cache decorator for the tested method.' nl|'\n' indent|' ' name|'with' name|'test' op|'.' name|'nested' op|'(' nl|'\n' name|'mock' op|'.' name|'patch' op|'.' name|'object' op|'(' name|'self' op|'.' name|'network_api' op|'.' name|'network_rpcapi' op|',' name|'method' op|')' op|',' nl|'\n' name|'mock' op|'.' name|'patch' op|'.' name|'object' op|'(' name|'self' op|'.' name|'network_api' op|'.' name|'network_rpcapi' op|',' nl|'\n' string|"'get_instance_nw_info'" op|')' op|',' nl|'\n' name|'mock' op|'.' name|'patch' op|'.' name|'object' op|'(' name|'network_model' op|'.' name|'NetworkInfo' op|',' string|"'hydrate'" op|')' op|',' nl|'\n' name|'mock' op|'.' name|'patch' op|'.' name|'object' op|'(' name|'objects' op|'.' name|'InstanceInfoCache' op|',' string|"'save'" op|')' op|',' nl|'\n' op|')' name|'as' op|'(' nl|'\n' name|'method_mock' op|',' name|'nwinfo_mock' op|',' name|'hydrate_mock' op|',' name|'save_mock' nl|'\n' op|')' op|':' newline|'\n' indent|' ' name|'nw_info' op|'=' name|'network_model' op|'.' name|'NetworkInfo' op|'(' op|'[' op|']' op|')' newline|'\n' name|'method_mock' op|'.' name|'return_value' op|'=' name|'nw_info' newline|'\n' name|'hydrate_mock' op|'.' name|'return_value' op|'=' name|'nw_info' newline|'\n' name|'getattr' op|'(' name|'self' op|'.' name|'network_api' op|',' name|'method' op|')' op|'(' op|'*' name|'args' op|',' op|'**' name|'kwargs' op|')' newline|'\n' name|'hydrate_mock' op|'.' name|'assert_called_once_with' op|'(' name|'nw_info' op|')' newline|'\n' name|'self' op|'.' name|'assertFalse' op|'(' name|'nwinfo_mock' op|'.' name|'called' op|')' newline|'\n' nl|'\n' DECL|member|test_allocate_for_instance_refresh_cache dedent|'' dedent|'' name|'def' name|'test_allocate_for_instance_refresh_cache' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'instance' op|'=' name|'fake_instance' op|'.' name|'fake_instance_obj' op|'(' name|'self' op|'.' 
name|'context' op|')' newline|'\n' name|'vpn' op|'=' string|"'fake-vpn'" newline|'\n' name|'requested_networks' op|'=' string|"'fake-networks'" newline|'\n' name|'self' op|'.' name|'_test_refresh_cache' op|'(' string|"'allocate_for_instance'" op|',' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'instance' op|',' name|'vpn' op|',' name|'requested_networks' op|')' newline|'\n' nl|'\n' DECL|member|test_add_fixed_ip_to_instance_refresh_cache dedent|'' name|'def' name|'test_add_fixed_ip_to_instance_refresh_cache' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'instance' op|'=' name|'fake_instance' op|'.' name|'fake_instance_obj' op|'(' name|'self' op|'.' name|'context' op|')' newline|'\n' name|'network_id' op|'=' string|"'fake-network-id'" newline|'\n' name|'self' op|'.' name|'_test_refresh_cache' op|'(' string|"'add_fixed_ip_to_instance'" op|',' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'instance' op|',' name|'network_id' op|')' newline|'\n' nl|'\n' DECL|member|test_remove_fixed_ip_from_instance_refresh_cache dedent|'' name|'def' name|'test_remove_fixed_ip_from_instance_refresh_cache' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'instance' op|'=' name|'fake_instance' op|'.' name|'fake_instance_obj' op|'(' name|'self' op|'.' name|'context' op|')' newline|'\n' name|'address' op|'=' string|"'fake-address'" newline|'\n' name|'self' op|'.' name|'_test_refresh_cache' op|'(' string|"'remove_fixed_ip_from_instance'" op|',' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'instance' op|',' name|'address' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.db.fixed_ip_get_by_address'" op|')' newline|'\n' DECL|member|test_get_fixed_ip_by_address name|'def' name|'test_get_fixed_ip_by_address' op|'(' name|'self' op|',' name|'fip_get' op|')' op|':' newline|'\n' indent|' ' name|'fip_get' op|'.' name|'return_value' op|'=' name|'test_fixed_ip' op|'.' name|'fake_fixed_ip' newline|'\n' name|'fip' op|'=' name|'self' op|'.' name|'network_api' op|'.' name|'get_fixed_ip_by_address' op|'(' name|'self' op|'.' name|'context' op|',' nl|'\n' string|"'fake-addr'" op|')' newline|'\n' name|'self' op|'.' name|'assertIsInstance' op|'(' name|'fip' op|',' name|'objects' op|'.' name|'FixedIP' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.FixedIP.get_by_id'" op|')' newline|'\n' DECL|member|test_get_fixed_ip name|'def' name|'test_get_fixed_ip' op|'(' name|'self' op|',' name|'mock_get_by_id' op|')' op|':' newline|'\n' indent|' ' name|'mock_get_by_id' op|'.' name|'return_value' op|'=' name|'mock' op|'.' name|'sentinel' op|'.' name|'fixed_ip' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'mock' op|'.' name|'sentinel' op|'.' name|'fixed_ip' op|',' nl|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'get_fixed_ip' op|'(' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'mock' op|'.' name|'sentinel' op|'.' name|'id' op|')' op|')' newline|'\n' name|'mock_get_by_id' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' name|'mock' op|'.' name|'sentinel' op|'.' name|'id' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' 
name|'patch' op|'(' string|"'nova.objects.FixedIP.get_by_floating_address'" op|')' newline|'\n' DECL|member|test_get_instance_by_floating_address name|'def' name|'test_get_instance_by_floating_address' op|'(' name|'self' op|',' name|'mock_get_by_floating' op|')' op|':' newline|'\n' indent|' ' name|'mock_get_by_floating' op|'.' name|'return_value' op|'=' name|'objects' op|'.' name|'FixedIP' op|'(' nl|'\n' name|'instance_uuid' op|'=' name|'uuids' op|'.' name|'instance' op|')' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'uuids' op|'.' name|'instance' op|',' nl|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'get_instance_id_by_floating_address' op|'(' nl|'\n' name|'self' op|'.' name|'context' op|',' name|'mock' op|'.' name|'sentinel' op|'.' name|'floating' op|')' op|')' newline|'\n' name|'mock_get_by_floating' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'mock' op|'.' name|'sentinel' op|'.' name|'floating' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.objects.FixedIP.get_by_floating_address'" op|')' newline|'\n' DECL|member|test_get_instance_by_floating_address_none name|'def' name|'test_get_instance_by_floating_address_none' op|'(' name|'self' op|',' name|'mock_get_by_floating' op|')' op|':' newline|'\n' indent|' ' name|'mock_get_by_floating' op|'.' name|'return_value' op|'=' name|'None' newline|'\n' name|'self' op|'.' name|'assertIsNone' op|'(' nl|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'get_instance_id_by_floating_address' op|'(' nl|'\n' name|'self' op|'.' name|'context' op|',' name|'mock' op|'.' name|'sentinel' op|'.' name|'floating' op|')' op|')' newline|'\n' name|'mock_get_by_floating' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'mock' op|'.' name|'sentinel' op|'.' name|'floating' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.network.api.API.migrate_instance_start'" op|')' newline|'\n' DECL|member|test_cleanup_instance_network_on_host name|'def' name|'test_cleanup_instance_network_on_host' op|'(' name|'self' op|',' name|'fake_migrate_start' op|')' op|':' newline|'\n' indent|' ' name|'instance' op|'=' name|'fake_instance' op|'.' name|'fake_instance_obj' op|'(' name|'self' op|'.' name|'context' op|')' newline|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'cleanup_instance_network_on_host' op|'(' nl|'\n' name|'self' op|'.' name|'context' op|',' name|'instance' op|',' string|"'fake_compute_source'" op|')' newline|'\n' name|'fake_migrate_start' op|'.' name|'assert_called_once_with' op|'(' nl|'\n' name|'self' op|'.' name|'context' op|',' name|'instance' op|',' nl|'\n' op|'{' string|"'source_compute'" op|':' string|"'fake_compute_source'" op|',' string|"'dest_compute'" op|':' name|'None' op|'}' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.network.api.API.migrate_instance_finish'" op|')' newline|'\n' DECL|member|test_setup_instance_network_on_host name|'def' name|'test_setup_instance_network_on_host' op|'(' name|'self' op|',' name|'fake_migrate_finish' op|')' op|':' newline|'\n' indent|' ' name|'instance' op|'=' name|'fake_instance' op|'.' name|'fake_instance_obj' op|'(' name|'self' op|'.' name|'context' op|')' newline|'\n' name|'self' op|'.' name|'network_api' op|'.' name|'setup_instance_network_on_host' op|'(' nl|'\n' name|'self' op|'.' 
name|'context' op|',' name|'instance' op|',' string|"'fake_compute_source'" op|')' newline|'\n' name|'fake_migrate_finish' op|'.' name|'assert_called_once_with' op|'(' nl|'\n' name|'self' op|'.' name|'context' op|',' name|'instance' op|',' nl|'\n' op|'{' string|"'source_compute'" op|':' name|'None' op|',' string|"'dest_compute'" op|':' string|"'fake_compute_source'" op|'}' op|')' newline|'\n' nl|'\n' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'oslo_concurrency.lockutils.lock'" op|')' newline|'\n' op|'@' name|'mock' op|'.' name|'patch' op|'.' name|'object' op|'(' name|'api' op|'.' name|'API' op|',' string|"'_get_instance_nw_info'" op|')' newline|'\n' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.network.base_api.update_instance_cache_with_nw_info'" op|')' newline|'\n' DECL|member|test_get_instance_nw_info name|'def' name|'test_get_instance_nw_info' op|'(' name|'self' op|',' name|'mock_update' op|',' name|'mock_get' op|',' name|'mock_lock' op|')' op|':' newline|'\n' indent|' ' name|'fake_result' op|'=' name|'mock' op|'.' name|'sentinel' op|'.' name|'get_nw_info_result' newline|'\n' name|'mock_get' op|'.' name|'return_value' op|'=' name|'fake_result' newline|'\n' name|'instance' op|'=' name|'fake_instance' op|'.' name|'fake_instance_obj' op|'(' name|'self' op|'.' name|'context' op|')' newline|'\n' name|'result' op|'=' name|'self' op|'.' name|'network_api' op|'.' name|'get_instance_nw_info' op|'(' name|'self' op|'.' name|'context' op|',' name|'instance' op|')' newline|'\n' name|'mock_get' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' name|'instance' op|')' newline|'\n' name|'mock_update' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'network_api' op|',' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'instance' op|',' name|'nw_info' op|'=' name|'fake_result' op|',' nl|'\n' name|'update_cells' op|'=' name|'False' op|')' newline|'\n' name|'self' op|'.' name|'assertEqual' op|'(' name|'fake_result' op|',' name|'result' op|')' newline|'\n' nl|'\n' nl|'\n' dedent|'' dedent|'' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.network.api.API'" op|')' newline|'\n' op|'@' name|'mock' op|'.' name|'patch' op|'(' string|"'nova.db.instance_info_cache_update'" op|',' name|'return_value' op|'=' name|'fake_info_cache' op|')' newline|'\n' DECL|class|TestUpdateInstanceCache name|'class' name|'TestUpdateInstanceCache' op|'(' name|'test' op|'.' name|'NoDBTestCase' op|')' op|':' newline|'\n' DECL|member|setUp indent|' ' name|'def' name|'setUp' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'super' op|'(' name|'TestUpdateInstanceCache' op|',' name|'self' op|')' op|'.' name|'setUp' op|'(' op|')' newline|'\n' name|'self' op|'.' name|'context' op|'=' name|'context' op|'.' name|'get_admin_context' op|'(' op|')' newline|'\n' name|'self' op|'.' name|'instance' op|'=' name|'objects' op|'.' name|'Instance' op|'(' name|'uuid' op|'=' name|'FAKE_UUID' op|')' newline|'\n' name|'vifs' op|'=' op|'[' name|'network_model' op|'.' name|'VIF' op|'(' name|'id' op|'=' string|"'super_vif'" op|')' op|']' newline|'\n' name|'self' op|'.' name|'nw_info' op|'=' name|'network_model' op|'.' name|'NetworkInfo' op|'(' name|'vifs' op|')' newline|'\n' name|'self' op|'.' name|'nw_json' op|'=' name|'fields' op|'.' name|'NetworkModel' op|'.' name|'to_primitive' op|'(' name|'self' op|',' string|"'network_info'" op|',' nl|'\n' name|'self' op|'.' 
name|'nw_info' op|')' newline|'\n' nl|'\n' DECL|member|test_update_nw_info_none dedent|'' name|'def' name|'test_update_nw_info_none' op|'(' name|'self' op|',' name|'db_mock' op|',' name|'api_mock' op|')' op|':' newline|'\n' indent|' ' name|'api_mock' op|'.' name|'_get_instance_nw_info' op|'.' name|'return_value' op|'=' name|'self' op|'.' name|'nw_info' newline|'\n' nl|'\n' name|'base_api' op|'.' name|'update_instance_cache_with_nw_info' op|'(' name|'api_mock' op|',' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'self' op|'.' name|'instance' op|',' name|'None' op|')' newline|'\n' name|'api_mock' op|'.' name|'_get_instance_nw_info' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'self' op|'.' name|'instance' op|')' newline|'\n' name|'db_mock' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' name|'self' op|'.' name|'instance' op|'.' name|'uuid' op|',' nl|'\n' op|'{' string|"'network_info'" op|':' name|'self' op|'.' name|'nw_json' op|'}' op|')' newline|'\n' nl|'\n' DECL|member|test_update_nw_info_one_network dedent|'' name|'def' name|'test_update_nw_info_one_network' op|'(' name|'self' op|',' name|'db_mock' op|',' name|'api_mock' op|')' op|':' newline|'\n' indent|' ' name|'api_mock' op|'.' name|'_get_instance_nw_info' op|'.' name|'return_value' op|'=' name|'self' op|'.' name|'nw_info' newline|'\n' name|'base_api' op|'.' name|'update_instance_cache_with_nw_info' op|'(' name|'api_mock' op|',' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'self' op|'.' name|'instance' op|',' name|'self' op|'.' name|'nw_info' op|')' newline|'\n' name|'self' op|'.' name|'assertFalse' op|'(' name|'api_mock' op|'.' name|'_get_instance_nw_info' op|'.' name|'called' op|')' newline|'\n' name|'db_mock' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' name|'self' op|'.' name|'instance' op|'.' name|'uuid' op|',' nl|'\n' op|'{' string|"'network_info'" op|':' name|'self' op|'.' name|'nw_json' op|'}' op|')' newline|'\n' nl|'\n' DECL|member|test_update_nw_info_empty_list dedent|'' name|'def' name|'test_update_nw_info_empty_list' op|'(' name|'self' op|',' name|'db_mock' op|',' name|'api_mock' op|')' op|':' newline|'\n' indent|' ' name|'api_mock' op|'.' name|'_get_instance_nw_info' op|'.' name|'return_value' op|'=' name|'self' op|'.' name|'nw_info' newline|'\n' name|'base_api' op|'.' name|'update_instance_cache_with_nw_info' op|'(' name|'api_mock' op|',' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'self' op|'.' name|'instance' op|',' nl|'\n' name|'network_model' op|'.' name|'NetworkInfo' op|'(' op|'[' op|']' op|')' op|')' newline|'\n' name|'self' op|'.' name|'assertFalse' op|'(' name|'api_mock' op|'.' name|'_get_instance_nw_info' op|'.' name|'called' op|')' newline|'\n' name|'db_mock' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' name|'self' op|'.' name|'instance' op|'.' name|'uuid' op|',' nl|'\n' op|'{' string|"'network_info'" op|':' string|"'[]'" op|'}' op|')' newline|'\n' nl|'\n' DECL|member|test_decorator_return_object dedent|'' name|'def' name|'test_decorator_return_object' op|'(' name|'self' op|',' name|'db_mock' op|',' name|'api_mock' op|')' op|':' newline|'\n' indent|' ' op|'@' name|'base_api' op|'.' name|'refresh_cache' newline|'\n' DECL|function|func name|'def' name|'func' op|'(' name|'self' op|',' name|'context' op|',' name|'instance' op|')' op|':' newline|'\n' indent|' ' name|'return' name|'network_model' op|'.' 
name|'NetworkInfo' op|'(' op|'[' op|']' op|')' newline|'\n' dedent|'' name|'func' op|'(' name|'api_mock' op|',' name|'self' op|'.' name|'context' op|',' name|'self' op|'.' name|'instance' op|')' newline|'\n' name|'self' op|'.' name|'assertFalse' op|'(' name|'api_mock' op|'.' name|'_get_instance_nw_info' op|'.' name|'called' op|')' newline|'\n' name|'db_mock' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' name|'self' op|'.' name|'instance' op|'.' name|'uuid' op|',' nl|'\n' op|'{' string|"'network_info'" op|':' string|"'[]'" op|'}' op|')' newline|'\n' nl|'\n' DECL|member|test_decorator_return_none dedent|'' name|'def' name|'test_decorator_return_none' op|'(' name|'self' op|',' name|'db_mock' op|',' name|'api_mock' op|')' op|':' newline|'\n' indent|' ' op|'@' name|'base_api' op|'.' name|'refresh_cache' newline|'\n' DECL|function|func name|'def' name|'func' op|'(' name|'self' op|',' name|'context' op|',' name|'instance' op|')' op|':' newline|'\n' indent|' ' name|'pass' newline|'\n' dedent|'' name|'api_mock' op|'.' name|'_get_instance_nw_info' op|'.' name|'return_value' op|'=' name|'self' op|'.' name|'nw_info' newline|'\n' name|'func' op|'(' name|'api_mock' op|',' name|'self' op|'.' name|'context' op|',' name|'self' op|'.' name|'instance' op|')' newline|'\n' name|'api_mock' op|'.' name|'_get_instance_nw_info' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' nl|'\n' name|'self' op|'.' name|'instance' op|')' newline|'\n' name|'db_mock' op|'.' name|'assert_called_once_with' op|'(' name|'self' op|'.' name|'context' op|',' name|'self' op|'.' name|'instance' op|'.' name|'uuid' op|',' nl|'\n' op|'{' string|"'network_info'" op|':' name|'self' op|'.' name|'nw_json' op|'}' op|')' newline|'\n' nl|'\n' nl|'\n' DECL|class|NetworkHooksTestCase dedent|'' dedent|'' name|'class' name|'NetworkHooksTestCase' op|'(' name|'test' op|'.' name|'BaseHookTestCase' op|')' op|':' newline|'\n' DECL|member|test_instance_network_info_hook indent|' ' name|'def' name|'test_instance_network_info_hook' op|'(' name|'self' op|')' op|':' newline|'\n' indent|' ' name|'info_func' op|'=' name|'base_api' op|'.' name|'update_instance_cache_with_nw_info' newline|'\n' name|'self' op|'.' name|'assert_has_hook' op|'(' string|"'instance_network_info'" op|',' name|'info_func' op|')' newline|'\n' dedent|'' dedent|'' endmarker|'' end_unit
python
#!/usr/bin/env python

import argparse
import os
import sys

from lib.config import enable_verbose_mode, get_target_arch
from lib.util import execute_stdout

SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))


def main():
    os.chdir(SOURCE_ROOT)

    args = parse_args()
    if args.verbose:
        enable_verbose_mode()

    # ./script/bootstrap
    # ./script/update -t x64 --defines=''
    # ./script/build --no_shared_library -t x64
    # ./script/create-dist -c static_library -t x64 --no_zip
    script_dir = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor',
                              'libchromiumcontent', 'script')
    bootstrap = os.path.join(script_dir, 'bootstrap')
    update = os.path.join(script_dir, 'update')
    build = os.path.join(script_dir, 'build')
    create_dist = os.path.join(script_dir, 'create-dist')
    execute_stdout([sys.executable, bootstrap])
    execute_stdout([sys.executable, update, '-t', args.target_arch,
                    '--defines', args.defines])
    execute_stdout([sys.executable, build, '-R', '-t', args.target_arch])
    execute_stdout([sys.executable, create_dist, '-c', 'static_library',
                    '--no_zip', '-t', args.target_arch])


def parse_args():
    parser = argparse.ArgumentParser(description='Build libchromiumcontent')
    parser.add_argument('--target_arch',
                        help='Specify the arch to build for')
    parser.add_argument('--defines', default='',
                        help='The definitions passed to gyp')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Prints the output of the subprocesses')
    return parser.parse_args()


if __name__ == '__main__':
    sys.exit(main())
python
from .pipenv_setup_comp import compare_deps
from ._version import (__title__, __description__, __url__, __version__,
                       __author__, __author_email__, __license__)

__all__ = ["compare_deps", "__title__", "__description__", "__url__",
           "__version__", "__author__", "__author_email__", "__license__"]
python
""" @file: This file contains the database helper class that manages both 'users' and 'contacts' collections @Note: Calling this with python will do nothing """ #------------------------------STANDARD DEPENDENCIES-----------------------------# import json import pickle, copyreg, ssl # for serializing User objects (SSL obj requires more work) from itertools import count # to keep track of # initializations #-----------------------------3RD PARTY DEPENDENCIES-----------------------------# from pymongo import MongoClient from bson.binary import Binary # for serializing/derializing User objects #--------------------------------OUR DEPENDENCIES--------------------------------# from backend.src import utils from backend.src.database.usersCollectionManager import UsersCollectionManager from backend.src.database.contactsCollectionManager import ContactsCollectionManager class DatabaseManager(UsersCollectionManager, ContactsCollectionManager): _numInits = 0 def __init__(self, printCollectionCreation=True): """ \n@Brief: This class is meant to help manage the database of users' information \n@Note: Will create the database if it does not already exist \n@Note: Will inheret other database managers to consolidate into one """ # Inheret all functions and 'self' variables super().__init__() # only check & create database collections once if DatabaseManager._numInits == 0: # if db or collection(s) don't exist, add dummy data to them to create it allCollections = [self.usersColl, self.contactsColl] for collObj in allCollections: self._createCollDNE(collObj, printCreation=printCollectionCreation) DatabaseManager._numInits += 1
python
# https://leetcode.com/problems/keyboard-row
import re


class Solution(object):
    def findWords(self, words):
        """
        :type words: List[str]
        :rtype: List[str]
        """
        match_r1 = lambda w: bool(re.match("^[qwertyuiop]*$", w.lower()))
        match_r2 = lambda w: bool(re.match("^[asdfghjkl]*$", w.lower()))
        match_r3 = lambda w: bool(re.match("^[zxcvbnm]*$", w.lower()))
        res = [w for w in words if match_r1(w) or match_r2(w) or match_r3(w)]
        return res
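# Quick usage check; the sample words below come from the LeetCode problem
# statement for this exercise.
if __name__ == "__main__":
    print(Solution().findWords(["Hello", "Alaska", "Dad", "Peace"]))
    # expected: ['Alaska', 'Dad']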
python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate

app = Flask(__name__)
app.config.from_object("app.config.Config")

db = SQLAlchemy(app)        # database object
migrate = Migrate(app, db)  # object responsible for tracking changes in DB

from app import routes, models
python
#! /usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2014 Junko Tsuji # This script generates bed files as final outputs of # DNA methylation pipeline by taking a CX_report file # generated from bismark_methylation_extractor. from optparse import OptionParser import sys, os.path, string # RGB strings and integer values rgbDict = { (0 , 5) : "0,255,0", # 65280, (6 , 15): "55,255,0", # 3669760, (16, 25): "105,255,0", # 6946560, (26, 35): "155,255,0", # 10223360, (36, 45): "205,255,0", # 13500160, (46, 55): "255,255,0", # 16776960 (56, 65): "255,205,0", # 16764160, (66, 75): "255,155,0", # 16751360, (76, 85): "255,105,0", # 16738560, (86, 95): "255,55,0", # 16725760, (96,100): "255,0,0" # 16711680 } def write(w, line, name): chrom = line[0] chromStart = line[1] chromEnd = str( int(line[1])+1 ) strand = line[2] meth = float(line[3]) nometh = int(line[4]) readCount = int(meth) + nometh itemRgb = rgbDict[(0,5)] if (meth + nometh) > 0: percentMeth = int(round(meth/(meth+nometh)*100)) for L in rgbDict: if L[0] <= percentMeth and percentMeth <= L[1]: itemRgb = rgbDict[L] break else: percentMeth = 0 w.write("\t".join([ chrom, chromStart, chromEnd, name, str(min(readCount, 1000)), strand, chromStart, chromEnd, itemRgb, str(readCount), str(percentMeth) ])+"\n") def outAll(files, f, name): for line in open(f): line = line.rstrip("\n").split("\t") write(files[line[5]], line, name) def outExact(files, f, name): for line in open(f): line = line.rstrip("\n").split("\t") if line[6][1] == "N": continue elif line[5] != "CG": if line[6][2] == "N": continue write(files[line[5]], line, name) def cxrepoBed(opts, args): f = os.path.basename(args[0]) files = {'CG' :open(opts.output+"/CG_" +f, "w"), 'CHG':open(opts.output+"/CHG_"+f, "w"), 'CHH':open(opts.output+"/CHH_"+f, "w")} if opts.ns == True: outAll(files, args[0], opts.name) else: outExact(files, args[0], opts.name) for of in files: files[of].close() if __name__ == "__main__": prog = os.path.basename(sys.argv[0]) fPrefix = os.path.basename(sys.argv[-1]).split(".CX_report")[0] usage = "%prog [option] cxReport" description = "Convert a 'CX_report' file to bed files" op = OptionParser(usage=usage, description=description) op.add_option("-n","--output-Ns", dest="ns", action="store_true", default=False, help="Output cytosine contexts in which include 'N' (default=%default)") op.add_option("-N","--bedmethyl-name", dest="name", type="string", action="store", default=fPrefix, help="Insert name into the 3rd column of the bedMethyl format (default=[cxReport filename])", metavar="NAME") op.add_option("-o","--output-place", dest="output", type="string", action="store", default="./", help="Place for output files (default='./')", metavar="PATH") (opts, args) = op.parse_args() try: cxrepoBed(opts, args) except KeyboardInterrupt: pass except Exception, e: sys.exit(prog + ": error: " + str(e))
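# Example invocation (hypothetical file names; the real script/report names will
# differ). It writes CG_/CHG_/CHH_-prefixed bed files into the output directory:
#
#   python cxrepo-bed.py -o bed_out -N sample1 sample1.CX_report.txt
#
# Each CX_report line is expected by the code above to be tab-separated as:
#   <chrom> <position> <strand> <count_methylated> <count_unmethylated> <context> <trinucleotide>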
python
# -*- coding: utf-8 -*- import random from source.tree.heap import MinHeap, MaxHeap # region INSERTION SORT # 直接插入排序 def straight_insertion_sort(a, reverse=False): """ 直接插入排序 基本操作:将一个记录插入到已排好序的有序表中,从而得到一个新的,记录数增加1的有序表。 :param a: :param reverse: :return: """ # 从无序表的第二条记录开始 for i in range(1, len(a)): j = i # 将索引为i的元素插入到前面的有序表的适当位置 while j > 0 and (a[j] > a[j - 1] if reverse else a[j] < a[j - 1]): # 通过依次交换的方式移动记录 a[j - 1], a[j] = a[j], a[j - 1] # 索引指针前移 j -= 1 # 折半插入排序 def binary_insertion_sort(a, reverse=False): """ 折半插入排序 基本操作:在直接插入排序的基础上改进,基本操作与直接插入排序一致,使用"折半查找"来确定插入位置 :param a: :param reverse: :return: """ # 从无序表的第二条记录开始 for i in range(1, len(a)): # 折半查找插入位置 low = 0 high = i - 1 while low <= high: m = (low + high) // 2 # 折半 if a[i] >= a[m] if reverse else a[i] <= a[m]: if a[i] == a[m]: high = m break high = m - 1 else: low = m + 1 v = a[i] # 记录依次后移,空出目标位置 for j in range(i - 1, high, -1): a[j + 1], a[j] = a[j], a[j + 1] # 目标值插入目标位置 a[high + 1] = v # 2路插入排序 def two_way_insertion_sort(a, reverse=False): """ 2路插入排序 基本操作:在“折半插入排序算法”的基础上进行改进,目的是减少排序过程中记录移动的次数。 使用一个长度为n的循环表作为辅助空间,将原表中的记录依次的插入循环表。并使用指针first和final指示有序序列的最小和最大值。 对于记录i: 1. 如果小于first:直接插入到有序序列之前; 2. 如果大于final:直接追加到有序序列之后; 3. 如果介于两者之间:查找位置并插入(可用高效查找算法提高效率)。 :param a: :param reverse: :return: """ n = len(a) deque = [0] * n deque[0] = a[0] first, final = 0, 0 for i, v in enumerate(a): if i > 0: if v < deque[first]: # 情况1:插入到有序序列之前 first = (first - 1 + n) % n deque[first] = v elif v > deque[final]: # 情况2:追加到有序序列之后 final = (final + 1 + n) % n deque[final] = v else: # 情况3:查找并插入 # 为了简化代码,此处使用了直接插入;可以使用高效查找算法提高效率,如折半查找 i = first while deque[i] < v: deque[(i - 1 + n) % n] = deque[i] i = (i + 1 + n) % n deque[i - 1] = v first = (first - 1 + n) % n # 将循环表中的记录还原到原表 for i in range(n): if reverse: a[n - 1 - i] = deque[(first + i + n) % n] else: a[i] = deque[(first + i + n) % n] # 链表结点 class SLNode(object): """ 链表结点,用于表插入排序 """ def __init__(self, rc, _next): self.rc = rc # 记录值 self.next = _next # 链接指针 # 重排链表 def _arrange(sl): """ 重排链表 表插入排序的辅助方法 顺序遍历有序链表,将链表中的第i个结点移动至数组的第i个分量中。 链表方便移动,但是无法实现高效查找;所以对链表进行重排,最终变成有序数据,进而支持高效查找。 :param sl: :return: """ p = sl[0].next # 当前操作的结点 for i in range(1, len(sl) - 1): q = sl[p].next # 当前结点的下一个结点 ''' 使用p的next来暂存交换后的下一个结点的实际位置 此处比较绕,当前结点p在与数组索引为i的结点k进行对调之后,k失去了原来的位置。 链表中k之前的结点k'的next依然是指向i,但这时候k'通过next已经无法找到k了,因为i位置现在存储的是结点p。 同时p因为已经被放置在了有序数组中的恰当位置,其next字段已经无用,所以我们使用p的next字段来指示当前结点k的实际位置。 当k'通过next访问下一个结点时,实际访问到的是结点p,我们判断结点p已经处在数组中的有序区域,因此继续访问next, 直到next不在有序数组范围,即为原链表实际的next位置。 ''' # 暂存k的位置 sl[p].next = p # 结点交换 sl[i], sl[p] = sl[p], sl[i] # 循环获取next的实际位置 while q <= i: q = sl[q].next # 指针后移 p = q # 表插入排序 def list_insertion_sort(a, reverse=False): """ 表插入排序 插入排序的改进。普通插入排序使用数组来存储数据,无法避免移动记录。表插入排序改用链表来存储数据,完全避免的记录移动。 :param a: :param reverse: :return: """ # 使用数组+链表结点来模拟链表 sl = [SLNode(None, None) for _ in range(len(a) + 1)] # 首结点作为哨兵 sl[0].rc = float('-inf' if reverse else 'inf') sl[0].next = 0 # 遍历原始数组,依次将记录插入链表 for i, v in enumerate(a): i += 1 curr = 0 # 遍历链表,找到合适的插入位置 while v < sl[sl[curr].next].rc if reverse else v > sl[sl[curr].next].rc: curr = sl[curr].next # 向已排序链表中插入新记录 sl[i].rc = v sl[i].next = sl[curr].next sl[curr].next = i c = sl[0].next # 遍历已排序链表,反写灰原始数组 for i in range(len(a)): a[i] = sl[c].rc c = sl[c].next def _shell_insert(a, dk, reverse=False): """ 希尔插入 希尔排序的辅助方法,完成一趟排序操作 对每个分组内的记录,分别进行直接插入排序 :param a: :param dk: :param reverse: :return: """ for i in range(dk, len(a)): j = i - dk while j >= 0 and (a[i] > a[j] if reverse else a[i] < a[j]): a[i], a[j] = a[j], a[i] i, j = j, j - dk # 希尔排序 def 
shell_sort(a, dlta, reverse=False): """ 希尔排序(Shell Sort) 又称“缩小增量排序(Diminishing Increment Sort)” 直接插入排序在待排序列基本有序或记录较少时,效率非常高。希尔排序就是借助这个特点, 将待排序列分割成若干个子序列分别进行插入排序,待整个序列基本有序时,再对全体记录做一次直接插入排序。 通过多次由大到小的分割,逐趟进行希尔插入操作,直到间隔为1,也就是全体排序。 分割的方式为:增量为k,间隔为k的记录为一组 :param a: :param dlta: 增加序列 :param reverse: :return: """ # 根据增量序列逐趟进行希尔插入操作 for dk in dlta: _shell_insert(a, dk, reverse) # endregion # region QUICK SORT # 冒泡排序 def bubble_sort(a, reverse=False): """ 冒泡排序 一趟的操作: 升序为例,按顺序比较相邻的两条记录,如果两者不是升序排列,则交换两记录。 一趟结束时,待排序列中最大的记录会沉底,下一趟则从待排序列中排除掉该记录。 直到待排序列只剩一条记录时,排序完成。 :param a: :param reverse: :return: """ n = len(a) for i in range(n - 1): for j in range(0, n - i - 1): if (a[j] < a[j + 1]) if reverse else (a[j] > a[j + 1]): a[j], a[j + 1] = a[j + 1], a[j] # 分割待排记录 def _partition(a, p, r, reverse=False): """ 分割待排记录 将索引从p到r的记录,已a[r]为支点分割为独立的两部分,其中一部分的记录均小于另一部分。 :param a: :param p: :param r: :param reverse: :return: """ i = p - 1 # 两部分支点,第一部分的最后一条记录 # 遍历待排记录,并将记录移动到所属的部分 for j in range(p, r): compare = (a[j] > a[r]) if reverse else (a[j] < a[r]) if compare: # 需要移动 i += 1 # 支点后移 a[i], a[j] = a[j], a[i] # 移动记录 a[i + 1], a[r] = a[r], a[i + 1] # 将最后一条记录(索引为r)与第二部分的第一个元素交换,r正式称为支点 return i + 1 # 返回支点索引 # 随机分割 def _randomized_partition(a, p, r, reverse=False): """ 随机分割 普通分割策略使用待排记录中的最后一条记录作为支点,如果分割后的两部分严重不平衡,会造成算法性能下降。 所以该方法使用随机选取支点记录的策略,使两部分平衡的概率稳定,进而使整个排序算法的性能稳定。 :param a: :param p: :param r: :param reverse: :return: """ k = random.randint(p, r) # 随机选取支点记录 a[k], a[r] = a[r], a[k] return _partition(a, p, r, reverse) # 快速排序 def _quick_sort(a, p, r, reverse=False, randomized_partition=False): """ 快速排序 计算支点索引,并递归排序前后两部分 :param a: :param p: :param r: :param reverse: :param randomized_partition: :return: """ if p < r: # 待排记录中有多于一条记录 # 获取支点索引 q = _randomized_partition(a, p, r, reverse) if randomized_partition else _partition(a, p, r, reverse) # 递归排序前后两部分 _quick_sort(a, p, q - 1, reverse) _quick_sort(a, q + 1, r, reverse) # 快速排序封装方法 def quick_sort(a, reverse=False, randomized_partition=False): """ 快速排序封装方法 :param a: :param reverse: :param randomized_partition: :return: """ _quick_sort(a, 0, len(a) - 1, reverse, randomized_partition) # endregion # region SELECTION SORT # 简单选择排序 def simple_selection_sort(a, reverse=False): """ 简单选择排序 实现最简单的选择排序 基本操作:每次选出待排序类中最小(或最大)的记录,移动至有序序列中;循环多趟直至最后一条记录。 :param a: :param reverse: :return: """ n = len(a) for i in range(n - 1): k = i for j in range(i + 1, n): if (a[k] < a[j]) if reverse else (a[k] > a[j]): k = j if k != i: a[k], a[i] = a[i], a[k] # 树形选择排序 def tree_selection_sort(a): """ 树形选择排序 因为过程类似锦标赛制,又称锦标赛排序(Tournament Sort)。 是对“简单选择排序”的改进。简单选择排序主要操作是记录的对比,如果能减少对比,则可以提高效率 树形选择排序基本操作:对n个记录进行两两对比,然后其中1/2较小的记录再进行两两对比,如此反复,直至选出最小记录。 过程可用一棵完全二叉树表示。n条记录依次放入叶子结点,按如上规则生成二叉树。 获取第二小的记录:已知树根结点记录为最小记录,找到最小记录在叶节点中的位置,将其置为无穷大; 然后沿该结点到根结点的路径重新生成二叉树,则生成完毕之后,树根为第二小的记录;后续以此类推。 该排序方法有辅助空间较多和最大值进行多余比较等缺点,另一种选择排序:堆排序弥补了这些缺点。 所以树形选择排序只是一种过度,实现比较复杂,应用很少。 :param a: :return: """ # 树结点 class Node(object): def __init__(self, key, parent=None, left=None, right=None): self.key = key self.parent = parent self.left = left self.right = right # 哨兵值 sentry = float('inf') # 根据待排序列生成二叉树 def generate_tree(): l1, l2 = [], [] # 生成叶子结点 for key in a: l1.append(Node(key)) # 向上逐层生成,直至只剩一个结点,即为根结点 while len(l1) > 1: # 如果结点数为奇数,追加一个凑为偶数个,方便二叉树生成 if len(l1) & 1: l1.append(Node(sentry)) # 步进为2,两两对比选出父结点 for i in range(0, len(l1), 2): node = Node(key=l1[i].key if (l1[i].key < l1[i + 1].key) else l1[i + 1].key, left=l1[i], right=l1[i + 1]) l1[i].parent = node l1[i + 1].parent = node l2.append(node) l1, l2 = l2, [] # 最后一个结点即为根结点 
return l1.pop() # 迭代获取最小值 def get_extremum_from_tree(): node = tree_root while node.key != sentry: # 使用生成器函数来进行迭代 # 返回最小值后,将最小结点记录变为无穷大,重新生成二叉树 yield node.key # 寻找最小记录所在的叶子结点 while node and node.left: node = node.left if node.key == node.left.key else node.right # 最小记录标记为无穷大 node.key = sentry while node and node.parent: node = node.parent node.key = node.left.key if node.left.key < node.right.key else node.right.key return # 生成二叉树 tree_root = generate_tree() # 迭代生成有序序列,并返回 return [v for v in get_extremum_from_tree()] # 堆排序 def heap_sort(a, reverse=False): """ 堆排序 借助最大堆(或最小堆)的性质实现排序,是对“树形选择排序”的改进 基本操作:从堆的根结点获取最小记录,放入有序序列,剩余的序列重新建堆。以此类推,直至所有记录有序。 :param a: :param reverse: :return: """ # 使用待排序列初始化最大堆(最小堆) heap = (MinHeap if reverse else MaxHeap)(a) for i in range(heap.heap_size - 1, 0, -1): # 最小记录移动至序列尾部形成有序序列 heap.A[0], heap.A[i] = heap.A[i], heap.A[0] # 堆元素减少 heap.heap_size -= 1 # 重新建堆 heap.heapify(0) # endregion # region MERGE SORT # 合并有序序列 def _merge(a, p, q, r, reverse=False): """ 合并有序序列 将两个有序序列合并为一个新的有序序列 “归并排序”辅助方法 依次对比前后两部分的记录,按顺序合并进原序列 :param a: :param p: :param q: :param r: :param reverse: :return: """ # 哨兵 sentry = float('-inf') if reverse else float('inf') # 获取前半部分有序序列 l_a = [v for i, v in enumerate(a) if p <= i <= q] # 追加哨兵 l_a.append(sentry) # 获取后半部分有序序列 r_a = [v for i, v in enumerate(a) if q < i <= r] # 追加哨兵 r_a.append(sentry) # 遍历l_a和l_b,对比两个序列中的记录,按顺序回写如原序列 i, j, k = 0, 0, p while l_a[i] is not sentry or r_a[j] is not sentry: if l_a[i] > r_a[j] if reverse else l_a[i] < r_a[j]: a[k] = l_a[i] i += 1 else: a[k] = r_a[j] j += 1 k += 1 # 归并排序 def _merge_sort(a, p, r, reverse=False): """ 归并排序 主要思想:将两个或两个以上的有序序列组合成一个新的有序序列 该方法实现的是2路归并排序。主要操作是,递归的将待排序类均分为两部分, 直到细分到每部分只有一条记录(一条记录天然有序),然后逐层合并,最终得到有序序列。 :param a: :param p: :param r: :param reverse: :return: """ if p < r: # 均分待排序列 q = (r + p) // 2 # 递归排序前后两部分 _merge_sort(a, p, q, reverse=reverse) _merge_sort(a, q + 1, r, reverse=reverse) # 前后两部分分别有序后,合并之 _merge(a, p, q, r, reverse=reverse) # 归并排序封装方法 def merge_sort(a, reverse=False): """ 归并排序封装方法 :param a: :param reverse: :return: """ _merge_sort(a, 0, len(a) - 1, reverse) # endregion
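# A small smoke test for the sorting helpers above. It is only a sketch: it assumes
# the module is run from a location where `source.tree.heap` (imported at the top)
# resolves, and it exercises a representative subset of the functions.
if __name__ == '__main__':
    sample = [49, 38, 65, 97, 76, 13, 27, 49]

    for sort_func in (straight_insertion_sort, bubble_sort, simple_selection_sort,
                      quick_sort, merge_sort, heap_sort):
        data = sample[:]
        sort_func(data)
        print(sort_func.__name__, data)

    data = sample[:]
    shell_sort(data, dlta=[5, 3, 1])  # shell_sort needs an explicit increment sequence
    print('shell_sort', data)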
python
# Generated by Django 2.2.13 on 2021-02-25 16:22

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ("data_refinery_common", "0063_auto_20210212_1855"),
    ]

    operations = [
        migrations.AlterModelOptions(
            name="dataset",
            options={"base_manager_name": "objects"},
        ),
    ]
python
from django.conf.urls import url

from views import FakeImageView
from . import views

urlpatterns = [
    url(r'^', FakeImageView.as_view(), name='index.html'),
    # url(r'^tweet_json/', FakeJSONView.as_view(), name='tweet_json.html')
]
python
#!/usr/bin/env python import logging import unittest from decimal import Decimal import pandas as pd from hummingbot.connector.exchange.paper_trade.paper_trade_exchange import QuantizationParams from hummingbot.core.clock import ( Clock, ClockMode ) from hummingbot.core.data_type.common import TradeType from hummingbot.core.data_type.order_book_row import OrderBookRow from hummingbot.core.event.event_logger import EventLogger from hummingbot.core.event.events import ( MarketEvent, OrderBookTradeEvent, ) from hummingbot.strategy.market_trading_pair_tuple import MarketTradingPairTuple from hummingbot.strategy.pure_market_making.pure_market_making import PureMarketMakingStrategy from test.mock.mock_paper_exchange import MockPaperExchange logging.basicConfig(level=logging.ERROR) class PMMRefreshToleranceUnitTest(unittest.TestCase): start: pd.Timestamp = pd.Timestamp("2019-01-01", tz="UTC") end: pd.Timestamp = pd.Timestamp("2019-01-01 01:00:00", tz="UTC") start_timestamp: float = start.timestamp() end_timestamp: float = end.timestamp() trading_pair = "HBOT-ETH" base_asset = trading_pair.split("-")[0] quote_asset = trading_pair.split("-")[1] def simulate_maker_market_trade(self, is_buy: bool, quantity: Decimal, price: Decimal): order_book = self.market.get_order_book(self.trading_pair) trade_event = OrderBookTradeEvent( self.trading_pair, self.clock.current_timestamp, TradeType.BUY if is_buy else TradeType.SELL, price, quantity ) order_book.apply_trade(trade_event) def setUp(self): self.clock_tick_size = 1 self.clock: Clock = Clock(ClockMode.BACKTEST, self.clock_tick_size, self.start_timestamp, self.end_timestamp) self.market: MockPaperExchange = MockPaperExchange() self.mid_price = 100 self.bid_spread = 0.01 self.ask_spread = 0.01 self.order_refresh_time = 30 self.market.set_balanced_order_book(trading_pair=self.trading_pair, mid_price=self.mid_price, min_price=1, max_price=200, price_step_size=1, volume_step_size=10) self.market.set_balance("HBOT", 500) self.market.set_balance("ETH", 5000) self.market.set_quantization_param( QuantizationParams( self.trading_pair, 6, 6, 6, 6 ) ) self.market_info = MarketTradingPairTuple(self.market, self.trading_pair, self.base_asset, self.quote_asset) self.clock.add_iterator(self.market) self.maker_order_fill_logger: EventLogger = EventLogger() self.cancel_order_logger: EventLogger = EventLogger() self.market.add_listener(MarketEvent.OrderFilled, self.maker_order_fill_logger) self.market.add_listener(MarketEvent.OrderCancelled, self.cancel_order_logger) self.one_level_strategy: PureMarketMakingStrategy = PureMarketMakingStrategy() self.one_level_strategy.init_params( self.market_info, bid_spread=Decimal("0.01"), ask_spread=Decimal("0.01"), order_amount=Decimal("1"), order_refresh_time=4, filled_order_delay=8, hanging_orders_enabled=True, hanging_orders_cancel_pct=0.05, order_refresh_tolerance_pct=0 ) self.multi_levels_strategy: PureMarketMakingStrategy = PureMarketMakingStrategy() self.multi_levels_strategy.init_params( self.market_info, bid_spread=Decimal("0.01"), ask_spread=Decimal("0.01"), order_amount=Decimal("1"), order_levels=5, order_level_spread=Decimal("0.01"), order_refresh_time=4, filled_order_delay=8, order_refresh_tolerance_pct=0 ) self.hanging_order_multiple_strategy = PureMarketMakingStrategy() self.hanging_order_multiple_strategy.init_params( self.market_info, bid_spread=Decimal("0.01"), ask_spread=Decimal("0.01"), order_amount=Decimal("1"), order_levels=5, order_level_spread=Decimal("0.01"), order_refresh_time=4, filled_order_delay=8, 
order_refresh_tolerance_pct=0, hanging_orders_enabled=True ) def test_active_orders_are_cancelled_when_mid_price_moves(self): strategy = self.one_level_strategy self.clock.add_iterator(strategy) self.clock.backtest_til(self.start_timestamp + self.clock_tick_size) self.assertEqual(1, len(strategy.active_buys)) self.assertEqual(1, len(strategy.active_sells)) old_bid = strategy.active_buys[0] old_ask = strategy.active_sells[0] # Not the order refresh time yet, orders should remain the same self.clock.backtest_til(self.start_timestamp + 3 * self.clock_tick_size) self.assertEqual(1, len(strategy.active_buys)) self.assertEqual(1, len(strategy.active_sells)) self.assertEqual(old_bid.client_order_id, strategy.active_buys[0].client_order_id) self.assertEqual(old_ask.client_order_id, strategy.active_sells[0].client_order_id) self.market.order_books[self.trading_pair].apply_diffs([OrderBookRow(99.5, 30, 2)], [OrderBookRow(100.1, 30, 2)], 2) self.clock.backtest_til(self.start_timestamp + 6 * self.clock_tick_size) new_bid = strategy.active_buys[0] new_ask = strategy.active_sells[0] self.assertEqual(1, len(strategy.active_buys)) self.assertEqual(1, len(strategy.active_sells)) self.assertNotEqual(old_ask, new_ask) self.assertNotEqual(old_bid, new_bid) def test_active_orders_are_kept_when_within_tolerance(self): strategy = self.one_level_strategy self.clock.add_iterator(strategy) self.clock.backtest_til(self.start_timestamp + self.clock_tick_size) self.assertEqual(1, len(strategy.active_buys)) self.assertEqual(1, len(strategy.active_sells)) old_bid = strategy.active_buys[0] old_ask = strategy.active_sells[0] self.clock.backtest_til(self.start_timestamp + 6 * self.clock_tick_size) self.assertEqual(1, len(strategy.active_buys)) self.assertEqual(1, len(strategy.active_sells)) new_bid = strategy.active_buys[0] new_ask = strategy.active_sells[0] self.assertEqual(old_ask, new_ask) self.assertEqual(old_bid, new_bid) self.clock.backtest_til(self.start_timestamp + 10 * self.clock_tick_size) self.assertEqual(1, len(strategy.active_buys)) self.assertEqual(1, len(strategy.active_sells)) new_bid = strategy.active_buys[0] new_ask = strategy.active_sells[0] self.assertEqual(old_ask, new_ask) self.assertEqual(old_bid, new_bid) def test_multi_levels_active_orders_are_cancelled_when_mid_price_moves(self): strategy = self.multi_levels_strategy self.clock.add_iterator(strategy) self.clock.backtest_til(self.start_timestamp + self.clock_tick_size) self.assertEqual(5, len(strategy.active_buys)) self.assertEqual(5, len(strategy.active_sells)) old_buys = strategy.active_buys old_sells = strategy.active_sells self.market.order_books[self.trading_pair].apply_diffs([OrderBookRow(99.5, 30, 2)], [OrderBookRow(100.1, 30, 2)], 2) self.clock.backtest_til(self.start_timestamp + 6 * self.clock_tick_size) new_buys = strategy.active_buys new_sells = strategy.active_sells self.assertEqual(5, len(strategy.active_buys)) self.assertEqual(5, len(strategy.active_sells)) self.assertNotEqual([o.client_order_id for o in old_sells], [o.client_order_id for o in new_sells]) self.assertNotEqual([o.client_order_id for o in old_buys], [o.client_order_id for o in new_buys]) def test_multiple_active_orders_are_kept_when_within_tolerance(self): strategy = self.multi_levels_strategy self.clock.add_iterator(strategy) self.clock.backtest_til(self.start_timestamp + self.clock_tick_size) self.assertEqual(5, len(strategy.active_buys)) self.assertEqual(5, len(strategy.active_sells)) old_buys = strategy.active_buys old_sells = strategy.active_sells 
self.clock.backtest_til(self.start_timestamp + 6 * self.clock_tick_size) self.assertEqual(5, len(strategy.active_buys)) self.assertEqual(5, len(strategy.active_sells)) new_buys = strategy.active_buys new_sells = strategy.active_sells self.assertEqual([o.client_order_id for o in old_sells], [o.client_order_id for o in new_sells]) self.assertEqual([o.client_order_id for o in old_buys], [o.client_order_id for o in new_buys]) self.clock.backtest_til(self.start_timestamp + 10 * self.clock_tick_size) self.assertEqual(5, len(strategy.active_buys)) self.assertEqual(5, len(strategy.active_sells)) new_buys = strategy.active_buys new_sells = strategy.active_sells self.assertEqual([o.client_order_id for o in old_sells], [o.client_order_id for o in new_sells]) self.assertEqual([o.client_order_id for o in old_buys], [o.client_order_id for o in new_buys]) def test_hanging_orders_multiple_orders_with_refresh_tolerance(self): strategy = self.hanging_order_multiple_strategy self.clock.add_iterator(strategy) self.clock.backtest_til(self.start_timestamp + self.clock_tick_size) self.assertEqual(5, len(strategy.active_buys)) self.assertEqual(5, len(strategy.active_sells)) self.simulate_maker_market_trade(True, Decimal("100"), Decimal("101.1")) # Before refresh_time hanging orders are not yet created self.clock.backtest_til(self.start_timestamp + strategy.order_refresh_time / 2) self.assertEqual(1, len(self.maker_order_fill_logger.event_log)) self.assertEqual(5, len(strategy.active_buys)) self.assertEqual(4, len(strategy.active_sells)) self.assertEqual(0, len(strategy.hanging_order_ids)) # At order_refresh_time (4 seconds), hanging order are created # Ask is filled and due to delay is not replenished immediately # Bid orders are now hanging and active self.clock.backtest_til(self.start_timestamp + strategy.order_refresh_time + 1) self.assertEqual(1, len(strategy.active_buys)) self.assertEqual(0, len(strategy.active_sells)) self.assertEqual(1, len(strategy.hanging_order_ids)) # At filled_order_delay (8 seconds), new sets of bid and ask orders are created self.clock.backtest_til(self.start_timestamp + strategy.order_refresh_time + strategy.filled_order_delay + 1) self.assertEqual(6, len(strategy.active_buys)) self.assertEqual(5, len(strategy.active_sells)) self.assertEqual(1, len(strategy.hanging_order_ids)) # Check all hanging order ids are indeed in active bids list self.assertTrue(all(h in [order.client_order_id for order in strategy.active_buys] for h in strategy.hanging_order_ids)) old_buys = [o for o in strategy.active_buys if o.client_order_id not in strategy.hanging_order_ids] old_sells = [o for o in strategy.active_sells if o.client_order_id not in strategy.hanging_order_ids] self.clock.backtest_til(self.start_timestamp + strategy.order_refresh_time + strategy.filled_order_delay + 1) self.assertEqual(6, len(strategy.active_buys)) self.assertEqual(5, len(strategy.active_sells)) new_buys = [o for o in strategy.active_buys if o.client_order_id not in strategy.hanging_order_ids] new_sells = [o for o in strategy.active_sells if o.client_order_id not in strategy.hanging_order_ids] self.assertEqual([o.client_order_id for o in old_sells], [o.client_order_id for o in new_sells]) self.assertEqual([o.client_order_id for o in old_buys], [o.client_order_id for o in new_buys])
python
class ClonedRepoExistedError(Exception):
    """Base class for other exceptions"""
    pass


class BranchUpToDateException(Exception):
    pass


class DefaultCommitToolException(Exception):
    pass


class PotentialInfiniteLoopException(Exception):
    pass
python
import os
from pathlib import Path

output = Path(os.path.abspath(__file__)).parent.parent / "output"
python
# Copyright 2017 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cstar.output

"""Pretty-print the progress of a cstar job"""


def print_progress(original_topology, progress, down, printer=cstar.output.print_topology):
    def get_status(host):
        if host in progress.done:
            if host in down:
                return "-"
            return '+'
        if host in progress.running:
            if host in down:
                return "/"
            return '*'
        if host in progress.failed:
            if host in down:
                return "X"
            return '!'
        if host in down:
            return ":"
        return '.'

    def get_ordered_status(host):
        if host in progress.done:
            return 10
        if host in progress.running:
            return 100
        if host in progress.failed:
            return 50
        return 1000

    lines = [" + Done, up * Executing, up ! Failed, up . Waiting, up",
             " - Done, down / Executing, down X Failed, down : Waiting, down"]

    clusters = sorted(original_topology.get_clusters())
    for cluster in clusters:
        if len(clusters):
            lines.append("Cluster: " + cluster)
        cluster_topology = original_topology.with_cluster(cluster)
        dcs = sorted(cluster_topology.get_dcs())
        for cluster, dc in dcs:
            if len(dcs):
                lines.append("DC: " + dc)
            dc_topology = cluster_topology.with_dc(cluster, dc)
            hosts = sorted(dc_topology, key=lambda x: (get_ordered_status(x), x.rack, x.ip))
            status = "".join([get_status(host) for host in hosts])
            if len(status) >= 6:
                splitStatus = list(chunks(status, 3))
                status = splitStatus[0] + "\n" + splitStatus[1] + "\n" + splitStatus[2]
            lines.append(status)
    lines.append("%d done, %d failed, %d executing" %
                 (len(progress.done), len(progress.failed), len(progress.running)))
    printer("\n".join(lines))


def chunks(l, n):
    """Yield n number of sequential chunks from l."""
    d, r = divmod(len(l), n)
    for i in range(n):
        si = (d+1)*(i if i < r else r) + d*(0 if i < r else i - r)
        yield l[si:si+(d+1 if i < r else d)]
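# Small illustration of chunks() on a 7-character status string (the input is
# made up for the example; it is not produced by cstar itself):
if __name__ == "__main__":
    assert list(chunks("abcdefg", 3)) == ["abc", "de", "fg"]
    print(list(chunks("abcdefg", 3)))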
python
### list all installed fonts plus their variations
for fontName in installedFonts():
    variations = listFontVariations(fontName)
    if variations:
        print(fontName)
        for axis_name, dimensions in variations.items():
            print(axis_name, dimensions)
        print()
python
""" Module for abstract serializer/unserializer base classes. """ from StringIO import StringIO from django.core.serializers.base import SerializationError, DeserializationError from django.core.exceptions import ObjectDoesNotExist class Serializer(object): """ Abstract serializer base class. """ # Indicates if the implemented serializer is only available for # internal Django use. internal_use_only = False def serialize(self, queryset, **options): """ Serialize a queryset. """ self.options = options self.stream = options.pop("stream", StringIO()) self.use_natural_keys = options.pop("use_natural_keys", True) self.start_serialization() for obj in queryset: self.start_object(obj) self.end_object(obj) self.end_serialization() return self.getvalue() def start_serialization(self): """ Called when serializing of the queryset starts. """ raise NotImplementedError def end_serialization(self): """ Called when serializing of the queryset ends. """ pass def start_object(self, obj): """ Called when serializing of an object starts. """ raise NotImplementedError def end_object(self, obj): """ Called when serializing of an object ends. """ pass def getvalue(self): """ Return the fully serialized queryset (or None if the output stream is not seekable). """ if callable(getattr(self.stream, 'getvalue', None)): return self.stream.getvalue() class Deserializer(object): """ Abstract base deserializer class. """ def __init__(self, stream_or_string, **options): """ Init this serializer given a stream or a string """ self.options = options if isinstance(stream_or_string, basestring): self.stream = StringIO(stream_or_string) else: self.stream = stream_or_string # hack to make sure that the models have all been loaded before # deserialization starts (otherwise subclass calls to get_model() # and friends might fail...) #models.get_apps() def __iter__(self): return self def next(self): """Iteration iterface -- return the next item in the stream""" raise NotImplementedError class UnSet: pass class DeserializedObject(object): """ A deserialized document. Basically a container for holding the pre-saved deserialized data. Call ``save()`` to save the object """ def __init__(self, obj, natural_key=UnSet): self.object = obj self.natural_key = natural_key def __repr__(self): return "<DeserializedObject: %s.%s(%s)>" % ( self.object._meta.app_label, self.object._meta.object_name, self.natural_key) def save(self, enforce_natural_key=True): # Call save on the Model baseclass directly. This bypasses any # model-defined save. The save is also forced to be raw. # This ensures that the data that is deserialized is literally # what came from the file, not post-processed by pre_save/save # methods. #if an object with the natural key already exists, replace it while preserving the data store id if enforce_natural_key and self.natural_key != UnSet: manager = type(self.object).objects previous_objects = manager.filter_by_natural_key(self.natural_key) if previous_objects.count() > 1: for obj in list(previous_objects)[1:]: obj.delete() #!!!!!! TODO emit a warning or something... if previous_objects: #Does this work? previous_obj = previous_objects[0] pk_field = previous_obj._meta.get_id_field_name() pk_value = previous_obj._primitive_data[pk_field] print 'Replacing id: %s\t natural key: %s' % (pk_value, self.natural_key) self.object._primitive_data[pk_field] = pk_value self.object.save() return self.object
python
import unittest from validator import format class TestFormat(unittest.TestCase): def test_invalid_yaml(self): invalid_yaml = """ key: value - item#1 - item#2 """ (parsed, err) = format.validate(invalid_yaml) self.assertIsNone(parsed) self.assertEqual(True, "did not find expected key" in err) def test_valid_yaml(self): valid_yaml = """ key: &my_list - 1 - '2' obj: lst: *my_list """ (parsed, err) = format.validate(valid_yaml) self.assertEqual(parsed, {'key': [1, '2'], 'obj': {'lst': [1, '2']}}) self.assertIsNone(err) def test_duplicated_yaml(self): yml = """ key: value key: value """ (parsed, err) = format.validate(yml) self.assertIsNone(parsed) self.assertEqual(True, 'found duplicate key "key" with value "value" (original value: "value")' in err)
python
# coding: utf-8 # # For Loops (2) - Looping through the items in a sequence # In the last lesson we introduced the concept of a For loop and learnt how we can use them to repeat a section of code. We learnt how to write a For loop that repeats a piece of code a specific number of times using the <code>range()</code> function, and saw that we have to create a variable to keep track of our position in the loop (conventionally called <code>i</code>). We also found out how to implement if-else statements within our loop to change which code is run inside the loop. # # As well as writing a loop which runs a specific number of times, we can also create a loop which acts upon each item in a sequence. In this lesson we'll learn how to implement this functionality and find out how to use this knowledge to help us make charts with Plotly. # # ## Looping through each item in a sequence # # Being able to access each item in turn in a sequence is a really useful ability and one which we'll use often in this course. The syntax is very similar to that which we use to loop through the numbers in a range: # ```` python # for <variable name> in <sequence>: # <code to run> # ```` # # The difference here is that the variable which keeps track of our position in the loop does not increment by 1 each time the loop is run. Instead, the variable takes the value of each item in the sequence in turn: # In[1]: list1 = ['a', 'b', 'c', 'd', 'e'] for item in list1: print(item) # It's not important what we call this variable: # In[2]: for banana in list1: print(banana) # But it's probably a good idea to call the variable something meaningful: # In[3]: data = [20, 50, 10, 67] for d in data: print(d) # ## Using these loops # # We can use these loops in conjunction with other concepts we have already learnt. For example, imagine that you had a list of proportions stored as decimals, but that you needed to create a new list to store them as whole numbers. # # We can use <code>list.append()</code> with a for loop to create this new list. First, we have to create an empty list to which we'll append the percentages: # In[4]: proportions = [0.3, 0.45, 0.99, 0.23, 0.46] percentages = [] # Next, we'll loop through each item in proportions, multiply it by 100 and append it to percentages: # In[5]: for prop in proportions: percentages.append(prop * 100) print(percentages) # ## Using for loops with dictionaries # # We've seen how to loop through each item in a list. We will also make great use of the ability to loop through the keys and values in a dictionary. # # If you remember from the dictionaries lessons, we can get the keys and values in a dictionary by using <code>dict.items()</code>. We can use this in conjunction with a for loop to manipulate each item in a dictionary. This is something which we'll use often; we'll often have data for several years stored in a dictionary; looping through these items will let us plot the data really easily. # # In the cell below, I've created a simple data structure which we'll access using a for loop. 
Imagine that this data contains sales figures for the 4 quarters in a year: # In[6]: data = {2009 : [10,20,30,40], 2010 : [15,30,45,60], 2011 : [7,14,21,28], 2012 : [5,10,15,20]} # We can loop through the keys by using <code>dict.keys()</code>: # In[7]: for k in data.keys(): print(k) # And we can loop through the values (which are lists): # In[8]: for v in data.values(): print(v) # We can loop through them both together: # In[9]: for k, v in data.items(): print(k, v) # Having the data available to compare each year is really handy, but it might also be helpful to store them as one long list so we can plot the data and see trends over time. # # First, we'll make a new list to store all of the data items: # In[10]: allYears = [] # And then we'll loop through the dictionary and concatenate each year's data to the <code>allYears</code> list: # In[11]: for v in data.values(): allYears = allYears + v print(allYears) # ### What have we learnt this lesson? # In this lesson we've seen how to access each item in a sequence. We've learnt that the variable that keeps track of our position in the loop stores each value in the sequence in turn. We've seen how to apply this knowledge to loop through a dictionary of data and concatenate data for several years into one long list. # If you have any questions, please ask in the comments section or email <a href="mailto:[email protected]">[email protected]</a>
python
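# The lesson above builds toward charting the looped data. A minimal sketch of
# how the same dictionary loop can feed a chart, assuming Plotly's
# graph_objects API is available; one trace is drawn per year, with the
# quarter number on the x-axis.
import plotly.graph_objects as go

data = {2009: [10, 20, 30, 40], 2010: [15, 30, 45, 60],
        2011: [7, 14, 21, 28], 2012: [5, 10, 15, 20]}

fig = go.Figure()
for year, sales in data.items():
    fig.add_trace(go.Scatter(x=[1, 2, 3, 4], y=sales, name=str(year)))
fig.show()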
from django.test import TestCase from django.utils import timezone from datetime import timedelta from decimal import Decimal from blackbook.models import Currency, CurrencyConversion class CurrencyTest(TestCase): def testString(self): currency = Currency.objects.create(name="Test Currency", code="TEST") self.assertEqual(str(currency), "TEST") def testCreationWithLowerCaseString(self): currency = Currency.objects.create(name="Test Currency", code="test") self.assertEqual(str(currency), "TEST") class CurrencyConversionTest(TestCase): @classmethod def setUpTestData(cls): cls.EUR, created = Currency.objects.get_or_create(code="EUR") cls.CHF, created = Currency.objects.get_or_create(code="CHF") cls.USD, created = Currency.objects.get_or_create(code="USD") cls.timestamp = timezone.now() cls.EUR_TO_CHF = CurrencyConversion.objects.create(base_currency=cls.EUR, target_currency=cls.CHF, multiplier=2, timestamp=cls.timestamp) def testString(self): self.assertEqual(str(self.EUR_TO_CHF), "1 EUR = 2 CHF ({timestamp})".format(timestamp=self.timestamp.strftime("%d %b %Y %H:%m"))) def testConversionWithString(self): eur_to_chf = CurrencyConversion.convert(base_currency="EUR", target_currency="CHF", amount=1) chf_to_eur = CurrencyConversion.convert(base_currency="CHF", target_currency="EUR", amount=1) self.assertEqual(eur_to_chf, 2) self.assertEqual(chf_to_eur, 0.5) def testConversionWithLowercaseString(self): eur_to_chf = CurrencyConversion.convert(base_currency="eur", target_currency="chf", amount=1) chf_to_eur = CurrencyConversion.convert(base_currency="chf", target_currency="eur", amount=1) self.assertEqual(eur_to_chf, 2) self.assertEqual(chf_to_eur, 0.5) def testConversionWithSameString(self): eur_to_eur = CurrencyConversion.convert(base_currency="EUR", target_currency="EUR", amount=1) self.assertEqual(eur_to_eur, 1) def testConversionWithStringUnknownCurrency(self): usd_to_eur = CurrencyConversion.convert(base_currency="USD", target_currency="EUR", amount=1) eur_to_usd = CurrencyConversion.convert(base_currency="EUR", target_currency="USD", amount=1) self.assertEqual(usd_to_eur, 1) self.assertEqual(eur_to_usd, 1) def testConversionWithObjects(self): eur_to_chf = CurrencyConversion.convert(base_currency=self.EUR, target_currency=self.CHF, amount=1) chf_to_eur = CurrencyConversion.convert(base_currency=self.CHF, target_currency=self.EUR, amount=1) self.assertEqual(eur_to_chf, 2) self.assertEqual(chf_to_eur, 0.5) def testConversionFromConversionObjectWithString(self): eur_to_chf = self.EUR_TO_CHF.convert_to(target_currency="CHF", amount=1) self.assertEqual(eur_to_chf, 2) def testConversionFromConversionObjectWithObject(self): eur_to_chf = self.EUR_TO_CHF.convert_to(target_currency=self.CHF, amount=1) self.assertEqual(eur_to_chf, 2) def testConversionWithNewerReverseConversionTimestamp(self): date_yesterday = timezone.now() - timedelta(days=1) CurrencyConversion.objects.create(base_currency=self.EUR, target_currency=self.USD, multiplier=2, timestamp=date_yesterday) CurrencyConversion.objects.create(base_currency=self.USD, target_currency=self.EUR, multiplier=3) eur_to_usd = CurrencyConversion.convert(base_currency="EUR", target_currency="USD", amount=1) self.assertAlmostEqual(eur_to_usd, Decimal(0.33), places=2)
python
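# The blackbook.models implementation is not included here; the sketch below is
# only a reconstruction of a convert() helper consistent with the tests above.
# The field lookups and the "newest rate wins" tie-breaking are assumptions,
# not the project's actual code.
from blackbook.models import Currency, CurrencyConversion


def convert_sketch(base_currency, target_currency, amount):
    def code(value):
        return value.code if isinstance(value, Currency) else str(value).upper()

    base, target = code(base_currency), code(target_currency)
    if base == target:
        return amount

    direct = (CurrencyConversion.objects
              .filter(base_currency__code=base, target_currency__code=target)
              .order_by("-timestamp").first())
    reverse = (CurrencyConversion.objects
               .filter(base_currency__code=target, target_currency__code=base)
               .order_by("-timestamp").first())

    if direct is None and reverse is None:
        return amount                      # unknown pair: pass the amount through
    if reverse is None or (direct is not None and direct.timestamp >= reverse.timestamp):
        return amount * direct.multiplier  # use the freshest direct rate
    return amount / reverse.multiplier     # otherwise invert the newer reverse rate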
import sys, string import re from ucscGb.gbData.ordereddict import OrderedDict import collections class RaStanza(OrderedDict): ''' Holds an individual entry in the RaFile. ''' @property def name(self): return self._name def __init__(self, key=None, value=None): OrderedDict.__init__(self) if key != None and value != None: self[key] = value self._name = value self._nametype = key else: self._name = '' self._nametype = '' def checkIndent(self, stanza): indent = -1 #print stanza for line in stanza: i = 0 while True: #print line[i] + ', ' + str(i) + ', ' + str(indent) if line[i] != ' ': break i = i + 1 if indent == -1: indent = i / 3 elif indent != i / 3: raise KeyError('inconsistent indentation') if indent == -1: raise KeyError('blank stanza') #print 'indent = ' + str(indent) return indent def readStanza(self, stanza, key=None, scopes=None): ''' Populates this entry from a single stanza. Override this to create custom behavior in derived classes ''' #print stanza for line in stanza: self.readLine(line) if scopes != None: i = self.checkIndent(stanza) if len(scopes) == i: # if we havent hit a scope this low yet, append it scopes.append(self) else: # otherwise we should just set it scopes[i] = self if i > 0: self.parent = scopes[i - 1] for pk in self.parent.keys(): if pk not in self.keys(): self[pk] = self.parent[pk] return self.readName(stanza, key) def readName(self, stanza, key=None): ''' Extracts the Stanza's name from the value of the first line of the stanza. ''' if key == None: line = stanza[0].strip() else: line = None for s in stanza: if s.split(' ', 1)[0] == key: line = s break if line == None: return None if len(line.split(' ', 1)) != 2: raise ValueError() names = map(str.strip, line.split(' ', 1)) self._nametype = names[0] self._name = names[1] #print names return names def readLine(self, line): ''' Reads a single line from the stanza, extracting the key-value pair ''' if line.startswith('#') or line == '': OrderedDict.append(self, line) else: raKey = line.split(' ', 1)[0].strip() raVal = '' if (len(line.split(' ', 1)) == 2): raVal = line.split(' ', 1)[1].strip() #if raKey in self: #raise KeyError(raKey + ' already exists') self[raKey] = raVal def difference(self, other): ''' Complement function to summaryDiff. Takes in self and a comparison Stanza. Returns new stanza with terms from 'self' that are different from 'other' Like the summaryDiff, to get the other terms, this needs to be run again with self and other switched. ''' retRa = RaStanza() retRa._name = self.name for key in other.keys(): try: if other[key] != self[key] and not key.startswith('#'): retRa[key] = self[key] except KeyError: continue #maybe add empty keys return retRa def iterkeys(self): for item in self._OrderedDict__ordering: if not (item.startswith('#') or item == ''): yield item def itervalues(self): for item in self._OrderedDict__ordering: if not (item.startswith('#') or item == ''): yield self[item] def iteritems(self): for item in self._OrderedDict__ordering: if not (item.startswith('#') or item == ''): yield item, self[item] def iter(self): iterkeys(self) def __str__(self): str = '' for key in self: if key.startswith('#'): str += key + '\n' else: str += key + ' ' + self[key] + '\n' return str
python
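# A small usage sketch for the RaStanza class above; the stanza values are made
# up, and note that the module targets Python 2 (iteritems, list-returning map).
stanza_lines = [
    'track wgEncodeExample',
    'type bigWig',
    '# comment lines are kept verbatim',
]
stanza = RaStanza()
print(stanza.readStanza(stanza_lines))  # ['track', 'wgEncodeExample']
print(stanza.name)                      # wgEncodeExample
print(stanza['type'])                   # bigWig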
import json from parserutils.strings import camel_to_snake from ..query.fields import RENDERER_ALIASES from ..query.fields import DictField, ObjectField from .geometry import Extent def to_words(cls_instance_or_name, as_string=True): if isinstance(cls_instance_or_name, type): class_name = cls_instance_or_name.__name__ elif not isinstance(cls_instance_or_name, str): class_name = type(cls_instance_or_name).__name__ else: class_name = cls_instance_or_name class_words = camel_to_snake(class_name).split("_") return " ".join(class_words) if as_string else class_words def to_object(json_or_dict, aliases=None, from_camel=True, defaults=None): """ Transforms JSON data into an object """ if isinstance(json_or_dict, str): json_or_dict = json.loads(json_or_dict) elif hasattr(json_or_dict, "get_data"): json_or_dict = json_or_dict.get_data() if aliases or defaults: json_or_dict = DictField( aliases=aliases, convert_camel=from_camel, defaults=defaults ).to_python(json_or_dict, None) return ObjectField(convert_camel=from_camel).to_python(json_or_dict, None) def to_renderer(json_or_dict, from_camel=True): """ Shortcut to build a renderer object from a dict or JSON :param json_or_dict: the value to convert into a renderer :param from_camel: implies conversion back to ESRI values if False """ aliases = dict(RENDERER_ALIASES) defaults = ["symbol", "field", "field1", "field2", "field3", "label"] if from_camel: defaults.append("default_symbol") else: defaults.append("defaultSymbol") aliases = {v: k for k, v in aliases.items()} renderer = to_object(json_or_dict, aliases, from_camel, defaults) if getattr(renderer, "min", None) is None: setattr(renderer, "min", getattr(renderer, "min_val", None)) if from_camel: if renderer.symbol: renderer.symbol = to_symbol(renderer.symbol, from_camel) if renderer.default_symbol: renderer.default_symbol = to_symbol(renderer.default_symbol, from_camel) return renderer def to_symbol(json_or_dict, from_camel=True): """ Shortcut to build a symbol object from a dict or JSON :param json_or_dict: the value to convert into a renderer :param from_camel: implies conversion back to ESRI values if False """ aliases = { "imageData": "image", "xoffset": "offset_x", "yoffset": "offset_y" } defaults = [ "type", "style", "color", "width", "height" ] if from_camel: defaults.extend(("offset_x", "offset_y")) else: defaults.extend(("xoffset", "yoffset")) aliases = {v: k for k, v in aliases.items()} if not is_symbol(json_or_dict): symbol = None else: symbol = to_object(json_or_dict, from_camel=from_camel, aliases=aliases, defaults=defaults) symbol.outline = to_symbol(getattr(symbol, "outline", None), from_camel) return symbol def is_symbol(json_or_dict, key=None): if json_or_dict is None: return False if hasattr(json_or_dict, "get_data"): json_or_dict = json_or_dict.get_data() elif isinstance(json_or_dict, str): json_or_dict = json.loads(json_or_dict) if key is not None: return is_symbol(json_or_dict.get(key)) else: return bool(json_or_dict and json_or_dict.get("type")) def extent_to_polygon_wkt(extent_or_dict, **kwargs): """ Generates a quadrilateral POLYGON(...) 
from extent data """ if extent_or_dict is None: raise ValueError("Extent or dict is required") if isinstance(extent_or_dict, dict): extent = extent_or_dict elif isinstance(extent_or_dict, Extent): extent = extent_or_dict.as_dict() else: extent = Extent(extent_or_dict).as_dict() return "POLYGON(({xmin} {ymin}, {xmax} {ymin}, {xmax} {ymax}, {xmin} {ymax}, {xmin} {ymin}))".format(**extent) def point_to_wkt(x, y, **kwargs): return f"POINT({x} {y})" def multipoint_to_wkt(points, **kwargs): """ Generates MULTIPOINT(x1 y1, x2 y2, ...) from an array of point values """ point_str = _points_to_str(points) return f"MULTIPOINT({point_str})" def polyline_to_wkt(paths, **kwargs): """ Generates MULTILINESTRING((x1 y1, x2 y2), ...) from an array of path values """ multi_point_str = _multi_points_to_str(paths) return f"MULTILINESTRING({multi_point_str})" def polygon_to_wkt(rings, **kwargs): """ Generates POLYGON((x1 y1, x2 y2), ...) from an array of ring values """ multi_point_str = _multi_points_to_str(rings) return f"POLYGON({multi_point_str})" def _points_to_str(points): if points is None: raise ValueError("A points array is required") return ", ".join(("{0} {1}".format(*p[0:2]) for p in points)) def _multi_points_to_str(multi_points): if multi_points is None: raise ValueError("An array of points arrays is required") return ", ".join("({})".format(_points_to_str(points)) for points in multi_points)
python
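# Quick usage sketch for the WKT helpers above; the coordinates are made up.
square = [(0, 0), (0, 10), (10, 10), (10, 0), (0, 0)]
print(point_to_wkt(0, 0))         # POINT(0 0)
print(multipoint_to_wkt(square))  # MULTIPOINT(0 0, 0 10, 10 10, 10 0, 0 0)
print(polygon_to_wkt([square]))   # POLYGON((0 0, 0 10, 10 10, 10 0, 0 0))
print(extent_to_polygon_wkt({"xmin": 0, "ymin": 0, "xmax": 10, "ymax": 10}))
# POLYGON((0 0, 10 0, 10 10, 0 10, 0 0))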
import sys from typing import Optional, TextIO from antlr4 import InputStream from src.messages.message_lexer import message_lexer class Lexer(message_lexer): def __init__(self, key: str, inp: InputStream, output: TextIO = sys.stdout): super().__init__(inp, output) self._recent = None self.message_key = key def append_text(self, text: Optional[str] = None): """ Append a character to the token's text. :param text: If not None, appends this to the text. Otherwise, grabs the most recently lexed character from the input. """ if text is None: text = self._input.strdata[self._input.index - 1] else: self._recent = text if self._text is None: self._text = "" self._text += text
python
def a_star(start_node, end_node):
    open_set = {start_node}
    closed_set = set()
    g = {}        # Stores the distance of each node from the start node.
    parents = {}  # Maps each node to its predecessor on the best known path.

    # Distance of the start node from itself is zero.
    g[start_node] = 0
    parents[start_node] = start_node

    while len(open_set) > 0:
        n = None
        # Pick the open node with the lowest f() = g() + heuristic().
        for v in open_set:
            if n is None or g[v] + heuristic(v) < g[n] + heuristic(n):
                n = v

        if n == end_node or Graph_node.get(n) is None:
            pass
        else:
            for (m, weight) in get_neighbours(n):
                if m not in open_set and m not in closed_set:
                    open_set.add(m)
                    parents[m] = n
                    g[m] = g[n] + weight
                else:
                    if g[m] > g[n] + weight:
                        g[m] = g[n] + weight
                        parents[m] = n
                        if m in closed_set:
                            closed_set.remove(m)
                            open_set.add(m)

        if n is None:
            print("Path doesn't exist")
            return None

        if n == end_node:
            path = []
            print("Parents", parents)
            while parents[n] != n:
                path.append(n)
                n = parents[n]
            path.append(start_node)
            path.reverse()
            print(f"Path found {path}")
            return path

        open_set.remove(n)
        closed_set.add(n)

    print("Path doesn't exist!")
    return None


def get_neighbours(v):
    # Return an empty list rather than None so callers can always iterate.
    return Graph_node.get(v) or []


def heuristic(n):
    h_dist = {
        'A': 11,
        'B': 2,
        'C': 99,
        'D': 1,
        'E': 7,
        'G': 0,
    }
    return h_dist[n]


Graph_node = {
    'A': [('B', 2), ('E', 3)],
    'B': [('C', 1), ('G', 3)],
    'C': None,
    'E': [('D', 6)],
    'D': [('G', 1)],
}

a_star('A', 'G')
python
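# An alternative sketch of the same search using heapq for the open set; the
# graph and heuristic mirror the ones above, with explicit empty edge lists.
import heapq

graph = {'A': [('B', 2), ('E', 3)], 'B': [('C', 1), ('G', 3)],
         'C': [], 'E': [('D', 6)], 'D': [('G', 1)], 'G': []}
h = {'A': 11, 'B': 2, 'C': 99, 'D': 1, 'E': 7, 'G': 0}


def a_star_heapq(start, goal):
    frontier = [(h[start], 0, start, [start])]  # (f, g, node, path so far)
    best_g = {start: 0}
    while frontier:
        f, g, node, path = heapq.heappop(frontier)
        if node == goal:
            return path, g
        for neighbour, weight in graph.get(node, []):
            new_g = g + weight
            if new_g < best_g.get(neighbour, float('inf')):
                best_g[neighbour] = new_g
                heapq.heappush(frontier,
                               (new_g + h[neighbour], new_g, neighbour, path + [neighbour]))
    return None, float('inf')


print(a_star_heapq('A', 'G'))  # (['A', 'B', 'G'], 5)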
import functools import numpy as np import jax import jax.numpy as jnp from jax import jit, vmap from jax.ops import index, index_update def get_ray_bundle(height, width, focal_length, tfrom_cam2world): ii, jj = jnp.meshgrid( jnp.arange(width, dtype=jnp.float32,), jnp.arange(height, dtype=jnp.float32,), indexing="xy", ) directions = jnp.stack( [ (ii - width * 0.5) / focal_length, -(jj - height * 0.5) / focal_length, -jnp.ones_like(ii), ], axis=-1, ) ray_directions = jnp.sum( directions[..., None, :] * tfrom_cam2world[:3, :3], axis=-1 ) ray_origins = jnp.broadcast_to(tfrom_cam2world[:3, -1], ray_directions.shape) return ray_origins, ray_directions # @functools.partial(jit, static_argnums=(1, 2, 3)) def map_batched(tensor, f, chunksize, use_vmap): if tensor.shape[0] < chunksize: return f(tensor) else: tensor_diff = -tensor.shape[0] % chunksize initial_len = tensor.shape[0] tensor_len = tensor.shape[0] + tensor_diff tensor = jnp.pad(tensor, ((0, tensor_diff), (0, 0)), "constant") tensor = tensor.reshape(tensor_len // chunksize, chunksize, *tensor.shape[1:]) if use_vmap: out = vmap(f)(tensor) # this unfortunately keeps each batch in memory... else: out = jax.lax.map(f, tensor) out = out.reshape(-1, *out.shape[2:])[:initial_len] return out # @functools.partial(jit, static_argnums=(1, 2, 3)) def map_batched_rng(tensor, f, chunksize, use_vmap, rng): if tensor.shape[0] < chunksize: key, subkey = jax.random.split(rng) return f((tensor, subkey)), key else: tensor_diff = -tensor.shape[0] % chunksize initial_len = tensor.shape[0] tensor_len = tensor.shape[0] + tensor_diff tensor = jnp.pad(tensor, ((0, tensor_diff), (0, 0)), "constant") tensor = tensor.reshape(tensor_len // chunksize, chunksize, *tensor.shape[1:]) key, *subkey = jax.random.split(rng, tensor_len // chunksize + 1) subkey = jnp.stack(subkey) if use_vmap: out = vmap(f)((tensor, subkey)) # kinda gross imo else: out = jax.lax.map(f, (tensor, subkey)) out = out.reshape(-1, *out.shape[2:])[:initial_len] return out, key
python
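# Usage sketch for map_batched defined above: a jitted per-chunk function is
# mapped over a padded and reshaped array, and the padding is stripped again.
# (The `jax.ops.index*` imports above are unused here and have been removed in
# recent JAX releases.)
rays = jnp.arange(30.0).reshape(10, 3)   # 10 "rays" with 3 components each
double = jit(lambda chunk: chunk * 2.0)  # applied to (chunksize, 3) blocks
out = map_batched(rays, double, chunksize=4, use_vmap=True)
print(out.shape)                         # (10, 3)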
# coding: utf-8 """ Author: Weichen Shen,[email protected] Reference: [1] Feng Y, Lv F, Shen W, et al. Deep Session Interest Network for Click-Through Rate Prediction[J]. arXiv preprint arXiv:1905.06482, 2019.(https://arxiv.org/abs/1905.06482) """ from collections import OrderedDict from tensorflow.python.keras.initializers import RandomNormal from tensorflow.python.keras.layers import (Concatenate, Dense, Embedding, Flatten, Input) from tensorflow.python.keras.models import Model from tensorflow.python.keras.regularizers import l2 from ..input_embedding import (create_singlefeat_inputdict, get_embedding_vec_list, get_inputs_list) from ..layers.core import DNN, PredictionLayer from ..layers.sequence import (AttentionSequencePoolingLayer, BiasEncoding, BiLSTM, Transformer) from ..layers.utils import NoMask, concat_fun from ..utils import check_feature_config_dict def DSIN(feature_dim_dict, sess_feature_list, embedding_size=8, sess_max_count=5, sess_len_max=10, bias_encoding=False, att_embedding_size=1, att_head_num=8, dnn_hidden_units=(200, 80), dnn_activation='sigmoid', dnn_dropout=0, dnn_use_bn=False, l2_reg_dnn=0, l2_reg_embedding=1e-6, init_std=0.0001, seed=1024, task='binary', ): """Instantiates the Deep Session Interest Network architecture. :param feature_dim_dict: dict,to indicate sparse field (**now only support sparse feature**)like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':[]} :param sess_feature_list: list,to indicate session feature sparse field (**now only support sparse feature**),must be a subset of ``feature_dim_dict["sparse"]`` :param embedding_size: positive integer,sparse feature embedding_size. :param sess_max_count: positive int, to indicate the max number of sessions :param sess_len_max: positive int, to indicate the max length of each session :param bias_encoding: bool. Whether use bias encoding or postional encoding :param att_embedding_size: positive int, the embedding size of each attention head :param att_head_num: positive int, the number of attention head :param dnn_hidden_units: list,list of positive integer or empty list, the layer number and units in each layer of deep net :param dnn_activation: Activation function to use in deep net :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate. :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in deep net :param l2_reg_dnn: float. L2 regularizer strength applied to DNN :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector :param init_std: float,to use as the initialize std of embedding vector :param seed: integer ,to use as random seed. :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss :return: A Keras model instance. 
""" check_feature_config_dict(feature_dim_dict) if (att_embedding_size * att_head_num != len(sess_feature_list) * embedding_size): raise ValueError( "len(session_feature_lsit) * embedding_size must equal to att_embedding_size * att_head_num ,got %d * %d != %d *%d" % ( len(sess_feature_list), embedding_size, att_embedding_size, att_head_num)) sparse_input, dense_input, user_behavior_input_dict, _, user_sess_length = get_input( feature_dim_dict, sess_feature_list, sess_max_count, sess_len_max) sparse_embedding_dict = {feat.name: Embedding(feat.dimension, embedding_size, embeddings_initializer=RandomNormal( mean=0.0, stddev=init_std, seed=seed), embeddings_regularizer=l2( l2_reg_embedding), name='sparse_emb_' + str(i) + '-' + feat.name, mask_zero=(feat.name in sess_feature_list)) for i, feat in enumerate(feature_dim_dict["sparse"])} query_emb_list = get_embedding_vec_list(sparse_embedding_dict, sparse_input, feature_dim_dict["sparse"], sess_feature_list, sess_feature_list) query_emb = concat_fun(query_emb_list) deep_input_emb_list = get_embedding_vec_list(sparse_embedding_dict, sparse_input, feature_dim_dict["sparse"], mask_feat_list=sess_feature_list) deep_input_emb = concat_fun(deep_input_emb_list) deep_input_emb = Flatten()(NoMask()(deep_input_emb)) tr_input = sess_interest_division(sparse_embedding_dict, user_behavior_input_dict, feature_dim_dict['sparse'], sess_feature_list, sess_max_count, bias_encoding=bias_encoding) Self_Attention = Transformer(att_embedding_size, att_head_num, dropout_rate=0, use_layer_norm=False, use_positional_encoding=(not bias_encoding), seed=seed, supports_masking=True, blinding=True) sess_fea = sess_interest_extractor( tr_input, sess_max_count, Self_Attention) interest_attention_layer = AttentionSequencePoolingLayer(att_hidden_units=(64, 16), weight_normalization=True, supports_masking=False)( [query_emb, sess_fea, user_sess_length]) lstm_outputs = BiLSTM(len(sess_feature_list) * embedding_size, layers=2, res_layers=0, dropout_rate=0.2, )(sess_fea) lstm_attention_layer = AttentionSequencePoolingLayer(att_hidden_units=(64, 16), weight_normalization=True)( [query_emb, lstm_outputs, user_sess_length]) deep_input_emb = Concatenate()( [deep_input_emb, Flatten()(interest_attention_layer), Flatten()(lstm_attention_layer)]) if len(dense_input) > 0: deep_input_emb = Concatenate()( [deep_input_emb] + list(dense_input.values())) output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn, dnn_dropout, dnn_use_bn, seed)(deep_input_emb) output = Dense(1, use_bias=False, activation=None)(output) output = PredictionLayer(task)(output) sess_input_list = [] # sess_input_length_list = [] for i in range(sess_max_count): sess_name = "sess_" + str(i) sess_input_list.extend(get_inputs_list( [user_behavior_input_dict[sess_name]])) # sess_input_length_list.append(user_behavior_length_dict[sess_name]) model_input_list = get_inputs_list([sparse_input, dense_input]) + sess_input_list + [ user_sess_length] model = Model(inputs=model_input_list, outputs=output) return model def get_input(feature_dim_dict, seq_feature_list, sess_max_count, seq_max_len): sparse_input, dense_input = create_singlefeat_inputdict(feature_dim_dict) user_behavior_input = {} for idx in range(sess_max_count): sess_input = OrderedDict() for i, feat in enumerate(seq_feature_list): sess_input[feat] = Input( shape=(seq_max_len,), name='seq_' + str(idx) + str(i) + '-' + feat) user_behavior_input["sess_" + str(idx)] = sess_input user_behavior_length = {"sess_" + str(idx): Input(shape=(1,), name='seq_length' + str(idx)) for 
idx in range(sess_max_count)} user_sess_length = Input(shape=(1,), name='sess_length') return sparse_input, dense_input, user_behavior_input, user_behavior_length, user_sess_length def sess_interest_division(sparse_embedding_dict, user_behavior_input_dict, sparse_fg_list, sess_feture_list, sess_max_count, bias_encoding=True): tr_input = [] for i in range(sess_max_count): sess_name = "sess_" + str(i) keys_emb_list = get_embedding_vec_list(sparse_embedding_dict, user_behavior_input_dict[sess_name], sparse_fg_list, sess_feture_list, sess_feture_list) # [sparse_embedding_dict[feat](user_behavior_input_dict[sess_name][feat]) for feat in # sess_feture_list] keys_emb = concat_fun(keys_emb_list) tr_input.append(keys_emb) if bias_encoding: tr_input = BiasEncoding(sess_max_count)(tr_input) return tr_input def sess_interest_extractor(tr_input, sess_max_count, TR): tr_out = [] for i in range(sess_max_count): tr_out.append(TR( [tr_input[i], tr_input[i]])) sess_fea = concat_fun(tr_out, axis=1) return sess_fea
python
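# The ValueError raised at the top of DSIN() encodes a shape constraint that is
# easy to trip over: the concatenated session embedding must match the
# Transformer width, i.e. len(sess_feature_list) * embedding_size must equal
# att_embedding_size * att_head_num. A quick check with the default arguments:
embedding_size, att_embedding_size, att_head_num = 8, 1, 8
for n_sess_features in (1, 2):
    ok = n_sess_features * embedding_size == att_embedding_size * att_head_num
    print(n_sess_features, 'session feature(s):', 'ok' if ok else 'raises ValueError')
# 1 session feature(s): ok
# 2 session feature(s): raises ValueError (unless att_embedding_size is raised to 2)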
import pandas as pd from ....Trade.Strategy.Cta.DyST_TraceFocus import * from ....Trade.Strategy.DyStockCtaBase import * from ....Trade.DyStockStrategyBase import * class DyStockDataFocusAnalysisUtility(object): """ 热点分析工具类 这个类有点特别,会借助DyST_FocusTrace类 """ class DummyCtaEngine: def __init__(self, eventEngine): self.errorInfo = DyErrorInfo(eventEngine) self.errorDataEngine = DyStockDataEngine(eventEngine, self.errorInfo, registerEvent=False) self.dataEngine = self.errorDataEngine self.dummyInfo = DyDummyInfo() self.dummyDataEngine = DyStockDataEngine(eventEngine, self.dummyInfo, registerEvent=False) def loadPreparedData(self, *args, **kwargs): return None def tDaysOffsetInDb(self, base, n=0): return self.dataEngine.daysEngine.tDaysOffsetInDb(base, n) def loadOnClose(self, *args, **kwargs): return None def putStockMarketMonitorUiEvent(self, *args, **kwargs): pass def __getattr__(self, name): return None def _convert2Tick(day, code, name, df): """ @df: 含有'preClose'列 """ tick = DyStockCtaTickData() try: s = df.ix[day] pos = df.index.get_loc(day) if pos == 0: return None except Exception: return None tick.code = code tick.name = name tick.date = day tick.time = '15:00:00' tick.datetime = datetime.strptime(day + ' 15:00:00', '%Y-%m-%d %H:%M:%S') tick.preClose = df.ix[pos - 1, 'close'] tick.price = s['close'] tick.open = s['open'] tick.high = s['high'] tick.low = s['low'] tick.volume = s['volume'] tick.amount = s['amt'] return tick def _convert2Ticks(day, dfs, codeTable): ticks = {} for code, df in dfs.items(): tick = DyStockDataFocusAnalysisUtility._convert2Tick(day, code, codeTable[code], df) if tick is None: continue ticks[code] = tick return ticks def _createFocusStrengthDf(dayIndex, focusInfoPool): data = {} for focus, focusInfo in focusInfoPool.items(): data[focus] = [focusInfo.strength] df = pd.DataFrame(data, index=[dayIndex]) return df def _initTraceFocusObj(traceFocusObj, date, info, codes, conceptsDict, dummyDaysEngine): """ Initialize prepared data """ # init traceFocusObj._curInit(date) # we only update UI for first time if traceFocusObj._preparedData: info = DyDummyInfo() # only classify codes not in 'oldStocks' dict codes = set(codes) - set(traceFocusObj._preparedData.get('oldStocks', [])) preparedData = DyST_TraceFocus.classifyCodes(date, codes, info, dummyDaysEngine, conceptsDict) # update prepared data of DyST_TraceFocus object traceFocusObj._preparedData.setdefault('oldStocks', {}).update(preparedData['oldStocks']) traceFocusObj._preparedData['newStocks'] = preparedData['newStocks'] def _changeTraceFocusObj(traceFocusObj): """ replace dragons in focus info pool by [[code, name]] """ for _, focusInfo in traceFocusObj._focusInfoPool.items(): focusInfo.dragons = [[code, traceFocusObj._focusCodePool[code].name] for code in focusInfo.dragons] def _incrementAnalysis(dummyTraceFocusObj, day, info, codes, dfs, codeTable, conceptsDict, dummyDaysEngine): """ 增量分析每日热点,这样只需要增量归类归类股票 """ # initialize incremently DyStockDataFocusAnalysisUtility._initTraceFocusObj(dummyTraceFocusObj, day, info, codes, conceptsDict, dummyDaysEngine) # push ticks ticks = DyStockDataFocusAnalysisUtility._convert2Ticks(day, dfs, codeTable) if ticks: dummyTraceFocusObj.onTicks(ticks) DyStockDataFocusAnalysisUtility._changeTraceFocusObj(dummyTraceFocusObj) return dummyTraceFocusObj._focusInfoPool def analysis(dfs, indexDfIndex, codeTable, eventEngine, info): """ @dfs: {code: df}, 不含指数 @indexDfIndex: 对应的指数DF的index @return: foucs strength DF, dict of focus info pool """ dummyCtaEngine = 
DyStockDataFocusAnalysisUtility.DummyCtaEngine(eventEngine) dummyTraceFocusObj = DyST_TraceFocus(dummyCtaEngine, dummyCtaEngine.errorInfo, DyStockStrategyState(DyStockStrategyState.backTesting)) # create a dummy instance of DyST_TraceFoucs # classify first time assert indexDfIndex.size > 1 codes = list(dfs) conceptsDict = DyST_TraceFocus.getConceptsFromFile() DyStockDataFocusAnalysisUtility._initTraceFocusObj(dummyTraceFocusObj, indexDfIndex[0].strftime("%Y-%m-%d"), info, codes, conceptsDict, dummyCtaEngine.dummyDataEngine.daysEngine) # focus analysis info.print('开始热点分析...', DyLogData.ind) progress = DyProgress(info) progress.init(indexDfIndex.size) focusInfoPoolDict = {} # {day: focus info pool} focusStrengthDfList = [] # [focus DF of one day] for dayIndex in indexDfIndex: day = dayIndex.strftime("%Y-%m-%d") # analysis incremently focusInfoPool = DyStockDataFocusAnalysisUtility._incrementAnalysis(dummyTraceFocusObj, day, info, codes, dfs, codeTable, conceptsDict, dummyCtaEngine.dummyDataEngine.daysEngine) focusInfoPoolDict[day] = focusInfoPool focusStrengthDfList.append(DyStockDataFocusAnalysisUtility._createFocusStrengthDf(dayIndex, focusInfoPool)) progress.update() # concatenate into DF and 按热点出现次数排序(列排序) focusStrengthDf = pd.concat(focusStrengthDfList) columns = list(focusStrengthDf.columns) columns = sorted(columns, key=lambda x: focusStrengthDf[x].notnull().sum(), reverse=True) focusStrengthDf = focusStrengthDf.reindex(columns=columns) info.print('热点分析完成', DyLogData.ind) return focusStrengthDf, focusInfoPoolDict def _analysisProcess(outQueue, days, dayIndexes, info, dummyTraceFocusObj, dfs, codeTable, conceptsDict, dummyDaysEngine): """ 以子进程方式分析每日热点 """ codes = list(dfs) for day, dayIndex in zip(days, dayIndexes): # analysis incremently focusInfoPool = DyStockDataFocusAnalysisUtility._incrementAnalysis(dummyTraceFocusObj, day, info, codes, dfs, codeTable, conceptsDict, dummyDaysEngine) outQueue.put([day, dayIndex, focusInfoPool])
python
from pathlib import Path from six.moves.urllib.parse import urlunparse from six.moves.urllib.error import HTTPError from six.moves.urllib.request import urlretrieve import logging logger = logging.getLogger(__name__) class HiRISE_URL(object): """Manage HiRISE URLs. Provide a storage path as calculated from above objects and put together the full URL to the HiRISE product. Parameters ---------- product_path : str or pathlib.Path Storage path to the product """ initurl = ('https://hirise-pds.lpl.arizona.edu/PDS/RDR/' 'ESP/ORB_011400_011499/ESP_011491_0985/ESP_' '011491_0985_RED.LBL') scheme = 'https' netloc = 'hirise-pds.lpl.arizona.edu' pdspath = Path('/PDS') def __init__(self, product_path, params=None, query=None, fragment=None): self.product_path = product_path self.params = params self.query = query self.fragment = fragment @property def path(self): path = self.pdspath / self.product_path return str(path) @property def url(self): return urlunparse([self.scheme, self.netloc, self.path, self.params, self.query, self.fragment]) class OBSERVATION_ID(object): """Manage HiRISE observation ids. For example PSP_003092_0985. `phase` is set to PSP for orbits < 11000, no setting required. Parameters ---------- obsid : str, optional One can optionally also create an 'empty' OBSERVATION_ID object and set the properties accordingly to create a new obsid. """ def __init__(self, obsid=None): if obsid is not None: phase, orbit, targetcode = obsid.split('_') self._orbit = int(orbit) self._targetcode = targetcode else: self._orbit = None self._targetcode = None @property def orbit(self): return str(self._orbit).zfill(6) @orbit.setter def orbit(self, value): if value > 999999: raise ValueError("Orbit cannot be larger than 999999") self._orbit = value @property def targetcode(self): return self._targetcode @targetcode.setter def targetcode(self, value): if len(str(value)) != 4: raise ValueError('Targetcode must be exactly 4 characters.') self._targetcode = value @property def phase(self): return 'PSP' if int(self.orbit) < 11000 else 'ESP' def __str__(self): return '{}_{}_{}'.format(self.phase, self.orbit, self.targetcode) def __repr__(self): return self.__str__() @property def s(self): return self.__str__() def get_upper_orbit_folder(self): ''' get the upper folder name where the given orbit folder is residing on the hisync server ''' lower = int(self.orbit) // 100 * 100 return "_".join(["ORB", str(lower).zfill(6), str(lower + 99).zfill(6)]) @property def storage_path_stem(self): s = "{phase}/{orbitfolder}/{obsid}".format(phase=self.phase, orbitfolder=self.get_upper_orbit_folder(), obsid=self.s) return s class PRODUCT_ID(object): """Manage storage paths for HiRISE RDR products (also EXTRAS.) Attributes `jp2_path` and `label_path` get you the official RDR product, with `kind` steering if you get the COLOR or the RED product. All other properties go to the RDR/EXTRAS folder. Parameters ---------- initstr : str, optional Note ---- The "PDS" part of the path is handled in the HiRISE_URL class. 
""" kinds = ['RED', 'BG', 'IR', 'COLOR', 'IRB', 'MIRB', 'MRGB', 'RGB'] @classmethod def from_path(cls, path): path = Path(path) return cls(path.stem) def __init__(self, initstr=None): if initstr is not None: tokens = initstr.split('_') self._obsid = OBSERVATION_ID('_'.join(tokens[:3])) try: self.kind = tokens[3] except IndexError: self._kind = None else: self._kind = None @property def obsid(self): return self._obsid @obsid.setter def obsid(self, value): self._obsid = OBSERVATION_ID(value) @property def kind(self): return self._kind @kind.setter def kind(self, value): if value not in self.kinds: raise ValueError("kind must be in {}".format(self.kinds)) self._kind = value def __str__(self): return "{}_{}".format(self.obsid, self.kind) def __repr__(self): return self.__str__() @property def s(self): return self.__str__() @property def storage_stem(self): return '{}/{}'.format(self.obsid.storage_path_stem, self.s) @property def label_fname(self): return '{}.LBL'.format(self.s) @property def label_path(self): return 'RDR/' + self.storage_stem + '.LBL' def _make_url(self, obj): path = getattr(self, f"{obj}_path") return HiRISE_URL(path).url def __getattr__(self, item): tokens = item.split('_') try: if tokens[-1] == 'url': return self._make_url('_'.join(tokens[:-1])) except IndexError: raise ValueError(f"No attribute named '{item}' found.") # TODO: implement general self.obj_url for all paths. @property def jp2_fname(self): return self.s + '.JP2' @property def jp2_path(self): prefix = 'RDR/' postfix = '' if self.kind not in ['RED', 'COLOR']: prefix += 'EXTRAS/' if self.kind in ['IRB']: postfix = '.NOMAP' return prefix + self.storage_stem + postfix + ".JP2" @property def nomap_jp2_path(self): if self.kind in ['RED', 'IRB', 'RGB']: return 'RDR/EXTRAS/' + self.storage_stem + '.NOMAP.JP2' else: raise AttributeError("No NOMAP exists for {}.".format(self.kind)) @property def quicklook_path(self): if self.kind in ['COLOR', 'RED']: return Path('EXTRAS/RDR/') / (self.storage_stem + ".QLOOK.JP2") else: raise AttributeError("No quicklook exists for {} products.".format(self.kind)) @property def abrowse_path(self): if self.kind in ['COLOR', 'MIRB', 'MRGB', 'RED']: return Path('EXTRAS/RDR/') / (self.storage_stem + '.abrowse.jpg') else: raise AttributeError("No abrowse exists for {}".format(self.kind)) @property def browse_path(self): inset = '' if self.kind in ['IRB', 'RGB']: inset = '.NOMAP' if self.kind not in ['COLOR', 'MIRB', 'MRGB', 'RED', 'IRB', 'RGB']: raise AttributeError("No browse exists for {}".format(self.kind)) else: return Path('EXTRAS/RDR/') / (self.storage_stem + inset + '.browse.jpg') @property def thumbnail_path(self): if self.kind in ['BG', 'IR']: raise AttributeError("No thumbnail exists for {}".format(self.kind)) inset = '' if self.kind in ['IRB', 'RGB']: inset = '.NOMAP' return Path('EXTRAS/RDR/') / (self.storage_stem + inset + '.thumb.jpg') @property def nomap_thumbnail_path(self): if self.kind in ['RED', 'IRB', 'RGB']: return Path('EXTRAS/RDR') / (self.storage_stem + '.NOMAP.thumb.jpg') else: raise AttributeError("No NOMAP thumbnail exists for {}".format(self.kind)) @property def nomap_browse_path(self): if self.kind in ['RED', 'IRB', 'RGB']: return Path('EXTRAS/RDR') / (self.storage_stem + '.NOMAP.browse.jpg') @property def edr_storage_stem(self): return 'EDR/' + self.storage_stem class SOURCE_PRODUCT_ID(object): """Manage SOURCE_PRODUCT_ID. 
Example ------- 'PSP_003092_0985_RED4_0' """ red_ccds = ['RED' + str(i) for i in range(10)] ir_ccds = ['IR10', 'IR11'] bg_ccds = ['BG12', 'BG13'] ccds = red_ccds + ir_ccds + bg_ccds def __init__(self, spid=None, saveroot=None): if spid is not None: tokens = spid.split('_') obsid = '_'.join(tokens[:3]) ccd = tokens[3] color, ccdno = self._parse_ccd(ccd) self.pid = PRODUCT_ID('_'.join([obsid, color])) self.ccd = ccd self.channel = tokens[4] self.saveroot = saveroot else: self.pid = None self._channel = None self._ccd = None def __getattr__(self, value): return getattr(self.pid, value) def _parse_ccd(self, value): sep = 2 if value[:2] in PRODUCT_ID.kinds else 3 return value[:sep], value[sep:] @property def channel(self): return self._channel @channel.setter def channel(self, value): if int(value) not in [0, 1]: raise ValueError("channel must be in [0, 1]") self._channel = value @property def ccd(self): return self._ccd @ccd.setter def ccd(self, value): if value not in self.ccds: raise ValueError("CCD value must be in {}.".format(self.ccds)) self._ccd = value if self.pid is not None: self.pid.color = self.color @property def color(self): return self._parse_ccd(self.ccd)[0] @property def ccdno(self): offset = len(self.color) return self.ccd[offset:] def __str__(self): return "{}: {}{}_{}".format(self.__class__.__name__, self.pid, self.ccdno, self.channel) def __repr__(self): return self.__str__() @property def s(self): return "{}{}_{}".format(self.pid, self.ccdno, self.channel) @property def fname(self): return self.s + '.IMG' @property def local_cube(self): return self.local_path.with_suffix('.cub') @property def fpath(self): return Path(self.pid.edr_storage_stem).parent / self.fname @property def furl(self): hiurl = HiRISE_URL(self.fpath) return hiurl.url @property def stitched_cube_name(self): return f"{self.pid.obsid.s}_{self.ccd}.cub" @property def local_path(self): savepath = self.saveroot / str(self.obsid) / self.fname return savepath def download(self, overwrite=False): savepath = self.local_path if savepath.exists() and not overwrite: logger.warning("File exists and I'm not allowed to overwrite:" " %s", savepath) return savepath.parent.mkdir(parents=True, exist_ok=True) logger.info(f"Downloading\n{self.furl}\nto\n{savepath}") try: urlretrieve(self.furl, str(savepath)) except HTTPError as e: logger.error(e.__str__()) class RED_PRODUCT_ID(SOURCE_PRODUCT_ID): def __init__(self, obsid, ccdno, channel, **kwargs): self.ccds = self.red_ccds super().__init__('{}_RED{}_{}'.format(obsid, ccdno, channel), **kwargs) class IR_PRODUCT_ID(SOURCE_PRODUCT_ID): def __init__(self, obsid, ccdno, channel): self.ccds = self.ir_ccds super().__init__('{}_IR{}_{}'.format(obsid, ccdno, channel))
python
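# Usage sketch for the path helpers above, using the observation id from the
# class docstrings; the printed values follow directly from the code.
pid = PRODUCT_ID('PSP_003092_0985_RED')
print(pid.obsid.phase)                     # PSP (orbit 3092 < 11000)
print(pid.obsid.get_upper_orbit_folder())  # ORB_003000_003099
print(pid.label_path)
# RDR/PSP/ORB_003000_003099/PSP_003092_0985/PSP_003092_0985_RED.LBL
print(pid.label_url)
# https://hirise-pds.lpl.arizona.edu/PDS/RDR/PSP/ORB_003000_003099/PSP_003092_0985/PSP_003092_0985_RED.LBL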
#!/usr/bin/env python class Cart: def __init__(self): self.id = None self.capacity = None self.payload = None self.last_known_location = None
python
from selenium import webdriver
from selenium.webdriver.common.keys import Keys

browser = webdriver.Chrome()
browser.get('https://www.dan.me.uk/bgplookup')
count = 6535  # starting value, continuing from where the previous run stopped
while count <= 64511:
    browser.find_element_by_name('asn').clear()
    browser.find_element_by_name('asn').send_keys(count)
    browser.find_element_by_name('asn').send_keys(Keys.ENTER)
    count += 1
    tb = browser.find_element_by_xpath('//*[@id="content-left-in"]/div/table')
    print(tb.text)
# when running, redirect the output to a file: python<dan.py>file.txt
python
# hw06_03 import random def makesentence(): subjects = ['Dog', 'Cat', 'Monkey', 'Pig', 'Fox'] verbs = ['walks', 'runs', 'jumps'] advs = ['slowly', 'quickly'] print('%s %s %s.' % (random.choice(subjects), random.choice(verbs), random.choice(advs))) for i in range(5): makesentence() ''' Cat walks quickly. Fox jumps slowly. Monkey jumps slowly. Pig jumps slowly. Monkey walks quickly. '''
python
#!/usr/bin/python3 # -*- coding: utf-8 -*- from PyQt5.QtCore import QThread, pyqtSignal from handlers import APIHandler, LoadMusicHandler from config import config from vkapi import VKLightError from utils import save_json, stat class LoadMusic(QThread): music = pyqtSignal(list) error = pyqtSignal(str) count_tracks = pyqtSignal(int) warning_message_count_audios = pyqtSignal(str) loaded = pyqtSignal(bool) def __init__(self, api, user_id): super().__init__() self.api = api self.user_id = user_id self.api_handler = APIHandler(self.api) self.music_handler = LoadMusicHandler(api_handler=self.api_handler) def run(self): try: self.loaded.emit(False) count_audios = self.api_handler.get_count_audio(user_id=self.user_id) self.count_tracks.emit(count_audios) data = self.music_handler.load_all_music(user_id=self.user_id, count=count_audios) self.music.emit(data) self.loaded.emit(True) stat.set_user_id(user_id=self.user_id) stat.send() except (VKLightError, Exception) as e: self.loaded.emit(True) self.error.emit(str(e)) self.music.emit([]) self.count_tracks.emit(0) def __del__(self): print("Bye bye ...") self.audios = None
python
# Program in which 4 players roll a die and get random results.
# The results are stored in a Python dictionary. At the end, the dictionary is
# sorted, knowing that the winner is the player who rolled the highest number.
from random import randint
from time import sleep
from operator import itemgetter

jogo = {'jogador1': randint(1, 6),
        'jogador2': randint(1, 6),
        'jogador3': randint(1, 6),
        'jogador4': randint(1, 6)}
ranking = {}
print('Match results')
for k, v in jogo.items():
    print(f'{k} rolled {v} on the die.')
    sleep(1)
# end-for
ranking = sorted(jogo.items(), key=itemgetter(1), reverse=True)
print('Final ranking')
for i, v in enumerate(ranking):
    print(f'Place {i + 1}: {v[0]} with {v[1]} points.')
# end-for
python
# (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) from .__about__ import __version__ from .kafka_consumer import KafkaCheck __all__ = ['__version__', 'KafkaCheck']
python
import os from multiprocessing.managers import BaseManager, BaseProxy import numpy as np import tensorflow as tf from tensorflow.python.training import session_run_hook from tensorflow.python.training.basic_session_run_hooks import SessionRunArgs from rank_based import Experience class sync_expericence(Experience): def __init__(self, conf, path): super(sync_expericence, self).__init__(conf) assert isinstance(path, str) self._path = os.path.join(path, 'experience_replay.npy') self.load() def store(self, experience): out = super(sync_expericence, self).store(experience) self.save() return out def load(self): if os.path.exists(self._path): super(sync_expericence, self).load(self._path) print('load experience from {}'.format(self._path)) else: pass def save(self): super(sync_expericence, self).save(self._path) def sample(self, global_step, batch_size=None): return super(sync_expericence, self).sample(global_step, batch_size=batch_size) def update_priority(self, indices, delta): super(sync_expericence, self).update_priority(indices, delta) super(sync_expericence, self).rebalance() class ExperienceProxy(BaseProxy): _exposed_ = ('store', 'sample', 'update_priority', 'load', 'save') def store(self, experience): return self._callmethod('store', (experience,)) def load(self): self._callmethod('load', ()) def save(self): self._callmethod('save', ()) def sample(self, global_step): while True: experience, w, rank_e_id = self._callmethod('sample', (global_step,)) if experience != False: break raise ValueError('get experience failed!') return experience[0], w[0], rank_e_id[0] def update_priority(self, indices, delta): assert len(indices) == len(delta), "lens mismatch {} vs {}".format(len(indices), len(delta)) return self._callmethod('update_priority', (indices, delta,)) class Manager(BaseManager): pass Manager.register('Experience', sync_expericence, ExperienceProxy) class SaveStateHook(session_run_hook.SessionRunHook): def __init__(self, state_scope, reset_scope, meta_error, base_error, experience=None, keep_prob=0.9): assert isinstance(experience, ExperienceProxy) self._state_tensor = tf.global_variables(scope=state_scope) self._experience = experience self._first_run = True self._reset_op = tf.variables_initializer(tf.global_variables(scope=reset_scope)) self._keep_prob = keep_prob self._global_step_tensor = tf.train.get_or_create_global_step() self._rank_e_id = 0 self._meta_error = meta_error self._base_error = base_error self._state_norm = tf.global_norm(self._state_tensor) def before_run(self, run_context): # pylint: disable=unused-argument if self._first_run: init_state = run_context.session.run(self._state_tensor) self._rank_e_id = self._experience.store(init_state) print('get init state, id: {}'.format(self._rank_e_id)) self._first_run = False self._meta_error_history = [] self._top_base_error = 0.0 self._top_rank_e_id = self._rank_e_id self._top_state_norm = None self._should_reset = np.random.random() > self._keep_prob args = {'meta_error': self._meta_error} if self._should_reset: args["global_step"] = self._global_step_tensor args['base_error'] = self._base_error args['state_norm'] = self._state_norm return SessionRunArgs(args) def after_run(self, run_context, run_values): self._meta_error_history.append(run_values.results["meta_error"]) if self._should_reset: base_error = run_values.results["base_error"] state_norm = run_values.results["state_norm"] if base_error <= self._top_base_error and \ (self._top_state_norm is None or state_norm < self._top_state_norm * 1.2): print('store state based 
{}, base_error: {}, state_norm: {}'.format(self._rank_e_id, base_error, state_norm)) current_state, _ = run_context.session.run([self._state_tensor, self._reset_op]) self._top_rank_e_id = self._experience.store(current_state) self._top_base_error = base_error self._top_state_norm = state_norm else: run_context.session.run(self._reset_op) delta = np.exp(np.mean(self._meta_error_history)) self._experience.update_priority([self._rank_e_id], [delta]) new_state, _, new_rank_e_id = self._experience.sample(run_values.results["global_step"]) for var, val in zip(self._state_tensor, new_state): var.load(val, run_context.session) self._rank_e_id = new_rank_e_id self._meta_error_history = [] print('reset with id: {}, top:{}'.format(self._rank_e_id, self._top_rank_e_id)) class RecordStateHook(session_run_hook.SessionRunHook): def __init__(self, state_scope, total_step, account, loss, experience=None): assert isinstance(experience, ExperienceProxy) self._state_tensor = tf.global_variables(scope=state_scope) self._experience = experience self._account = 0 self._gap = total_step // account print('total_step: {}, gap: {}'.format(total_step, self._gap)) self._loss = loss self._step = tf.train.get_or_create_global_step() def before_run(self, run_context): # pylint: disable=unused-argument step = run_context.session.run(self._step) args = {} self._should_save = step % self._gap == 0 if self._should_save: args['state'] = self._state_tensor args['loss'] = self._loss return SessionRunArgs(args) def after_run(self, run_context, run_values): if self._should_save: state = run_values.results["state"] loss = run_values.results["loss"] rank_e_id = self._experience.store(state) print('store state {}, loss {}'.format(rank_e_id, loss))
python
# -*- coding: utf-8 -*- # Generated by Django 1.10.8 on 2019-03-21 08:36 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('wildlifecompliance', '0147_merge_20190314_1451'), ] operations = [ migrations.AddField( model_name='callemail', name='assigned_to', field=models.CharField(default='brendan', max_length=100), preserve_default=False, ), migrations.AddField( model_name='callemail', name='caller', field=models.CharField(default='Jawaid', max_length=100), preserve_default=False, ), migrations.AddField( model_name='callemail', name='lodged_on', field=models.DateField(auto_now=True), ), migrations.AddField( model_name='callemail', name='number', field=models.CharField(default='default', max_length=50), preserve_default=False, ), migrations.AlterField( model_name='classification', name='name', field=models.CharField(choices=[('complaint', 'Complaint'), ('enquiry', 'Enquiry'), ('incident', 'Incident')], default='complaint', max_length=30), ), ]
python
""" A module for getting the class names for a dataset, in a single canonical order. """ import json from folder import datasets_path def get_class_data(dataset): path = datasets_path / dataset / 'classes.json' with path.open('r') as f: class_data = json.load(f) class_data.sort(key=lambda d: d['name']) return class_data def get_classnames(dataset): class_data = get_class_data(dataset) names = [d['name'] for d in class_data] return names def set_class_data(dataset, class_data): class_data.sort(key=lambda d: d['name']) path = datasets_path / dataset / 'classes.json' with path.open('w') as f: json.dump(class_data, f) if __name__ == '__main__': print(get_classnames('test'))
python
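# The classes.json layout is implied by the code above: a JSON list of objects
# that each carry at least a "name" key. A tiny round trip, assuming the
# datasets_path/'test' directory already exists:
set_class_data('test', [{'name': 'car'}, {'name': 'bus'}])
print(get_classnames('test'))  # ['bus', 'car']  (canonical, name-sorted order)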
from .tell_os import is_linux if is_linux(): import Xlib import Xlib.display from contextlib import contextmanager from typing import Optional from .abstract_desktop_monitor import AbstractDesktopMonitor @contextmanager def window_obj(display, win_id): """Simplify dealing with BadWindow (make it either valid or None)""" window_obj = None if win_id: try: window_obj = display.create_resource_object('window', win_id) except Xlib.error.XError: pass yield window_obj class LinuxDesktopMonitor(AbstractDesktopMonitor): def __init__(self): self.display = Xlib.display.Display() self.NET_ACTIVE_WINDOW = self.display.intern_atom('_NET_ACTIVE_WINDOW') self.NET_WM_PID = self.display.intern_atom('_NET_WM_PID') self.root_screen = self.display.screen().root def get_active_pid(self) -> Optional[int]: active_win_id = self.root_screen.get_full_property(self.NET_ACTIVE_WINDOW, Xlib.X.AnyPropertyType).value[0] with window_obj(self.display, active_win_id) as win_obj: if win_obj: pid = win_obj.get_full_property(self.NET_WM_PID, Xlib.X.AnyPropertyType).value[0] return pid
python
from io import open import requests def test_scene(): image_data = open("scene.jpg", "rb").read() response = requests.post( "http://localhost:80/v1/vision/scene", files={"image": image_data}, data={"api_key": "Mojohn1"}, ).json() assert response["success"] == True assert response["label"] == "conference_room"
python
import seaborn as sns import pandas as pd import numpy as np import matplotlib.pyplot as plt def Hist(dataset, xlabel, filename): #Log Trip Duration His sns.set_style("darkgrid") sns.distplot(dataset, kde=True, rug=False) plt.legend() plt.xlabel(xlabel) plt.ylabel('Frequency') plt.tight_layout() plt.savefig('./fig/'+filename+'.png',dpi=100) def CountPlot(dataset, column, label, s, a): #Day of Week_Count_Plot sns.set_style("darkgrid") sns_plot = sns.factorplot(x=column,data=dataset,kind='count', palette="muted",size = s, aspect = a) plt.xlabel(label) plt.ylabel('Frequency') plt.tight_layout() sns_plot.savefig('./fig/'+label + '_Count_Plot.png',dpi=100) def MapPlot(dataset, color = None, legend_plot = True): sns.set_style("darkgrid") city_long_border = (-74.03, -73.75) city_lat_border = (40.63, 40.85) if color is None: sns_plot = sns.lmplot('pickup_longitude', 'pickup_latitude', data=dataset,fit_reg=False, scatter_kws={"s": 2.5}, legend =legend_plot) else: sns_plot = sns.lmplot('pickup_longitude', 'pickup_latitude', data=dataset,hue = color, fit_reg=False, scatter_kws={"s": 2.5}, legend =legend_plot) #sns.plt.xlim(city_long_border) #sns.plt.ylim(city_lat_border) plt.xlabel('Longitude') plt.ylabel('Latitude') sns_plot.savefig('./fig/3_Map_Plot_PickUp.png',dpi=100) def SpeedPlot(dataset,column,speed,label): sns.set_style("darkgrid") sns.pointplot(x=column, y=speed, data=dataset) plt.xlabel(label) plt.ylabel('Speed (km/hr)') plt.savefig('./fig/4_Speed_By_'+label+'.png',dpi=100) def PlotFeatureImp(feature_importance_table): sns.set_style("darkgrid") sns.factorplot(x="importance", y="feature_name",data=feature_importance_table, kind="bar") plt.savefig('./fig/5_Feature_Imp_XGB.png',dpi=100) def LonLatPlot(train): sns.set(style="darkgrid", palette="muted") f, axes = plt.subplots(2,2,figsize=(12, 12), sharex = False, sharey = False)# sns.despine(left=True) # if true, remove the ax sns.distplot(train['pickup_latitude'].values, color="m",bins = 100, ax=axes[0,0]) sns.distplot(train['pickup_longitude'].values, color="g",bins =100, ax=axes[0,1]) sns.distplot(train['dropoff_latitude'].values, color="m",bins =100, ax=axes[1,0]) sns.distplot(train['dropoff_longitude'].values, color="g",bins =100, ax=axes[1,1]) axes[0, 0].set_title('pickup_latitude') axes[0, 1].set_title('pickup_longitude') axes[1, 0].set_title('dropoff_latitude') axes[1, 1].set_title('dropoff_longitude') plt.setp(axes, yticks=[]) plt.tight_layout() plt.savefig('./fig/6_Lat_Lon_Plot.png',dpi=100) def ViolinPlot(train, y_, row_, hue_): sns.set(style="darkgrid") sns.violinplot(x=row_, y=y_, hue=hue_, data=train, split=True, inner="quart") def PivotPlot(train, y_, row_, col_): #pickup_hour_and_pickup_weekday_Pivot sns.set(style="darkgrid") pivot_table=pd.pivot_table(train, index=row_, columns=col_, values=y_, aggfunc=np.mean) sns.heatmap(pivot_table) plt.tight_layout() plt.savefig('./fig/'+ row_ + '_and_' + col_ + '_Pivot.png',dpi=100) #Time_Distance_Plot def Time_Distance_Plot(train): sample_ind = np.random.permutation(len(train))[:5000] sns.lmplot(x='distance_haversine', y='log_trip_duration', data = train.iloc[sample_ind], scatter_kws={"s": 10}) plt.savefig('./fig/8_Time_Distance_Pivot.png',dpi=100) def Two_Hist_Plot(train, test): sns.set_style("darkgrid") sns.kdeplot(train['log_trip_duration'], shade=True, label = 'Train', color = 'b') sns.kdeplot(test['log_trip_duration'], shade=True, label = 'Test', color = 'r') plt.legend() plt.xlabel('Log(Duration)') plt.ylabel('Frequency') plt.tight_layout() 
plt.savefig('./fig/9_Two_His_Plot.png',dpi=100)
python
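# Usage sketch for the plotting helpers above: every function writes a PNG
# under ./fig/, so that directory must exist beforehand. Example with
# synthetic trip durations:
import numpy as np

durations = np.log(np.random.lognormal(mean=6.0, sigma=0.8, size=1000))
Hist(durations, 'Log(Duration)', '0_Example_Hist')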
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/1/30 12:10 PM
# @Author  : wook
# @File    : db.py

import pymysql

db = pymysql.connect(host="localhost", user="root", passwd="123123", db="mysql", port=3306, charset="utf8")  # connection object

# Use the cursor() method to create a cursor object
cursor = db.cursor()

# Use the execute() method to run a SQL query
cursor.execute("SELECT VERSION()")

# Use the fetchone() method to fetch a single row of data.
data = cursor.fetchone()

print("Database version : %s " % data)

# Close the database connection
db.close()
python
import os from flask import Flask from flask_cors import CORS from .extensions import db, bcrypt from flask_socketio import SocketIO from .models import users, messages from .config import DevConfig, ProdConfig socketio = SocketIO(cors_allowed_origins="*") def create_app(): app = Flask(__name__) app.config['SECRET_KEY'] = DevConfig.SECRET_KEY app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///chat_database.sqlite' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True """importing blueprint""" from .home import home as home_blueprint from .chat import chat as chat_blueprint from .auth import auth as auth_blueprint """register blueprint""" app.register_blueprint(chat_blueprint) app.register_blueprint(home_blueprint) app.register_blueprint(auth_blueprint) """database initiate""" db.init_app(app) db.create_all(app=app) """initiate socket""" socketio.init_app(app) """implemented bcrypt""" bcrypt.init_app(app) """enable CORS origin""" CORS(app, resources={r"/*": {"origins": "*"}}) return app
python
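# Typical entry point for an app produced by this factory with Flask-SocketIO;
# the package import path ("app") is an assumption about how the project is
# laid out.
from app import create_app, socketio

app = create_app()

if __name__ == '__main__':
    socketio.run(app, host='0.0.0.0', port=5000, debug=True)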
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import inspect import sys if sys.version_info > (2, 7) and sys.version_info < (3, 0): def GetArgNameAndDefaultTuple(func): """ returns a dictionary of arg_name:default_values for the input function """ (args, varargs, keywords, defaults) = inspect.getargspec(func) defaults = list(defaults) if defaults is not None else [] while len(defaults) < len(args): defaults.insert(0, None) return tuple(zip(args, defaults)) elif sys.version_info >= (3, 0): def GetArgNameAndDefaultTuple(func): signature = inspect.signature(func) return tuple( [ (k, v.default if v.default is not inspect.Parameter.empty else None) for (k, v) in signature.parameters.items() ] ) else: raise NotImplementedError def GetArgDefaults(func): return tuple(map(lambda x: x[1], GetArgNameAndDefaultTuple(func)))
python
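# Worked example of the helpers above (Python 3 branch):
def example(a, b=3, c='x'):
    pass


print(GetArgNameAndDefaultTuple(example))  # (('a', None), ('b', 3), ('c', 'x'))
print(GetArgDefaults(example))             # (None, 3, 'x')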
#!/usr/bin/env python3 import os import random import sys import string import random if len(sys.argv) < 2: print(f'Usage: {sys.argv[0]} <target>') sys.exit(1) ip = sys.argv[1] print("Hello! I am a little sploit. I could be written on any language, but " "my author loves Python. Look at my source - it is really simple. " "I should steal flags and print them on stdout or stderr. ") print(f"I need to attack a team with host `{ip}`.") print("Here are some random flags for you:") print(f"First flag is adcs{{{''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(25))}}}", flush=True) print(f"Second flag is adcs{{{''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(25))}}}", flush=True)
""" IMPORT STEP """ import re import datetime import time import traceback import sys import json from math import ceil from pyspark import SparkConf, SparkContext from pyspark.sql import SQLContext, Row from pyspark.sql.types import * from pyspark.sql.functions import lit, col, udf def save_txt(document, path): """ Save an Rdd into a .txt file if it does not already exists Args: document(rdd): The document to save. path(string): The path in which the document have to be saved """ try: document.coalesce(1).saveAsTextFile(path) except Exception: # the file has already been saved for the current data source pass def save_json(document, schema, path, saveMode='overwrite'): """ Save an Rdd or a dataframe into a .json file with the saveMode specified Args: document(rdd): The document to save. schema(list): The schema of the columns of the rdd, used to convert to Dataframe path(string): The path in which the document have to be saved saveMode(string): A string representing the save mode, like 'overwrite' and 'append' """ if saveMode is None: try: print("Saving Json...") if schema is None: document.coalesce(1).write.json(path) else: document.toDF(schema).coalesce(1).write.json(path) except Exception as e: print("The file already exists") print(e) else: print("Modifying Json...") if schema is None: document.coalesce(1).write.mode(saveMode).json(path) else: document.toDF(schema).coalesce(1).write.mode(saveMode).json(path) """ DEFINITION STEP - XML """ def extract_main_elements_xml(document): """ This function search in the xml data the main elements: separators, elements and timestamps and return a new rdd Args: document(line): The line of the rdd to parse. """ matchObj = re.findall(r'<separator>(.*)</separator>', document) if matchObj: return matchObj[0] matchObj = re.findall(r'<element>(.*)</element>', document) if matchObj: return "element" matchObj = re.findall(r'<timestamp>(.*)</timestamp>', document) if matchObj: return "element" def extract_header(document): """ This function allow to extract the header of the columns from the xml if available Args: document(line): The line of the rdd to parse. """ matchObj = re.findall(r'<header>(.*)</header>', document) if matchObj: return matchObj[0] def extract_timestamp_format(document): """ This function allow to extract only the timestamps' columns from the xml Args: document(line): The line of the rdd to parse. """ matchObj = re.findall(r'<timestamp>(.*)</timestamp>', document) if matchObj: return matchObj[0] def regular_parsing_xml(document): """ Main function to extract the regular expression from the xml to be used to derive the elements for the analysis - It must be upgraded Args: document(line): The line of the rdd to parse. """ prev = "" next = False prec = "" post = "" # When each element is found, the antecedent and consequent separators are # saved in strings for s in document.toLocalIterator(): if(next): post = post + str(s) next = False else: if(str(s) == "element"): prec = prec + str(prev) next = True prev = str(s) prec = ''.join(set(prec)) post = ''.join(set(post)) # Construct the final regular expression regString = "[" + prec + "]" + "(.*?)" + "[" + post + "]" regString = regString.replace('"', '') return regString """ DEFINITION STEP - DOCUMENT STRUCTURATION """ def regular_parsing(document, regex): """ Main function to derive all the elements that respects the regex String Args: document(line): The line of the rdd to parse. 
regex(string): The string that contains the regular expression """ return re.findall(regex, document) def escape_removal(document): """ Function to remove the escape from the elements of the file, the function should be upgraded to eliminate all the undesired symbols Args: document(line): The line of the rdd to parse. """ return re.sub(r'\\', "", document) def quote_removal(document): """ Function to remove the double quotes from the data Args: document(line): The line of the rdd to parse. """ return re.sub(r'"', "", document) def comma_to_dot_number_conversion(document): """ Function to convert the numbers with ,-separation to .-separated Args: document(line): The line of the rdd to parse. """ return re.sub(r'(\d+),(\d+)', r'\1.\2', document) """ DEFINITION STEP - DIMENSION ANALYSIS """ def restriction_filter(document, newIndexes, j=None): """ The function performs the union of all the values of the different attributes into a single string, returning the final string Args: document(line): The line of the rdd to parse. newIndexes(list): List of index of the elements to union in a single string j(int): Index of the dependent element that should be returned as a value of the key-value pair """ for h in range(1, len(newIndexes)): document[newIndexes[0]] = str( document[newIndexes[0]]) + "," + str(document[newIndexes[h]]) if j is not None: return (document[newIndexes[0]], document[j]) return (document[newIndexes[0]]) def accuracy(sc, sqlContext, document, columns, dataTypes, volatiliy, desiredColumns, resultFolder, dimensionColumn, columnsKey, meanAccuracyValues, devAccuracyValues, timelinessAnalyzer, completenessAnalyzer, distinctnessAnalyzer, populationAnalyzer, inputSource, associationRules, totVolume, granularityDimensions, performanceSample): """ This function calculate the dimension of accuracy for each numerical attribute, save the results and return the Global values The arguments are all the same for each quality dimension since they are called dynamically in a cycle Args: sc: SparkContext of the sparkSession sqlContext: sqlContext of the sparkSession document(rdd): The rdd of the source. 
dataTypes(list): List of the types of the attribute based on the position in the rdd volatility(float or string): Parameter necessary for Timeliness and Completeness_Frequency: Number of hours to consider the data still recent desiredColumns(list): Indexes of the attributes requested in the analysis resultFolder(string): Absolute path of the destination of all the saved files dimensionColumn(dict): Dictionary containing the allowed index of the attributes for each available quality dimension columnsKey(list): Indexes of the attributes to consider as a grouping key meanAccuracyValues(list): Parameter necessary for Accuracy: List of the mean values to consider for each columnsKey requested devAccuracyValues(list): Parameter necessary for Accuracy: List of the allowed intervals values to consider for each columnsKey requested based on the previous considered mean values timelinessAnalyzer(boolean): Value set to True if the Timeliness dimension has to be evaluated completenessAnalyzer(boolean): Value set to True if the Completeness_Frequency dimension has to be evaluated distinctnessAnalyzer(boolean): Value set to True if the Distinctness dimension has to be evaluated populationAnalyzer(boolean): Value set to True if the Completeness_Population dimension has to be evaluated inputSource(string): Absolute path of the position of the source folder containing the Profiling information and all the portion of profiled data associationRules(list): Parameter optional for Consistency: List of additional association rules to be considered in the analysis totVolume(int): Number of total rows of the source granularityDimensions(dict): Dictionary containing the requested degrees of granularity for each requested quality dimension performanceSample(float): Number from 0+ to 1 representing the portion of data that has been considered in the analysis """ print(" ") print("Accuracy") print(" ") finalAccuracyRdd = sc.emptyRDD() attrName = [] attrAccuracy = [] confidence = [] counter = 0 # get the desired degrees of granularity try: granularity = granularityDimensions["Accuracy"].split(",") globalAcc = True if "global" in granularity else False attributeAcc = True if "attribute" in granularity else False valueAcc = True if "value" in granularity else False if granularity[0] == "": globalAcc = True except Exception: globalAcc = True attributeAcc = False valueAcc = False for j in columns: print("-Numerical Attribute = " + str(desiredColumns[j])) globalIncluded = 0 meanAccuracy = meanAccuracyValues[counter] devAccuracy = devAccuracyValues[counter] counter = counter + 1 if attributeAcc or valueAcc: for stringIndex in columnsKey: # key columns = keyColumns stringSplitIndex = stringIndex.split(",") newIndexes = [desiredColumns.index( k) for k in stringSplitIndex] newDocument = document.map( lambda x: restriction_filter(x, newIndexes, j)) stringAttribute = '_'.join(stringSplitIndex) try: # it is useless to group by the timestamps if not set(newIndexes).isdisjoint(dimensionColumn["Timeliness"]): continue except Exception: pass if j in newIndexes: continue print("--Key Attribute = " + stringAttribute) # calculate the distance between each value and the expected # mean, and calculate each accuracy value as this distance # divided by the maximum allowed interval staticAccuracyRdd = (newDocument.map(lambda x: (x[0], max(0.0, 1 - abs((float(x[1].replace(',', '.')) - float(meanAccuracy.replace(',', '.'))) / (float(devAccuracy.replace(',', '.')) / 2))))) .map(lambda x: (x[0], (1, x[1], ceil(x[1])))) .reduceByKey(lambda x, y: 
(x[0] + y[0], x[1] + y[1], x[2] + y[2])) ) if globalAcc: globalIncluded = 1 globalAccuracies = (staticAccuracyRdd.map(lambda x: (x[1][0], x[1][1], x[1][2])) .reduce(lambda x, y: (x[0] + y[0], x[1] + y[1], x[2] + y[2])) ) attributeStaticAccuracy = globalAccuracies[ 2] / float(globalAccuracies[0]) attributeAccuracy = globalAccuracies[ 1] / float(globalAccuracies[0]) accuracyRdd = staticAccuracyRdd.map(lambda x: ( x[0], x[1][1] / float(x[1][0]), x[1][2] / float(x[1][0]))) print("Calculate for each record the accuracy value as 1 - (( Value - desired Mean )/(Maximum interval / 2)) and find the mean results per Attribute's Value: -> ( Attribute's Value, Mean Accuracy )") # print(accuracyRdd.take(5)) if valueAcc: save_json(accuracyRdd, ["Value", "AccuracyDynamic", "AccuracyStatic"], resultFolder + "/accuracy_values/attribute_" + stringAttribute + "_REF_" + str(desiredColumns[j])) if attributeAcc: finalAccuracyRdd = finalAccuracyRdd.union(sc.parallelize([(stringAttribute, accuracyRdd.map( lambda x: x[1]).mean(), accuracyRdd.map(lambda x: x[2]).mean(), performanceSample)])) if attributeAcc: # save file into hdfs save_json(finalAccuracyRdd, ["Attribute", "AccuracyDynamic", "AccuracyStatic", "Confidence"], resultFolder + "/accuracy_attributes_" + str(desiredColumns[j])) # append the global value to the list if globalAcc: if not globalIncluded: # calculate final accuracies globalAccuracies = (document.map(lambda x: (max(0.0, 1 - abs((float(x[j].replace(',', '.')) - float(meanAccuracy.replace(',', '.'))) / (float(devAccuracy.replace(',', '.')) / 2))))) .map(lambda x: (1, x, ceil(x))) .reduce(lambda x, y: (x[0] + y[0], x[1] + y[1], x[2] + y[2])) ) attributeStaticAccuracy = globalAccuracies[ 2] / float(globalAccuracies[0]) attributeAccuracy = globalAccuracies[ 1] / float(globalAccuracies[0]) attrAccuracy.append(attributeStaticAccuracy) attrName.append("Accuracy_Static_" + str(desiredColumns[j])) confidence.append(performanceSample) attrAccuracy.append(attributeAccuracy) attrName.append("Accuracy_Dynamic_" + str(desiredColumns[j])) confidence.append(performanceSample) print("Global Static Accuracy " + str(attributeStaticAccuracy)) print("Global Dynamic Accuracy " + str(attributeAccuracy)) return attrName, attrAccuracy, confidence def calculateSampleDeviation(line): # lambda (key,(sumDev,count,sumMean)): # (key,((sumDev/(count-1))**0.5,sumMean/count,count)) try: newLine = (line[0], ((line[1][0] / (line[1][1] - 1))**0.5, line[1][2] / float(line[1][1]), float(line[1][1]))) except Exception: newLine = (line[0], (0, line[1][2] / float(line[1][1]), float(line[1][1]))) return newLine def precision(sc, sqlContext, document, columns, dataTypes, volatiliy, desiredColumns, resultFolder, dimensionColumn, columnsKey, meanAccuracyValues, devAccuracyValues, timelinessAnalyzer, completenessAnalyzer, distinctnessAnalyzer, populationAnalyzer, inputSource, associationRules, totVolume, granularityDimensions, performanceSample): """ This function calculate the dimension of precision for each numerical attribute, save the results and return the Global values The arguments are all the same for each quality dimension since they are called dynamically in a cycle Args: sc: SparkContext of the sparkSession sqlContext: sqlContext of the sparkSession document(rdd): The rdd of the source. 
dataTypes(list): List of the types of the attribute based on the position in the rdd volatility(float or string): Parameter necessary for Timeliness and Completeness_Frequency: Number of hours to consider the data still recent desiredColumns(list): Indexes of the attributes requested in the analysis resultFolder(string): Absolute path of the destination of all the saved files dimensionColumn(dict): Dictionary containing the allowed index of the attributes for each available quality dimension columnsKey(list): Indexes of the attributes to consider as a grouping key meanAccuracyValues(list): Parameter necessary for Accuracy: List of the mean values to consider for each columnsKey requested devAccuracyValues(list): Parameter necessary for Accuracy: List of the allowed intervals values to consider for each columnsKey requested based on the previous considered mean values timelinessAnalyzer(boolean): Value set to True if the Timeliness dimension has to be evaluated completenessAnalyzer(boolean): Value set to True if the Completeness_Frequency dimension has to be evaluated distinctnessAnalyzer(boolean): Value set to True if the Distinctness dimension has to be evaluated populationAnalyzer(boolean): Value set to True if the Completeness_Population dimension has to be evaluated inputSource(string): Absolute path of the position of the source folder containing the Profiling information and all the portion of profiled data associationRules(list): Parameter optional for Consistency: List of additional association rules to be considered in the analysis totVolume(int): Number of total rows of the source granularityDimensions(dict): Dictionary containing the requested degrees of granularity for each requested quality dimension performanceSample(float): Number from 0+ to 1 representing the portion of data that has been considered in the analysis """ print(" ") print("Precision") print(" ") finalPrecisionRdd = sc.emptyRDD() attrPrecision = [] attrName = [] confidence = [] # get the desired degrees of granularity try: granularity = granularityDimensions["Precision"].split(",") globalPrec = True if "global" in granularity else False attributePrec = True if "attribute" in granularity else False valuePrec = True if "value" in granularity else False if granularity[0] == "": globalPrec = True except Exception: globalPrec = True attributePrec = False valuePrec = False for j in columns: print("-Numerical Attribute = " + str(desiredColumns[j])) reShift = False statsDoc = document.map(lambda x: ( float(x[j].replace(',', '.')))).stats() devAttribute = float(statsDoc.stdev()) meanAttribute = float(statsDoc.mean()) minValue = float(statsDoc.min()) maxValue = float(statsDoc.max()) if (minValue < 0) & (maxValue > 0): reShift = True if attributePrec or valuePrec: for stringIndex in columnsKey: # key columns = keyColumns stringSplitIndex = stringIndex.split(",") newIndexes = [desiredColumns.index( k) for k in stringSplitIndex] newDocument = document.map( lambda x: restriction_filter(x, newIndexes, j)) stringAttribute = '_'.join(stringSplitIndex) try: # it is useless to group by the timestamps if not set(newIndexes).isdisjoint(dimensionColumn["Timeliness"]): continue except Exception: pass if j in newIndexes: continue print("--Key Attribute = " + stringAttribute) keyFloatDocument = newDocument.map( lambda x: (x[0], float(x[1].replace(',', '.')))) if reShift: print("---Shifting all the values to make them greater than 1 ") keyFloatDocument = keyFloatDocument.map( lambda x: (x[0], x[1] + abs(minValue))) meanRdd = 
(keyFloatDocument.map(lambda x: (x[0], (x[1], 1))) .reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1])) .map(lambda x: (x[0], x[1][0] / float(x[1][1]))) ) varRdd = (keyFloatDocument.join(meanRdd) .map(lambda (key, (value, mean)): (key, ((value - mean)**2, 1, mean))) .reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1], x[2] + y[2])) .map(lambda (key, (sumDev, counter, sumMean)): (key, ((sumDev / counter)**0.5, sumMean / counter))) ) precisionRdd = varRdd.map(lambda (key, (dev, mean)): (key, mean, max(0.0, 1.0 - dev / abs(mean)), dev)) print( "---Calculate value of precision per Attribute's value: --> Value, Mean, Precision, Standard Deviation") # print(precisionRdd.take(5)) if reShift: print( "---Shifting all the values again to replace the correct mean") precisionRdd = precisionRdd.map(lambda (key, mean, prec, dev): (key, mean - abs(minValue), prec, dev)) if valuePrec: save_json(precisionRdd, ["Value", "Mean", "Precision", "StandardDeviation"], resultFolder + "/precision_values/attribute_" + stringAttribute + "_number_" + str(desiredColumns[j])) finalPrecisionRdd = finalPrecisionRdd.union(sc.parallelize([(stringAttribute, precisionRdd.map(lambda x: x[1]).mean( ), precisionRdd.map(lambda x: x[2]).mean(), precisionRdd.map(lambda x: x[3]).mean(), performanceSample)])) print(" ") # calculate final aggregated value for precision if globalPrec: if reShift: attributePrecision = max( 0.0, 1.0 - devAttribute / (abs(meanAttribute) + abs(minValue))) else: attributePrecision = max( 0.0, 1.0 - devAttribute / abs(meanAttribute)) attrPrecision.append(attributePrecision) attrName.append("Precision_" + str(desiredColumns[j])) confidence.append(performanceSample) attrPrecision.append(devAttribute) attrName.append("Precision(Deviation)_" + str(desiredColumns[j])) confidence.append(performanceSample) print("Global Precision " + str(attributePrecision)) if attributePrec: print("--Final Aggregated File --> Attribute, Mean, Precision, Deviation") # print(finalPrecisionRdd.take(5)) # save file into hdfs save_json(finalPrecisionRdd, ["Attribute", "Mean", "Precision", "Standard_Deviation", "Confidence"], resultFolder + "/precision_attributes_" + str(desiredColumns[j])) return attrName, attrPrecision, confidence def mapping_completeness_missing(x, newIndexes): """ This function remove empty,None or null value from each line, and return the key, the current line length the previous line length as a new Rdd line Args: x(Row): line of the Rdd newIndexes(list): list of indexes to union in a single element """ for h in range(1, len(newIndexes)): x[newIndexes[0]] = str(x[newIndexes[0]]) + "," + str(x[newIndexes[h]]) del x[newIndexes[h]] lineLength = len(x) previousline = x try: line = [el for el in previousline if el is not None] previousline = line except Exception: line = previousline previousline = line try: line = filter(lambda a: a != "", line) previousline = line except Exception: line = previousline previousline = line try: line = filter(lambda a: a != "nan", line) previousline = line except Exception: line = previousline previousline = line try: line = filter(lambda a: a != "null", line) previousline = line except Exception: line = previousline return (x[newIndexes[0]], (len(line) - 1, lineLength - 1)) def completeness_missing(sc, sqlContext, document, columns, dataTypes, volatiliy, desiredColumns, resultFolder, dimensionColumn, columnsKey, meanAccuracyValues, devAccuracyValues, timelinessAnalyzer, completenessAnalyzer, distinctnessAnalyzer, populationAnalyzer, inputSource, associationRules, totVolume, 
granularityDimensions, performanceSample): """ This function calculate the value of the Part of Completeness regarding the missing elements per line,save the results and return the Global values The arguments are all the same for each quality dimension since they are called dynamically in a cycle Args: sc: SparkContext of the sparkSession sqlContext: sqlContext of the sparkSession document(rdd): The rdd of the source. dataTypes(list): List of the types of the attribute based on the position in the rdd volatility(float or string): Parameter necessary for Timeliness and Completeness_Frequency: Number of hours to consider the data still recent desiredColumns(list): Indexes of the attributes requested in the analysis resultFolder(string): Absolute path of the destination of all the saved files dimensionColumn(dict): Dictionary containing the allowed index of the attributes for each available quality dimension columnsKey(list): Indexes of the attributes to consider as a grouping key meanAccuracyValues(list): Parameter necessary for Accuracy: List of the mean values to consider for each columnsKey requested devAccuracyValues(list): Parameter necessary for Accuracy: List of the allowed intervals values to consider for each columnsKey requested based on the previous considered mean values timelinessAnalyzer(boolean): Value set to True if the Timeliness dimension has to be evaluated completenessAnalyzer(boolean): Value set to True if the Completeness_Frequency dimension has to be evaluated distinctnessAnalyzer(boolean): Value set to True if the Distinctness dimension has to be evaluated populationAnalyzer(boolean): Value set to True if the Completeness_Population dimension has to be evaluated inputSource(string): Absolute path of the position of the source folder containing the Profiling information and all the portion of profiled data associationRules(list): Parameter optional for Consistency: List of additional association rules to be considered in the analysis totVolume(int): Number of total rows of the source granularityDimensions(dict): Dictionary containing the requested degrees of granularity for each requested quality dimension performanceSample(float): Number from 0+ to 1 representing the portion of data that has been considered in the analysis """ print(" ") print("Completeness_Missing") print(" ") lineLength = len(document.take(1)[0]) finalCompleteRdd = sc.emptyRDD() # get the desired degrees of granularity try: granularity = granularityDimensions["Completeness_Missing"].split(",") globalMiss = True if "global" in granularity else False attributeMiss = True if "attribute" in granularity else False valueMiss = True if "value" in granularity else False if granularity[0] == "": globalMiss = True except Exception: globalMiss = True attributeMiss = False valueMiss = False if attributeMiss or valueMiss: for stringIndex in columnsKey: # key columns = keyColumns stringSplitIndex = stringIndex.split(",") newIndexes = [desiredColumns.index(k) for k in stringSplitIndex] newDocument = document.map( lambda x: restriction_filter(x, newIndexes)) stringAttribute = '_'.join(stringSplitIndex) print("--Key Attribute = " + stringAttribute) itIsTime = False if valueMiss: try: # it is useless to group by the timestamps if not set(newIndexes).isdisjoint(dimensionColumn["Timeliness"]): itIsTime = True except Exception: pass if not itIsTime: # analysis per value print("--Key Attribute's Values Analysis") keyValueDocument = document.map( lambda x: mapping_completeness_missing(x, newIndexes)) print("---Find number 
of filtered and total elements per record, for each Key Attribute's Value: -> ( Key Attribute , ( Filtered Line Lenght , Full Line Lenght ) )") # print(keyValueDocument.take(5)) # Add a filter to remove the null,none or empty keys print("---Filter Null keys") keyValueDocument = keyValueDocument.filter(lambda x: x[0] != "null").filter(lambda x: x[0] is not None).filter( lambda x: x[0] != "").filter(lambda x: x[0] != "nan").reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1])) keyValueDocument = keyValueDocument.map(lambda x: ( x[0], x[1][1] - x[1][0], x[1][0] / float(x[1][1]))) print( "---Calculate Completeness Missing for each Attribute's Value: -> ( Value, Missing Values, Completeness Missing Value )") # print(keyValueDocument.take(5)) save_json(keyValueDocument, ["Value", "MissingValues", "CompletenessMissingValue"], resultFolder + "/completeness_missing_values/" + stringAttribute) if attributeMiss: # Analysis per attribute print("--Attribute's Analysis") # attribute elements attributeDocument = newDocument totElements = attributeDocument.count() print("---Total Elements") print(totElements) filteredElements = attributeDocument.filter(lambda x: x != "null").filter( lambda x: x is not None).filter(lambda x: x != "").filter(lambda x: x != "nan").count() print("---Total Filtered Elements") print(filteredElements) completenessAttribute = filteredElements / float(totElements) print(completenessAttribute) # Save both values finalCompleteRdd = finalCompleteRdd.union(sc.parallelize( [(stringAttribute, totElements - filteredElements, completenessAttribute, performanceSample)])) if attributeMiss: print("--Calculate value of Completeness Missing per Attribute: --> Attribute, Missing Values, Completeness Missing Value, Confidence") # print(finalCompleteRdd.take(5)) # save file into hdfs save_json(finalCompleteRdd, ["Attribute", "MissingValues", "CompletenessMissingValue", "Confidence"], resultFolder + "/completeness_missing_attributes") if globalMiss: # Global Analysis print("-Global Missing Analysis") globalDocument = document.flatMap(lambda x: x) globalCount = globalDocument.count() filteredCount = globalDocument.filter(lambda x: x != "null").filter( lambda x: x is not None).filter(lambda x: x != "").filter(lambda x: x != "nan").count() qualityCompletenessMissing = filteredCount / float(globalCount) print("--Final Global Completeness Missing: " + str(qualityCompletenessMissing)) else: qualityMissing = None return ["Completeness_Missing"], [qualityCompletenessMissing], [performanceSample] def mapping_completeness_frequency(x, volatiliyTime, performanceSample): """ Mapping Function for completeness_frequency, it returns the new line containing the found value of the Completeness_Frequency dimension, or a fake line that will be eliminated if there is only 1 record for the considered grouping value Args: x(line): line of the rdd considered volatilityTime(float or string): Number of hours to consider the data still recent, used to derive quickly the amount of passed time performanceSample(float): Number from 0+ to 1 representing the portion of data that has been considered in the analysis """ try: newLine = (x[0], x[3], min(1.0, float(x[3]) / (volatiliyTime * (x[4] - x[5]) * performanceSample * x[1]))) except Exception: # this exception is raised whenever there is only 1 element available # for the current key element ( max = min ), or even if the frequency # or the volatiliy is 0, so it will be marked to be removed. 
(if # volatiliy is exceeded there is an error) newLine = (x[0], x[3], 2) return newLine def completeness_frequency(sc, sqlContext, keyValueCount, volatiliyTime, column, timestamp, resultFolder, valueFreq, performanceSample): """ This function calculate the value of Completeness, save the results and return the Global values Args: sc: SparkContext of the sparkSession sqlContext: sqlContext of the sparkSession keyValueCount(rdd): The rdd of the source grouped by the values of an attribute with additional columns representing the Timeliness values. volatilityTime(float or string): Number of hours to consider the data still recent, used to derive quickly the amount of passed time columns(string): Attribute considered in the analysis timestamp(string): Timestamp attribute considered in the analysis resultFolder(string): Absolute path of the destination of all the saved files valueFreq(boolean): Value set to True if the value degree of granulairty has been requested performanceSample(float): Number from 0+ to 1 representing the portion of data that has been considered in the analysis """ # remap with key, count / volatiliyTime * ( max - min ) * update_rate( # column, key, initialTime_of_records_in_key, finalTime_of_records_in_key # ) completenessFreqDoc = keyValueCount.map(lambda x: mapping_completeness_frequency( x, volatiliyTime, performanceSample)).filter(lambda x: x[2] != 2) print("-----Calculate Frequency as the number of rows per value divided by the seconds from the first analysis and the last one multiplied by the update_rate expected (obtained as the mean of the update rates ): -> ( Attribute's Value, RecordNumber, Completeness Frequency Value )") # print(completenessFreqDoc.take(5)) sumRecord = completenessFreqDoc.map(lambda x: x[1]).sum() meanValue = completenessFreqDoc.map(lambda x: x[2]).mean() if valueFreq: save_json(completenessFreqDoc, ["Value", "RecordNumber", "CompletenessFrequencyValue"], resultFolder + "/completeness_frequency_values/" + str(column) + "_" + str(timestamp), None) return sumRecord, meanValue def timeliness(sc, sqlContext, document, documentDF, columns, dataTypes, volatiliy, desiredColumns, resultFolder, dimensionColumn, columnsKey, meanAccuracyValues, devAccuracyValues, timelinessAnalyzer, completenessAnalyzer, distinctnessAnalyzer, populationAnalyzer, inputSource, associationRules, totVolume, granularityDimensions, performanceSample, dictHeaderPosition): """ This function calculate the dimension of Timeliness and prepare the Rdd to evaluate the dimension of Completeness_Frequency,, then save the results and return the Global values The arguments are all the same for each quality dimension since they are called dynamically in a cycle Args: sc: SparkContext of the sparkSession sqlContext: sqlContext of the sparkSession document(rdd): The rdd of the source. 
dataTypes(list): List of the types of the attribute based on the position in the rdd volatility(float or string): Parameter necessary for Timeliness and Completeness_Frequency: Number of hours to consider the data still recent desiredColumns(list): Indexes of the attributes requested in the analysis resultFolder(string): Absolute path of the destination of all the saved files dimensionColumn(dict): Dictionary containing the allowed index of the attributes for each available quality dimension columnsKey(list): Indexes of the attributes to consider as a grouping key meanAccuracyValues(list): Parameter necessary for Accuracy: List of the mean values to consider for each columnsKey requested devAccuracyValues(list): Parameter necessary for Accuracy: List of the allowed intervals values to consider for each columnsKey requested based on the previous considered mean values timelinessAnalyzer(boolean): Value set to True if the Timeliness dimension has to be evaluated completenessAnalyzer(boolean): Value set to True if the Completeness_Frequency dimension has to be evaluated distinctnessAnalyzer(boolean): Value set to True if the Distinctness dimension has to be evaluated populationAnalyzer(boolean): Value set to True if the Completeness_Population dimension has to be evaluated inputSource(string): Absolute path of the position of the source folder containing the Profiling information and all the portion of profiled data associationRules(list): Parameter optional for Consistency: List of additional association rules to be considered in the analysis totVolume(int): Number of total rows of the source granularityDimensions(dict): Dictionary containing the requested degrees of granularity for each requested quality dimension performanceSample(float): Number from 0+ to 1 representing the portion of data that has been considered in the analysis """ print(" ") print("Timeliness and Completeness Frequency") print(" ") updatePerTuple = False if "tuple" in granularityDimensions['Timeliness'].split(","): updatePerTuple = True print("\nUpdating timeliness based on delta time\n") timelinessInfo = sqlContext.read.json( inputSource + "/timelinessMetadata").head() headers = documentDF.columns dataToUpdateRdd = documentDF.fillna("null").rdd.map( lambda x: [i.encode("UTF8", "ignore") for i in x]) # the analysis time is set here datetimeNow = datetime.datetime.now() print("-Save the timestamp of the analysis") if updatePerTuple: # updating old timeliness if the connected attribute is in desired # columns timelinessNameList = [x for x in timelinessInfo.timelinessNames] timeFormatsList = [x for x in timelinessInfo.timeFormats] # because indexes are all over the places for element in range(len(timelinessNameList)): name = timelinessNameList[element] i = headers.index(name) # getting the name of the attribute connected to timeliness attributeName = name.split("_")[1] attributePosition = headers.index( attributeName) # and the position if attributeName in desiredColumns: dataToUpdateRdd = dataToUpdateRdd.map(lambda x: x[0:i] + [datetimeNow - datetime.datetime.strptime(x[attributePosition], timeFormatsList[element])] + x[i + 1:]).map(lambda x: x[0:i] + [( float(x[i].microseconds + (x[i].seconds + x[i].days * 24 * 3600) * 10**6) / 10**6)] + x[i + 1:]).map(lambda x: x[0:i] + [str((max(0.0, 1 - (x[i] / (float(volatiliy[0]) * 3600)))))] + x[i + 1:]) print("-Updated timeliness") print("-Save updated timeliness") finalCompletenessRdd = sc.emptyRDD() finalTimeRdd = sc.emptyRDD() updateValues = False updateGlobal = False 
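# Note added for readability (descriptive comment only, no behavioural change):
# in the per-attribute loop below, each record's Timeliness is computed as
#     timeliness = 1 - age_in_seconds / (volatility_hours * 3600)
# floored at 0 when the results are aggregated, so a record as old as the
# configured volatility (or older) scores 0 and a brand-new record scores
# close to 1. Completeness_Frequency for a value is then derived in
# completeness_frequency() as
#     min(1.0, record_count / (observed_time_span_seconds
#                              * performanceSample * expected_update_rate))
# i.e. it compares the number of rows actually seen with the number the
# profiled update rate would predict over the same time span.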
attrTimeFreq = [] attrName = [] confidence = [] counter = 0 if timelinessAnalyzer: # get the desired degrees of granularity try: granularity = granularityDimensions["Timeliness"].split(",") globalTime = True if "global" in granularity else False attributeTime = True if "attribute" in granularity else False valueTime = True if "value" in granularity else False if granularity[0] == "": globalTime = True except Exception: globalTime = True attributeTime = False valueTime = False else: globalTime = False attributeTime = False valueTime = False if completenessAnalyzer is True: # get the desired degrees of granularity try: granularity = granularityDimensions[ "Completeness_Frequency"].split(",") globalFreq = True if "global" in granularity else False attributeFreq = True if "attribute" in granularity else False valueFreq = True if "value" in granularity else False if granularity[0] == "": globalFreq = True except Exception: globalFreq = True attributeFreq = False valueFreq = False else: globalFreq = False attributeFreq = False valueFreq = False for j in columns: print(" ") print("-Timestamp Attribute = " + str(desiredColumns[j])) # select the first format of timestamp found stringFormat = dataTypes[j] # the volatiliy is converted in seconds volatiliyTime = float(volatiliy[counter]) * 3600.0 counter = counter + 1 # load the update rate, if the file is not available an error is # returned and the analysis should stop if completenessAnalyzer: updateValues = False try: updateRateValuesDF = sqlContext.read.json( inputSource + "/update_rate_values_" + str(desiredColumns[j])) print("-Update rate for each value available") updateValues = True except Exception: print("-Update rate for each value not available") updateGlobal = False try: updateRateGlobalDF = sqlContext.read.json( inputSource + "/update_rate_global_" + str(desiredColumns[j])) print("-Update rate for the source available") updateGlobal = True except Exception: print("-Update rate for the source not available") # search global frequency if updateGlobal & completenessAnalyzer & globalFreq: print(" ") print("--Calculate Global Frequency") completenessHour = (document.map(lambda x: datetime.datetime.strptime(x[j], stringFormat)) .map(lambda x: (x.hour, (1, x.date(), x.date()))) .reduceByKey(lambda x, y: (x[0] + y[0], max(x[1], y[1]), min(x[2], y[2]))) .map(lambda x: (x[0], (float(x[1][0]), float((x[1][1] - x[1][2]).days + 1)))) ) print("---Current New Elements per Hour") # print(completenessHour.take(5)) updateRateGlobal = updateRateGlobalDF.rdd.map( lambda x: (x.Hour, (x.Frequency))) # print(updateRateGlobal.take(4)) completenessHour = (completenessHour.join(updateRateGlobal) .map(lambda x: (x[0], x[1][0][0], max(0.0, min(1.0, x[1][0][0] / (float(x[1][1]) * float(x[1][0][1]) * performanceSample * 3600))))) ) print("---Completeness Frequency per Hour") # print(completenessHour.take(5)) save_json(completenessHour, ["Hour", "RecordNumber", "CompletenessFrequency"], resultFolder + "/completeness_frequency_global_hour/" + str(desiredColumns[j])) qualityCompletenessFrequency = completenessHour.map(lambda x: x[ 2]).mean() print("---Global Frequency = " + str(qualityCompletenessFrequency)) attrTimeFreq.append(qualityCompletenessFrequency) attrName.append("Completeness_Frequency_" + str(desiredColumns[j])) confidence.append(performanceSample) print(" ") print("--Calculate Dimensions per Attribute") for stringIndex in columnsKey: # key columns = keyColumns stringSplitIndex = stringIndex.split(",") newIndexes = [desiredColumns.index(k) for k in 
stringSplitIndex] newDocument = document.map( lambda x: restriction_filter(x, newIndexes, j)) stringAttribute = '_'.join(stringSplitIndex) try: # it is useless to group by the timestamps if not set(newIndexes).isdisjoint(dimensionColumn["Timeliness"]): continue except Exception: pass if j in newIndexes: continue print("--Key Attribute = " + stringAttribute) keyValueDocument = (newDocument.map(lambda x: (x[0], datetime.datetime.strptime(x[1], stringFormat))) .map(lambda x: (x[0], datetimeNow - x[1], x[1].hour)) .map(lambda x: ((x[0], x[2]), (float(x[1].microseconds + (x[1].seconds + x[1].days * 24 * 3600) * 10**6) / 10**6))) .map(lambda x: ((x[0]), 1 - (x[1] / volatiliyTime))) ) print("----Convert timestamp into datetime and calculate the difference in seconds with respect to the data analysis: -> (( Attribute's Value, Hour ), Timeliness )") # print(keyValueDocument.take(5)) if updateValues & completenessAnalyzer & (attributeFreq or valueFreq): # convert to datetime, create the key (value,hour) with the # difference in seconds, join the update rate with the same key # (value,hour) to obtain the frequency currentUpdateRate = (updateRateValuesDF.filter(updateRateValuesDF.Key == stringAttribute).drop("Key") .rdd.map(lambda x: ((x.Value.encode("UTF8", "ignore"), x.Hour), x.Frequency)) ) # print(currentUpdateRate.take(5)) newTimeDocument = keyValueDocument.join(currentUpdateRate) if not newTimeDocument.isEmpty(): print( "----Select the elements of the update rate with the same key column and join the previous data with it") # print(newTimeDocument.take(5)) # reduce the key together, this allow to derive min and max # time plus the count of record per key newTimeDocument = newTimeDocument.combineByKey(lambda x: (x[0], 1.0, x[0], x[0], x[1]), lambda x, value: ( x[0] + value[0], x[1] + 1, max(x[2], value[0]), min(x[3], value[0]), max(x[4], value[1])), lambda x, y: (x[0] + y[0], x[1] + y[1], max(x[2], y[2]), min(x[3], y[3]), max(x[4], y[4]))) # average by key,hour: sum,count,max,min,frequency print("----Derive sum of the timeliness value, the number of rows, maximum, minimum time and the update rate for each couple of value and hour: -> ( ( Attribute's Value, Hour ),( TimelinessSum, Count, MaxTimeliness, MinTimeliness, Update Rate) )") # print(newTimeDocument.take(5)) print("Remap and redefine the key only as the value, and then repeat the same reduction by considering also the different hours for the same key deriving the frequency as count/expected count plus min and max timeliness: -> ( ( Attribute's Value, Mean Update Rate, Sum Timeliness, Count, Max Timeliness, Min Timeliness) )") keyValueCount = (newTimeDocument.map(lambda x: (x[0][0], (x[1][4], x[1][0], x[1][1], x[1][2], x[1][3]))) .combineByKey(lambda x: (x[0], 1.0, x[1], x[2], x[3], x[4]), lambda x, value: (x[0] + value[0], x[1] + 1, x[2] + value[1], x[ 3] + value[2], max(x[4], value[3]), min(x[5], value[4])), lambda x, y: (x[0] + y[0], x[1] + y[1], x[2] + y[2], x[3] + y[3], max(x[4], y[4]), min(x[5], y[5]))) .map(lambda x: (x[0], x[1][0] / x[1][1], x[1][2], x[1][3], x[1][4], x[1][5])) ) # frequency,sum_timeliness,count_timeliness,max_timeliness,min_timeliness # print(keyValueCount.take(5)) print(" ") print("----Completeness_Frequency") startTime = time.time() # derive also the completeness frequency using the previous # results completenessSumRecord, completenessMeanValue = completeness_frequency( sc, sqlContext, keyValueCount, volatiliyTime, stringAttribute, desiredColumns[j], resultFolder, valueFreq, performanceSample) endTime = 
time.time() print("-----Completeness Frequency Elapsed Time: " + str(endTime - startTime) + " seconds") if attributeFreq: # completeness frequency rdd finalCompletenessRdd = finalCompletenessRdd.union(sc.parallelize( [(stringAttribute, completenessSumRecord, completenessMeanValue, performanceSample)])) if timelinessAnalyzer & (attributeTime or valueTime): averageByKey = keyValueCount.map(lambda (label, update_rate, value_sum, count, max, min): (label, value_sum / float(count), max, min, datetimeNow.strftime(stringFormat))) else: if timelinessAnalyzer & (attributeTime or valueTime): averageByKey = (keyValueDocument.map(lambda x: (x[0][0], x[1])) .combineByKey(lambda x: (x, 1.0, x, x), lambda x, value: ( x[0] + value, x[1] + 1, max(x[2], value), min(x[3], value)), lambda x, y: (x[0] + y[0], x[1] + y[1], max(x[2], y[2]), min(x[3], y[3]))) .map(lambda (label, (sumTime, countTime, maxTime, minTime)): (label, sumTime / float(countTime), maxTime, minTime, datetimeNow.strftime(stringFormat))) ) else: if timelinessAnalyzer & (attributeTime or valueTime): averageByKey = (keyValueDocument.map(lambda x: (x[0][0], x[1])) .combineByKey(lambda x: (x, 1.0, x, x), lambda x, value: ( x[0] + value, x[1] + 1, max(x[2], value), min(x[3], value)), lambda x, y: (x[0] + y[0], x[1] + y[1], max(x[2], y[2]), min(x[3], y[3]))) .map(lambda (label, (sumTime, countTime, maxTime, minTime)): (label, sumTime / float(countTime), maxTime, minTime, datetimeNow.strftime(stringFormat))) ) if timelinessAnalyzer & attributeTime: print(" ") print("----Final Timeliness per Attribute's Value: -> (Value, TimelinessMean, TimelinessMax, TimelinessMin, AnalysisTime )") # print(averageByKey.take(5)) # Final aggregated value for timeliness timelinessMeanValue = averageByKey.map(lambda x: x[1]).mean() timelinessMaxValue = averageByKey.map(lambda x: x[2]).max() timelinessMinValue = averageByKey.map(lambda x: x[3]).min() if timelinessMeanValue < 0: timelinessMeanValue = 0.0 if timelinessMaxValue < 0: timelinessMaxValue = 0.0 if timelinessMinValue < 0: timelinessMinValue = 0.0 finalTimeRdd = finalTimeRdd.union(sc.parallelize( [(stringAttribute, timelinessMeanValue, timelinessMaxValue, timelinessMinValue, performanceSample, datetimeNow.strftime(stringFormat))])) if timelinessAnalyzer & valueTime: averageByKey = averageByKey.map(lambda x: ( x[0], max(0.0, x[1]), max(0.0, x[2]), max(0.0, x[3]), x[4])) save_json(averageByKey, ["Value", "TimelinessMean", "TimelinessMax", "TimelinessMin", "AnalysisTime"], resultFolder + "/timeliness_values/" + stringAttribute + "_" + str(desiredColumns[j])) if timelinessAnalyzer & attributeTime: print("-Calculate value of Timeliness per Attribute: --> Attribute, Mean Timeliness, Max Timeliness, Min Timeliness, Confidence, Analysis Time") # print(finalTimeRdd.take(5)) # save file into hdfs save_json(finalTimeRdd, ["Attribute", "TimelinessMean", "TimelinessMax", "TimelinessMin", "Confidence", "AnalysisTime"], resultFolder + "/timeliness_attributes_" + str(desiredColumns[j])) if updateValues & attributeFreq & completenessAnalyzer & (not finalCompletenessRdd.isEmpty()): print( "-Value of Completeness per Attribute's value: --> Value, Mean Completeness") # print(finalCompletenessRdd.take(5)) # save file into hdfs save_json(finalCompletenessRdd, ["Attribute", "RecordNumber", "CompletenessFrequencyValue", "Confidence"], resultFolder + "/completeness_frequency_attributes_" + str(desiredColumns[j])) # Aggregate Values of different timestamps if timelinessAnalyzer & globalTime: globalTimeDocument = 
keyValueDocument.map(lambda x: (x[1])) attrTimeFreq.append(max(0.0, globalTimeDocument.mean())) attrTimeFreq.append(max(0.0, globalTimeDocument.max())) attrTimeFreq.append(max(0.0, globalTimeDocument.min())) attrName.append("Timeliness_Mean_" + str(desiredColumns[j])) attrName.append("Timeliness_Max_" + str(desiredColumns[j])) attrName.append("Timeliness_Min_" + str(desiredColumns[j])) confidence.append(performanceSample) confidence.append(performanceSample) confidence.append(performanceSample) # Add analysis Timestamp attrTimeFreq.append(str(datetimeNow.strftime(stringFormat))) attrName.append("Last_Analysis_Timestamp_" + str(desiredColumns[j])) confidence.append(performanceSample) return dataToUpdateRdd, attrName, attrTimeFreq, confidence def volume(sc, sqlContext, document, columns, dataTypes, volatiliy, desiredColumns, resultFolder, dimensionColumn, columnsKey, meanAccuracyValues, devAccuracyValues, timelinessAnalyzer, completenessAnalyzer, distinctnessAnalyzer, populationAnalyzer, inputSource, associationRules, totVolume, granularityDimensions, performanceSample): """ This function calculate the quality value of the Volume, save the results and return the Global values The arguments are all the same for each quality dimension since they are called dynamically in a cycle Args: sc: SparkContext of the sparkSession sqlContext: sqlContext of the sparkSession document(rdd): The rdd of the source. dataTypes(list): List of the types of the attribute based on the position in the rdd volatility(float or string): Parameter necessary for Timeliness and Completeness_Frequency: Number of hours to consider the data still recent desiredColumns(list): Indexes of the attributes requested in the analysis resultFolder(string): Absolute path of the destination of all the saved files dimensionColumn(dict): Dictionary containing the allowed index of the attributes for each available quality dimension columnsKey(list): Indexes of the attributes to consider as a grouping key meanAccuracyValues(list): Parameter necessary for Accuracy: List of the mean values to consider for each columnsKey requested devAccuracyValues(list): Parameter necessary for Accuracy: List of the allowed intervals values to consider for each columnsKey requested based on the previous considered mean values timelinessAnalyzer(boolean): Value set to True if the Timeliness dimension has to be evaluated completenessAnalyzer(boolean): Value set to True if the Completeness_Frequency dimension has to be evaluated distinctnessAnalyzer(boolean): Value set to True if the Distinctness dimension has to be evaluated populationAnalyzer(boolean): Value set to True if the Completeness_Population dimension has to be evaluated inputSource(string): Absolute path of the position of the source folder containing the Profiling information and all the portion of profiled data associationRules(list): Parameter optional for Consistency: List of additional association rules to be considered in the analysis totVolume(int): Number of total rows of the source granularityDimensions(dict): Dictionary containing the requested degrees of granularity for each requested quality dimension performanceSample(float): Number from 0+ to 1 representing the portion of data that has been considered in the analysis """ print(" ") print("Volume") print(" ") attrVolume = [] attrName = [] confidence = [] # get the desired degrees of granularity try: granularity = granularityDimensions["Volume"].split(",") globalVol = True if "global" in granularity else False attributeVol = True if 
"attribute" in granularity else False valueVol = True if "value" in granularity else False if granularity[0] == "": globalVol = True except Exception: globalVol = True attributeVol = False valueVol = False if globalVol: print("-Global Volume") rowCount = document.count() # calculate volume as the fraction between the number of rows of the # file after the filters and the requirements and the total available # rows before the filtering qualityVolume = min(1.0, rowCount / float(totVolume)) attrName.append("Volume") attrVolume.append(qualityVolume) confidence.append(performanceSample) attrName.append("Volume(TotalRows)") attrVolume.append(rowCount) confidence.append(performanceSample) if valueVol: for stringIndex in columnsKey: # key columns = keyColumns stringSplitIndex = stringIndex.split(",") newIndexes = [desiredColumns.index(k) for k in stringSplitIndex] newDocument = document.map( lambda x: restriction_filter(x, newIndexes)) stringAttribute = '_'.join(stringSplitIndex) try: # it is useless to group by the timestamps if not set(newIndexes).isdisjoint(dimensionColumn["Timeliness"]): continue except Exception: pass print("-Key Attribute = " + stringAttribute) keyValueDocument = newDocument.map(lambda x: (x, 1)).reduceByKey( lambda x, y: x + y).map(lambda x: (x[0], x[1], x[1] / float(totVolume))) print( "--Return the count of rows per Attribute's Value: -> ( Attribute's Value, Count, VolumeValue )") # print(keyValueDocument.take(4)) save_json(keyValueDocument, [ "Value", "Count", "VolumeValue"], resultFolder + "/volume_values/" + stringAttribute) return attrName, attrVolume, confidence def distinctness(sc, sqlContext, document, columns, dataTypes, volatiliy, desiredColumns, resultFolder, dimensionColumn, columnsKey, meanAccuracyValues, devAccuracyValues, timelinessAnalyzer, completenessAnalyzer, distinctnessAnalyzer, populationAnalyzer, inputSource, associationRules, totVolume, granularityDimensions, performanceSample): """ This function calculate the dimension of Distinctness for each attribute, save the results and return the Global values The arguments are all the same for each quality dimension since they are called dynamically in a cycle Args: sc: SparkContext of the sparkSession sqlContext: sqlContext of the sparkSession document(rdd): The rdd of the source. 
dataTypes(list): List of the types of the attribute based on the position in the rdd volatility(float or string): Parameter necessary for Timeliness and Completeness_Frequency: Number of hours to consider the data still recent desiredColumns(list): Indexes of the attributes requested in the analysis resultFolder(string): Absolute path of the destination of all the saved files dimensionColumn(dict): Dictionary containing the allowed index of the attributes for each available quality dimension columnsKey(list): Indexes of the attributes to consider as a grouping key meanAccuracyValues(list): Parameter necessary for Accuracy: List of the mean values to consider for each columnsKey requested devAccuracyValues(list): Parameter necessary for Accuracy: List of the allowed intervals values to consider for each columnsKey requested based on the previous considered mean values timelinessAnalyzer(boolean): Value set to True if the Timeliness dimension has to be evaluated completenessAnalyzer(boolean): Value set to True if the Completeness_Frequency dimension has to be evaluated distinctnessAnalyzer(boolean): Value set to True if the Distinctness dimension has to be evaluated populationAnalyzer(boolean): Value set to True if the Completeness_Population dimension has to be evaluated inputSource(string): Absolute path of the position of the source folder containing the Profiling information and all the portion of profiled data associationRules(list): Parameter optional for Consistency: List of additional association rules to be considered in the analysis totVolume(int): Number of total rows of the source granularityDimensions(dict): Dictionary containing the requested degrees of granularity for each requested quality dimension performanceSample(float): Number from 0+ to 1 representing the portion of data that has been considered in the analysis """ print(" ") print("Distinctness & Population") print(" ") attrDistinctPop = [] attrName = [] confidence = [] finalDistinctRdd = sc.emptyRDD() finalPopulationRdd = sc.emptyRDD() if populationAnalyzer: # get the desired degrees of granularity try: granularity = granularityDimensions[ "Completeness_Population"].split(",") globalPop = True if "global" in granularity else False attributePop = True if "attribute" in granularity else False valuePop = True if "value" in granularity else False if granularity[0] == "": globalPop = True except Exception: globalPop = True attributePop = False valuePop = False else: globalPop = False attributePop = False valuePop = False if distinctnessAnalyzer: # get the desired degrees of granularity try: granularity = granularityDimensions["Distinctness"].split(",") globalDistinct = True if "global" in granularity else False attributeDistinct = True if "attribute" in granularity else False valueDistinct = True if "value" in granularity else False if granularity[0] == "": globalDistinct = True except Exception: globalDistinct = True attributeDistinct = False valueDistinct = False else: globalDistinct = False attributeDistinct = False valueDistinct = False # Analysis rowCount = document.count() print(rowCount) if globalDistinct: # print(document.take(100)) distinctDocument = document.map(lambda x: (tuple(x), 1)).reduceByKey( lambda x, y: x + y).map(lambda x: float(x[1]) / 2).filter(lambda x: x > 1) # print(distinctDocument.take(400)) # print(distinctDocument.count()) duplicateDocumentLines = distinctDocument.sum() print("Duplicate Lines") print(duplicateDocumentLines) qualityDistinctness = max( 0.0, 1 - (duplicateDocumentLines / 
float(rowCount))) print("Distinctness " + str(qualityDistinctness)) attrName.append("Distinctness") attrDistinctPop.append(qualityDistinctness) confidence.append(performanceSample) if attributePop or attributeDistinct: previousSource = sqlContext.read.json( inputSource + "/source_distinctness") correctAttributePop = False for stringIndex in columnsKey: # key columns = keyColumns stringSplitIndex = stringIndex.split(",") newIndexes = [desiredColumns.index(k) for k in stringSplitIndex] newDocument = document.map( lambda x: restriction_filter(x, newIndexes)) stringAttribute = '_'.join(stringSplitIndex) try: # it is useless to group by the timestamps if not set(newIndexes).isdisjoint(dimensionColumn["Timeliness"]): continue except Exception: pass print("--Key Attribute = " + stringAttribute) currentCount = newDocument.distinct().count() if attributePop & populationAnalyzer & (len(stringSplitIndex) == 1): correctAttributePop = True previousCompletePopulation = previousSource.filter( previousSource.Attribute == stringSplitIndex[0]).head()["Count"] qualityCompletenessPopulation = min( 1.0, currentCount / float(previousCompletePopulation)) print("--The Completeness Population Value is derived as the fraction between the number of distinct elements in the document and the new global number of elements found after the addition of the new values found in this document = " + str(qualityCompletenessPopulation)) finalPopulationRdd = finalPopulationRdd.union(sc.parallelize( [(stringSplitIndex[0], currentCount, qualityCompletenessPopulation, performanceSample)])) if attributeDistinct & distinctnessAnalyzer: qualityDistinctness = currentCount / float(rowCount) print("--The Distinctness Value is derived as the fraction between the number of distinct elements in the document and the number of rows in the document") finalDistinctRdd = finalDistinctRdd.union(sc.parallelize( [(stringAttribute, currentCount, qualityDistinctness, performanceSample)])) if attributePop & populationAnalyzer & correctAttributePop: print("-Calculate value of Completeness Population per Attribute: --> Attribute, Distinct Count, Completeness Population, Confidence") # print(finalPopulationRdd.take(3)) save_json(finalPopulationRdd, ["Attribute", "DistinctCount", "CompletenessPopulationValue", "Confidence"], resultFolder + "/completeness_population_attributes") if attributeDistinct & distinctnessAnalyzer: print("-Calculate value of Distinctness per Attribute: --> Attribute, Distinct Count, Distinctness, Confidence") # print(finalDistinctRdd.take(3)) save_json(finalDistinctRdd, ["Attribute", "DistinctCount", "Distinctness", "Confidence"], resultFolder + "/distinctness_attributes") return attrName, attrDistinctPop, confidence def consistency_zero_division(line): """ This function returns a new line for each Row of the rdd with the evaluation of the Consistency dimension Args: line(Row): row of the rdd """ try: return (line[0], float(line[1][1]) / line[1][0]) except Exception: return (line[0], 0.0) def multiple_row_filter(line, antecedent, consequent): """ This function remap the rdd in order to match the requested rule Args: line(Row): row of the rdd antecedent(list): list of the indexes of the attributes that are part of the antecedent elements of the rule consequent(list): list of the indexes of the attributes that are part of the consequent elements of the rule """ return (((','.join([line[i] for i in antecedent])), (','.join([line[j] for j in consequent]))), 1) def consistency(sc, sqlContext, document, columns, dataTypes, volatiliy, 
desiredColumns, resultFolder, dimensionColumn, columnsKey, meanAccuracyValues, devAccuracyValues, timelinessAnalyzer, completenessAnalyzer, distinctnessAnalyzer, populationAnalyzer, inputSource, associationRules, totVolume, granularityDimensions, performanceSample): """ This function calculate the dimension of consistency for each rule, save the results and return the Global values The arguments are all the same for each quality dimension since they are called dynamically in a cycle Args: sc: SparkContext of the sparkSession sqlContext: sqlContext of the sparkSession document(rdd): The rdd of the source. dataTypes(list): List of the types of the attribute based on the position in the rdd volatility(float or string): Parameter necessary for Timeliness and Completeness_Frequency: Number of hours to consider the data still recent desiredColumns(list): Indexes of the attributes requested in the analysis resultFolder(string): Absolute path of the destination of all the saved files dimensionColumn(dict): Dictionary containing the allowed index of the attributes for each available quality dimension columnsKey(list): Indexes of the attributes to consider as a grouping key meanAccuracyValues(list): Parameter necessary for Accuracy: List of the mean values to consider for each columnsKey requested devAccuracyValues(list): Parameter necessary for Accuracy: List of the allowed intervals values to consider for each columnsKey requested based on the previous considered mean values timelinessAnalyzer(boolean): Value set to True if the Timeliness dimension has to be evaluated completenessAnalyzer(boolean): Value set to True if the Completeness_Frequency dimension has to be evaluated distinctnessAnalyzer(boolean): Value set to True if the Distinctness dimension has to be evaluated populationAnalyzer(boolean): Value set to True if the Completeness_Population dimension has to be evaluated inputSource(string): Absolute path of the position of the source folder containing the Profiling information and all the portion of profiled data associationRules(list): Parameter optional for Consistency: List of additional association rules to be considered in the analysis totVolume(int): Number of total rows of the source granularityDimensions(dict): Dictionary containing the requested degrees of granularity for each requested quality dimension performanceSample(float): Number from 0+ to 1 representing the portion of data that has been considered in the analysis """ print(" ") print("Consistency") print(" ") # get the desired degrees of granularity try: granularity = granularityDimensions["Consistency"].split(",") globalCon = True if "global" in granularity else False attributeCon = True if "attribute" in granularity else False valueCon = True if "value" in granularity else False if granularity[0] == "": globalCon = True except Exception: globalCon = True attributeCon = False valueCon = False finalConsistentRdd = sc.emptyRDD() isRules = False try: rules = sqlContext.read.json(inputSource + "/association_rules") dataframeColumns = rules.columns isRules = True except Exception: print("No pre-existent association rules") try: # add custom rules. 
'x1,x2:y1','x1:y1' for customRule in associationRules: newCustomRule = customRule.split(":") print(newCustomRule) print(isRules) if not isRules: rules = sqlContext.createDataFrame([(newCustomRule[0], newCustomRule[1])], [ "Antecedent", "Consequent"]) isRules = True else: newRow = sqlContext.createDataFrame( [(newCustomRule[0], newCustomRule[1])], rules.columns) rules = rules.unionAll(newRow) print("-List of Rules:") # invert dictionary #dictPositionHeader = dict([(v, k) for k, v in dictHeaderPosition.iteritems()]) # rules.show() if attributeCon or valueCon: for row in rules.rdd.collect(): print("-Checking rule: " + str(row[0]) + " -> " + str(row[1])) antecedents = [x.encode("UTF8", "ignore") for x in row[0].split(",")] consequents = [x.encode("UTF8", "ignore") for x in row[1].split(",")] # print(desiredColumns) antecedent = [desiredColumns.index(i) for i in antecedents] consequent = [desiredColumns.index(j) for j in consequents] consistentRdd = document.map( lambda x: multiple_row_filter(x, antecedent, consequent)) print("--Select only the interested columns") # print(consistentRdd.take(2)) consistentRdd = (consistentRdd.reduceByKey(lambda x, y: x + y) .map(lambda x: (x[0][0], x[1])) .combineByKey(lambda x: (x, x), lambda x, value: ( x[0] + value, max(x[1], value)), lambda x, y: (x[0] + y[0], max(x[1], y[1]))) .map(consistency_zero_division) .map(lambda x: (x[0], str(row[0]) + " -> " + str(row[1]), x[1])) ) print("--Count the number of occurrence of both antecedent and consequent in the document, group by the antecedent and from the different partial counts, return the sum and the maximum, finally derives the consistency of the rule the sum of all the previous partial count with the same antecedent and the maximum number of different") # print(consistentRdd.take(5)) if valueCon: save_json(consistentRdd, ["AntecedentValue", "Rule", "ConsistencyValue"], resultFolder + "/consistency_values/" + str(row[0]) + "_" + str(row[1])) if attributeCon: consistentValue = consistentRdd.map(lambda x: x[2]).mean() print("--Mean Consistent Value for current rule = " + str(consistentValue)) partialConsistentRdd = sc.parallelize([(row[0].encode("UTF8", "ignore"), row[ 1].encode("UTF8", "ignore"), consistentValue, performanceSample)]) finalConsistentRdd = finalConsistentRdd.union( partialConsistentRdd) if attributeCon: print("-List of all rules with the Mean Consistency Value: ( Antecedent, Consequent, Consistency Value, Confidence ) ") # calculate final aggregated value for consistency meanConsistencyValue = finalConsistentRdd.map( lambda x: x[2]).mean() # save file into hdfs save_json(finalConsistentRdd, ["RuleAntecedent", "RuleConsequent", "ConsistencyValue", "Confidence"], resultFolder + "/consistency_attributes_mean_per_rule") except Exception: print("-no available association rules") import traceback traceback.print_exc() return [], [], [] def check_association_rules(row, rules, headerPositionList): """ This function check if the row satisfies any of the association consistency rules providede by the user. 
Args: row(list): the row to check rules(list): list of correct bindings for the parameters heaerPositionList: header position as gathered from the preliminary informations """ result = 0 for i in range(len(rules)): ruleToCheck = rules[i] headerToCheck = headerPositionList[i] listToCheck = [] for index in headerToCheck: listToCheck.append(row[index]) if listToCheck in ruleToCheck: result += 1 return result def association_consistency(document, dictHeaderPosition, desiredColumns, newRuleList): """ This function performs a per tuple check to see if the desired columns satisty the consistency rules provided by the user Args: document(RDD): the document to analyze dictHeaderPosition(dict): preliminary informations on the position of the headers desiredColumns(list): desired columns as provided by the user in the config file newRuleList(list): association consistency rules """ print(" ") print("Updating association consistency") print(" ") consistencyRulesCount = 0.0 invertedDictHeaderPosition = {v: k for k, v in dictHeaderPosition.iteritems()} consistencyRulesList = [] headerPositionList = [] for f in newRuleList: try: rulesDF = sqlContext.read.json(f) header = rulesDF.columns if set(header) <= set(desiredColumns): rulesRDD = rulesDF.rdd rulesRDD = rulesRDD.map( lambda x: [i.encode("UTF8", "ignore") for i in x]) consistencyRulesList.append(rulesRDD.collect()) headerPositionList.append( [int(invertedDictHeaderPosition[i]) for i in header]) consistencyRulesCount += 1 except Exception as e: print("Cannot open " + f + " file") # print("######Consistency rules#######") # print(consistencyRulesList) if (consistencyRulesCount == 0): return document oldAssociationConsistencyPosition = invertedDictHeaderPosition[ 'ASSOCIATION_CONSISTENCY'] document = document.map(lambda x: x[0:oldAssociationConsistencyPosition] + [str(check_association_rules( x, consistencyRulesList, headerPositionList) / consistencyRulesCount)] + x[oldAssociationConsistencyPosition + 1:]) return document def main(sc, sqlContext, configuration_path, performanceSample): """ $$$$$ Main Program $$$$$ Args: sc: SparkContext of the sparkSession sqlContext: sqlContext of the sparkSession configuration_path(string): Absolute path of the location of the configuration file for the analysis performanceSample(float): Number from 0+ to 1 representing the portion of data that has been considered in the analysis """ """ IMPORT Step """ # load the data configuration_file = sc.textFile(configuration_path).zipWithIndex() # Extract Input Files inputSource = configuration_file.filter(lambda x: x[1] == 0).map( lambda x: x[0]).collect()[0].encode("UTF8", "ignore") inputSource = inputSource.split(';') inputFolderList = [] if(len(inputSource) > 1 and inputSource[1] != '*'): inputFolderList = inputSource[1:] inputSource = inputSource[0] print(inputSource) print(inputFolderList) # Extract Input Files resultFolder = configuration_file.filter(lambda x: x[1] == 1).map( lambda x: x[0]).collect()[0].encode("UTF8", "ignore") tsForOutputFolder = datetime.datetime.fromtimestamp( time.time()).strftime('_%Y%m%d-%H%M%S') resultFolder = resultFolder + tsForOutputFolder resultFolder = resultFolder + "_confidence_" + str(performanceSample) print(resultFolder) # Extract Source Quality Requirement sourceDesired = int(configuration_file.filter( lambda x: x[1] == 12).map(lambda x: x[0]).collect()[0]) print(sourceDesired) if sourceDesired: sourceQuality = sqlContext.read.json(inputSource + "/source_quality") save_json(sourceQuality, None, resultFolder + "/source_quality") return 
1 # Extract Desired Columns desiredColumns = configuration_file.filter( lambda x: x[1] == 2).map(lambda x: x[0]).collect()[0].split(";") desiredColumns = [x.encode("UTF8", "ignore") for x in desiredColumns] print(desiredColumns) # Extract Key Columns keyColumns = configuration_file.filter(lambda x: x[1] == 3).map( lambda x: x[0]).collect()[0].split(";") keyColumns = [x.encode("UTF8", "ignore") for x in keyColumns] print(keyColumns) # Extract Desired Intervals desiredIntervalsDim = configuration_file.filter( lambda x: x[1] == 4).map(lambda x: x[0]).collect()[0].split(";") desiredIntervalsDim = [x.encode("UTF8", "ignore") for x in desiredIntervalsDim] print(desiredIntervalsDim) # Extract Desired Elements desiredElementsDim = configuration_file.filter( lambda x: x[1] == 5).map(lambda x: x[0]).collect()[0].split(";") desiredElementsDim = [x.encode("UTF8", "ignore") for x in desiredElementsDim] print(desiredElementsDim) # Extract Desired Dimensions desiredDimensions = configuration_file.filter( lambda x: x[1] == 6).map(lambda x: x[0]).collect()[0].split(";") desiredDimensions = [x.encode("UTF8", "ignore") for x in desiredDimensions] forceConsistencyRefresh = False if "Consistency-R" in desiredDimensions: print("-Forcing update of association consistency") forceConsistencyRefresh = True i = desiredDimensions.index("Consistency-R") desiredDimensions[i] = "Consistency" print(desiredDimensions) # Extract Validity for Timeliness volatilities = configuration_file.filter(lambda x: x[1] == 7).map( lambda x: x[0]).collect()[0].split(";") volatilities = [x.encode("UTF8", "ignore") for x in volatilities] print(volatilities) # Extract mean Value for Accuracy meanAccuracyValues = configuration_file.filter( lambda x: x[1] == 8).map(lambda x: x[0]).collect()[0].split(";") meanAccuracyValues = [x.encode("UTF8", "ignore") for x in meanAccuracyValues] print(meanAccuracyValues) # Extract maximum deviation for Accuracy devAccuracyValues = configuration_file.filter( lambda x: x[1] == 9).map(lambda x: x[0]).collect()[0].split(";") devAccuracyValues = [x.encode("UTF8", "ignore") for x in devAccuracyValues] print(devAccuracyValues) # Extract desider association rules associationRules = configuration_file.filter( lambda x: x[1] == 10).map(lambda x: x[0]).collect()[0].split(";") associationRules = [x.encode("UTF8", "ignore") for x in associationRules] print(associationRules) # Extract granularities granularityDim = configuration_file.filter( lambda x: x[1] == 11).map(lambda x: x[0]).collect()[0].split(";") granularityDim = [x.encode("UTF8", "ignore") for x in granularityDim] print(granularityDim) # Extract association rule files newRuleList = configuration_file.filter(lambda x: x[1] == 13).map( lambda x: x[0]).collect()[0].split(";") newRuleList = [x.encode("UTF8", "ignore") for x in newRuleList] print(newRuleList) """ Union and Structuration """ totStart = time.time() start = time.time() # initialize the final quality dictionary quality = {} finalConfidence = [] finalQuality = sqlContext.createDataFrame([{"_test_": "test"}]) """ try to get the preliminary information """ jparsing = False try: preliminaryInformation = sqlContext.read.json( inputSource + "/preliminary_information").head() except Exception as e: print("Missing Preliminary Information") print("Basic Information Retrieval...") try: volatiliy = float(preliminaryInformation.volatiliy) print(volatiliy) except Exception: volatiliy = 17520.0 for i in range(len(volatilities)): if volatilities[i] == "": volatilities[i] = volatiliy header = 
preliminaryInformation.header header = [x.encode("UTF8", "ignore") for x in header] print("-Header Getted..") print(header) xmlRegex = preliminaryInformation.regex.encode("UTF8", "ignore") print(xmlRegex) if xmlRegex == "": jparsing = True dataTypes = preliminaryInformation.datatypes dataTypes = [x.encode("UTF8", "ignore") for x in dataTypes] print("-Data Types Getted..") print(dataTypes) dimensionColumn = preliminaryInformation.dimensionAttributePosition.asDict() print(dimensionColumn) dictHeaderNames = preliminaryInformation.dimensionAttributeName.asDict() dictHeaderNames = dict([(k, [j.encode("UTF8", "ignore") for j in v]) for k, v in dictHeaderNames.iteritems()]) print("-Dimension applyable with column's names Getted..") print(dictHeaderNames) dictHeaderPosition = preliminaryInformation.attributePosition.asDict() dictHeaderPosition = dict([(int(k), v.encode("UTF8", "ignore")) for k, v in dictHeaderPosition.iteritems()]) ready = True """ DATA structuration """ # search the corrected analysed data in the source path requestedNames = [] try: analysedNames = sqlContext.read.json( inputSource + "/source_analysed_updates") analysedName = analysedNames.filter( analysedNames.correctAnalysis == 1).rdd.map(lambda x: (x.fileName)).collect() if(len(inputFolderList) >= 1): for fName in analysedName: if fName in inputFolderList: requestedNames.append(fName) else: requestedNames = list(analysedName) inputPaths = [(inputSource + "/updates/" + fName + ".txt").encode("UTF8", "ignore") for fName in requestedNames] inputPaths = ','.join(inputPaths) print("--Profiled files " + str(requestedNames)) except Exception: print("There are no files in the source") return 0 if jparsing: fields = [StructField(field_name, StringType(), True) for field_name in preliminaryInformation.header] print(fields) schema = StructType(fields) requestedPaths = [inputSource + "/extended_dataset/" + f for f in requestedNames] try: documentDF = sqlContext.read.schema(schema).json(requestedPaths) except Exception: print("Input file not found, run profiling again") return 0 print(documentDF.count()) print(documentDF.columns) else: try: document = sc.textFile(inputSource + "/updates/*.txt") except Exception: print("File not found, check the input path and try again") return 0 print(" ") print("Conversion..") # conversion document = document.map(lambda x: x.encode("UTF8", "ignore")) # quote removal document = document.map(quote_removal) # remove null line document = document.filter(lambda line: len(line) > 0) # remove escapes from timestamps document = document.map(escape_removal) document = document.map(comma_to_dot_number_conversion) # parse the document to extract the list of elements document = document.map(lambda x: regular_parsing(x, xmlRegex)) print("Extract All the elements following the regular expression..") # print(document.take(5)) end = time.time() timeRdd = sc.parallelize([("Structuration", (end - start))]) print("- Structuration Elapsed Time: " + str(end - start) + " seconds") start = time.time() """ Confidence Selector """ # choose the sample based on the performance print("Fraction considered: " + str(performanceSample)) """ DATA Selection """ # associate desiredDimensions with possible columns dictPositionHeader = dict([(v, k) for k, v in dictHeaderPosition.iteritems()]) print(dictPositionHeader) desiredNum = [dictPositionHeader[i] for i in desiredColumns] print(desiredNum) dimensionColumn = dict((k, dimensionColumn.pop(k, None)) for k in desiredDimensions) print(dimensionColumn) for k in dimensionColumn.iterkeys(): 
print(k) newList = [desiredNum.index(i) for i in dimensionColumn[ k] if i in desiredNum] dimensionColumn[k] = newList print(dimensionColumn) # dataTypes dataTypes = [dataTypes[i] for i in desiredNum] # set values for volatiliy: volatilities # set the mean and dev values for accuracy: meanAccuracy, devAccuracy """ Column Selection """ if not jparsing: filteredDocumentDF = document.toDF(header).select(desiredColumns) else: filteredDocumentDF = documentDF.select(desiredColumns) # documentDF.show(5) totVolume = filteredDocumentDF.count() print("Total Volume = " + str(totVolume)) #documentDF = documentDF.sample(False,0.0002) #totVolume = documentDF.count() #print("Total Volume = " + str(totVolume)) try: """ Intervals Selection """ newDocumentDF = sqlContext.createDataFrame( sc.emptyRDD(), filteredDocumentDF.schema) dictDesiredPosition = dict([(v, k) for k, v in enumerate(desiredColumns)]) for colo in desiredColumns: # newDocumentDF.show(5) print(colo) selectedIntervals = desiredIntervalsDim[ dictDesiredPosition[colo]].split(",") if selectedIntervals[0] != "": print("Deleting Null") filteredDocumentDF = filteredDocumentDF.dropna( how="any", subset=[colo]) for interval in selectedIntervals: extremes = interval.split(":") print(extremes) if (extremes[0] == '') & (len(extremes) == 1): continue else: if colo in dictHeaderNames["Timeliness"]: convertFormat = "%d/%m/%Y %H.%M.%S" stringFormat = dataTypes[dictDesiredPosition[colo]] timeFunc = udf(lambda x: datetime.datetime.strptime( x, stringFormat), TimestampType()) filteredDocumentDF = filteredDocumentDF.withColumn( "Time" + str(colo), timeFunc(col(colo))) if (extremes[0] != '') & (extremes[1] != ''): print("both") leftTimestamp = datetime.datetime.strptime( extremes[0], convertFormat) rightTimestamp = datetime.datetime.strptime( extremes[1], convertFormat) # print(documentDF.show(5)) splitDocumentDF = filteredDocumentDF.filter( (col("Time" + str(colo)) >= leftTimestamp) & (col("Time" + str(colo)) <= rightTimestamp)) # splitDocumentDF.show(5) else: if extremes[0] != '': print("left") #documentDF = documentDF.filter("" + str(colo) + ">=" + str(extremes[0])) leftTimestamp = datetime.datetime.strptime( extremes[0], convertFormat) splitDocumentDF = filteredDocumentDF.filter( col("Time" + str(colo)) >= leftTimestamp) else: print("right") rightTimestamp = datetime.datetime.strptime( extremes[1], convertFormat) splitDocumentDF = filteredDocumentDF.filter( col("Time" + str(colo)) <= rightTimestamp) splitDocumentDF = splitDocumentDF.drop( "Time" + str(colo)) else: if (extremes[0] != '') & (extremes[1] != ''): print("both") splitDocumentDF = filteredDocumentDF.filter((col(colo).cast("string") >= str( extremes[0])) & (col(colo).cast("string") <= str(extremes[1]))) else: if extremes[0] != '': print("left") #documentDF = documentDF.filter("" + str(colo) + ">=" + str(extremes[0])) splitDocumentDF = filteredDocumentDF.filter( col(colo).cast("string") >= str(extremes[0])) else: print("right") splitDocumentDF = filteredDocumentDF.filter( col(colo).cast("string") <= str(extremes[1])) # print(splitDocumentDF.show()) print("end interval") newDocumentDF = newDocumentDF.unionAll(splitDocumentDF) # newDocumentDF.show(5) print("end column") if not newDocumentDF.rdd.isEmpty(): filteredDocumentDF = newDocumentDF # documentDF.show() """ Values Selection """ for colo in desiredColumns: colValues = desiredElementsDim[ dictDesiredPosition[colo]].split(",") print(colo) print(colValues) if colValues[0] == '': continue else: filteredDocumentDF = filteredDocumentDF.dropna( how="any", 
subset=[colo]) #filterDict = map(lambda x: (colo, x), colValues) # print(filterDict) #filterValues = sqlContext.createDataFrame(filterDict,["partial",colo]).drop("partial") # filterValues.show() #documentDF = documentDF.join(filterValues,colo,'inner') # documentDF.show() filteredDocumentDF = filteredDocumentDF.filter( col(colo).isin(colValues)) # documentDF.show() # documentDF.show() except Exception: print("the file to analyse is empty after the selection module, or there is a problem with it") traceback.print_exc() return 0 """ Desired Degrees of Granularity and requirements """ granularityDimensions = {} for i in range(len(desiredDimensions)): granularityDimensions[desiredDimensions[i]] = granularityDim[i] """ Dimension Variable Settings ( if the dimensions that are calculated together are not both requested ) """ completenessAnalyzer = False timelinessAnalyzer = False distinctnessAnalyzer = False populationAnalyzer = False # set timeliness and completeness_frequency if "Completeness_Frequency" in dimensionColumn.keys(): completenessAnalyzer = True if "Timeliness" in dimensionColumn: timelinessAnalyzer = True else: timelinessAnalyzer = False dimensionColumn["Timeliness"] = dimensionColumn[ "Completeness_Frequency"] del dimensionColumn["Completeness_Frequency"] else: completenessAnalyzer = False if "Timeliness" in dimensionColumn.keys(): timelinessAnalyzer = True else: timelinessAnalyzer = False print(completenessAnalyzer) print(timelinessAnalyzer) # set timeliness and completeness_frequency if "Completeness_Population" in dimensionColumn.keys(): populationAnalyzer = True if "Distinctness" in dimensionColumn: distinctnessAnalyzer = True else: distinctnessAnalyzer = False dimensionColumn["Distinctness"] = dimensionColumn[ "Completeness_Population"] del dimensionColumn["Completeness_Population"] else: populationAnalyzer = False if "Distinctness" in dimensionColumn.keys(): distinctnessAnalyzer = True else: distinctnessAnalyzer = False print(populationAnalyzer) print(distinctnessAnalyzer) """ Document Sampling """ if performanceSample < 1: filteredDocumentDF = filteredDocumentDF.sample( False, performanceSample) end = time.time() print("- Selection Elapsed Time: " + str(end - start) + " seconds") timeRdd = timeRdd.union(sc.parallelize([("Selection", (end - start))])) """ Document Missing Dimension """ # convert to Rdd notNullDocumentDF = filteredDocumentDF.dropna() document = filteredDocumentDF.fillna("null").rdd.map( lambda x: [i.encode("UTF8", "ignore") for i in x]) print("Completeness_Missing Analysis") # completeness_missing if "Completeness_Missing" in dimensionColumn.keys(): try: start = time.time() qualityName, partialQuality, confidence = completeness_missing( sc, sqlContext, document, dimensionColumn["Completeness_Missing"], dataTypes, volatilities, desiredColumns, resultFolder, dimensionColumn, keyColumns, meanAccuracyValues, devAccuracyValues, timelinessAnalyzer, completenessAnalyzer, distinctnessAnalyzer, populationAnalyzer, inputSource, associationRules, totVolume, granularityDimensions, performanceSample) end = time.time() print("- Completeness_Missing Elapsed Time: " + str(end - start) + " seconds") timeRdd = timeRdd.union(sc.parallelize( [("Completeness_Missing", (end - start))])) # save final quality for j in range(len(qualityName)): quality[qualityName[j]] = partialQuality[j] finalQuality = finalQuality.withColumn( qualityName[j], lit(partialQuality[j])) finalConfidence.append(confidence[j]) except Exception: print("Error in dimension Completeness_Missing, solve the 
problem, delete the partial results and run the analysis again") traceback.print_exc() document = notNullDocumentDF.rdd.map( lambda x: [i.encode("UTF8", "ignore") for i in x]) # print(document.take(4)) """ DATA ANALYSIS """ start = time.time() dimensionName = {'Accuracy': accuracy, 'Precision': precision, 'Completeness_Missing': completeness_missing, 'Distinctness': distinctness, 'Consistency': consistency, 'Timeliness': timeliness, 'Volume': volume} print("Starting Other Dimensions Analysis") print(dimensionColumn) for i in dimensionColumn.iterkeys(): if i in ["Completeness_Frequency", "Completeness_Population", "Completeness_Missing"]: continue try: func = dimensionName[i] start = time.time() if(func == timeliness): completeDocumentRdd, qualityName, partialQuality, confidence = func( sc, sqlContext, document, documentDF, dimensionColumn[i], dataTypes, volatilities, desiredColumns, resultFolder, dimensionColumn, keyColumns, meanAccuracyValues, devAccuracyValues, timelinessAnalyzer, completenessAnalyzer, distinctnessAnalyzer, populationAnalyzer, inputSource, associationRules, totVolume, granularityDimensions, performanceSample, dictHeaderPosition) else: qualityName, partialQuality, confidence = func( sc, sqlContext, document, dimensionColumn[i], dataTypes, volatilities, desiredColumns, resultFolder, dimensionColumn, keyColumns, meanAccuracyValues, devAccuracyValues, timelinessAnalyzer, completenessAnalyzer, distinctnessAnalyzer, populationAnalyzer, inputSource, associationRules, totVolume, granularityDimensions, performanceSample) end = time.time() # save times if i == "Timeliness": if timelinessAnalyzer & completenessAnalyzer: print("- " + str(i) + " + Completeness_Frequency " + " Elapsed Time: " + str(end - start) + " seconds") timeRdd = timeRdd.union(sc.parallelize( [(str(i) + "Completeness_Frequency", (end - start))])) else: if completenessAnalyzer: print("Completeness_Frequency " + " Elapsed Time: " + str(end - start) + " seconds") timeRdd = timeRdd.union(sc.parallelize( [("Completeness_Frequency", (end - start))])) else: print(str(i) + " Elapsed Time: " + str(end - start) + " seconds") timeRdd = timeRdd.union( sc.parallelize([(str(i), (end - start))])) # save times if i == "Distinctness": if distinctnessAnalyzer & populationAnalyzer: print("- " + str(i) + " + Completeness_Population " + " Elapsed Time: " + str(end - start) + " seconds") timeRdd = timeRdd.union(sc.parallelize( [(str(i) + "Completeness_Population", (end - start))])) else: if populationAnalyzer: print("Completeness_Population " + " Elapsed Time: " + str(end - start) + " seconds") timeRdd = timeRdd.union(sc.parallelize( [("Completeness_Population", (end - start))])) else: print(str(i) + " Elapsed Time: " + str(end - start) + " seconds") timeRdd = timeRdd.union( sc.parallelize([(str(i), (end - start))])) else: print("-" + str(i) + " Elapsed Time: " + str(end - start) + " seconds") timeRdd = timeRdd.union( sc.parallelize([(str(i), (end - start))])) # save final quality for j in range(len(qualityName)): quality[qualityName[j]] = partialQuality[j] finalQuality = finalQuality.withColumn( qualityName[j], lit(partialQuality[j])) finalConfidence.append(confidence[j]) except Exception: print("Error in dimension " + str(i) + " , solve the problem, delete the partial results and run the analysis again") traceback.print_exc() if "Consistency" in dimensionColumn.iterkeys() and "tuple" in granularityDimensions["Consistency"].split(","): # ASSOCIATION CONSISTENCY UPDATE print("\n\nAssociation consistency\n\n") 
updateAssociationConsistency = True oldRulesList = preliminaryInformation.consistencyRuleFiles print("## Old rules list ") print(oldRulesList) print("## New rules list") print(newRuleList) if set(oldRulesList) == set(newRuleList) and not forceConsistencyRefresh: updateAssociationConsistency = False print("## Old association consistency still valid, not updating") if updateAssociationConsistency: completeDocumentRdd = association_consistency( completeDocumentRdd, dictHeaderPosition, desiredColumns, newRuleList) # adding the confidence try: conf = sum(finalConfidence) / float(len(finalConfidence)) except Exception: conf = "Error" finalQuality = finalQuality.withColumn("Confidence", lit(conf)) finalQuality = finalQuality.drop("_test_") print(" ") print("final Quality") finalQuality.show() save_json(finalQuality, None, resultFolder + "/final_quality") flattenedList = [i for k in granularityDimensions.iterkeys() for i in granularityDimensions[k].split(",")] print(flattenedList) if "tuple" in flattenedList: print("-Saving updated tuple to final subfolder") save_json(completeDocumentRdd, list( dictHeaderPosition.values()), resultFolder + "/extended_dataset") totEnd = time.time() print("Total Time Elapsed: " + str(totEnd - totStart)) def toCSVLine(data): return ','.join(str(d) for d in data) lines = timeRdd.map(toCSVLine) save_txt(lines, resultFolder + "/Times") return 1 """ Entry Point """ if __name__ == "__main__": # reload(sys) # sys.setdefaultencoding('utf-8') # args = sys.argv[1:] # get configuration path and performance sample # configuration_path = args[0] # performanceSample = float(args[1]) # create spark variables conf = SparkConf().setAppName("EISTI.DQAssessment") \ .setMaster("spark://172.24.0.2:7077") sc = SparkContext(conf=conf) sqlContext = SQLContext.getOrCreate(sc) # call the main function # correctAnalysis = main( # sc, sqlContext, configuration_path, performanceSample) # print(correctAnalysis) sc.stop()
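# ---------------------------------------------------------------------------
# Hedged sketch (comments only, not executed): the configuration-file layout
# that main() appears to expect, reconstructed from the zipWithIndex filters
# above (line 0 -> input source, 1 -> result folder, ..., 13 -> rule files).
# Every concrete value below is a hypothetical placeholder.
#
#   line 0 : /data/my_source              (input source; optionally ';folder1;folder2' or ';*')
#   line 1 : /results/my_analysis         (result folder prefix, timestamp is appended)
#   line 2 : colA;colB;colC               (desired columns, ';'-separated)
#   line 3 : colA;colA,colB               (key columns / key combinations)
#   line 4 : ;10:20;                      (per-column interval filters, 'low:high', blank = none)
#   line 5 : ;;valueX,valueY              (per-column allowed values, blank = none)
#   line 6 : Completeness_Missing;Distinctness;Consistency   ('Consistency-R' forces a rule refresh)
#   line 7 : 17520                        (volatility in hours; blank falls back to the profiled default)
#   line 8 : ;15.0;                       (mean values for Accuracy, per key column)
#   line 9 : ;2.0;                        (allowed deviations for Accuracy, per key column)
#   line 10: colA:colB                    (extra association rules, 'antecedent:consequent')
#   line 11: global;global,attribute;global,attribute,value  (granularities per dimension)
#   line 12: 0                            (1 = only export the stored source quality and stop)
#   line 13: /rules/rules1.json           (association-consistency rule files)
# ---------------------------------------------------------------------------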
python
"""Tests for calculator.py.""" __author__ = 'Boris Polyanskiy' import unittest from party_calc.calculator import Person, PartyCalculator class TestPerson(unittest.TestCase): def test_init(self) -> None: person = Person('mr.White') self.assertEqual(person.name, 'mr.White') self.assertEqual(person.balance, 0.0) person = Person('mr.Green', 20.0) self.assertEqual(person.name, 'mr.Green') self.assertEqual(person.balance, 20.0) def test_calculate_payment(self) -> None: person = Person('mr.White', 50.0) self.assertEqual(person.calculate_payment(50.0), 0.0) self.assertEqual(person.calculate_payment(50), 0.0) self.assertEqual(person.calculate_payment(100.0), 50.0) self.assertEqual(person.calculate_payment(10.0), -40.0) person.balance = 50 self.assertEqual(person.calculate_payment(50), 0.0) self.assertIsInstance(person.calculate_payment(50), float) class TestPaymentCalculator(unittest.TestCase): def setUp(self) -> None: self.calc = PartyCalculator() def test_init(self) -> None: self.assertEqual(len(self.calc.persons), 0) self.assertEqual(self.calc.each_pay, 0.0) def test_select_name(self) -> None: self.calc.add_person() self.calc.add_person() self.assertEqual(['person_01', 'person_02'], self.calc.get_names()) self.calc.add_person('person_03') self.calc.add_person() self.assertIn('person_04', self.calc.get_names()) self.calc.delete_person('person_02') self.assertNotIn('person_02', self.calc.get_names()) self.calc.add_person() self.assertIn('person_02', self.calc.get_names()) self.calc.change_person_name('person_02', 'person_2') self.calc.add_person() self.assertIn('person_02', self.calc.get_names()) self.assertIn('person_2', self.calc.get_names()) def test_is_person_exists(self): self.assertFalse(self.calc.is_person_exists('test2')) self.calc.add_person('test1') self.assertTrue(self.calc.is_person_exists('test1')) self.assertFalse(self.calc.is_person_exists('test2')) def test_add_person(self) -> None: self.calc.add_person('test1') self.calc.add_person('test2', 15.0) self.assertEqual(self.calc.persons[0].name, 'test1') self.assertEqual(self.calc.persons[0].balance, 0.0) self.assertEqual(self.calc.persons[1].name, 'test2') self.assertEqual(self.calc.persons[1].balance, 15.0) self.assertRaises(ValueError, self.calc.add_person, 'test1') self.assertRaises(ValueError, self.calc.add_person, 'test3', 'lalala') self.assertEqual(self.calc.persons[-1].name, 'test2') self.calc.add_person('test4', 50) self.assertIsInstance(self.calc.persons[-1].balance, float) self.assertEqual(len(self.calc.get_names()), 3) self.calc.add_person() self.calc.add_person() self.assertIn('person_01', self.calc.get_names()) self.assertIn('person_02', self.calc.get_names()) def test_delete_person(self) -> None: self.calc.add_person('test1') self.calc.add_person('test2') self.calc.add_person('test3', 10.0) self.assertRaises(ValueError, self.calc.delete_person, 'test4') self.calc.delete_person('test2') self.assertEqual([person.name for person in self.calc.persons], ['test1', 'test3']) def test_get_names(self) -> None: self.assertEqual(self.calc.get_names(), []) names = ['test1', 'test3', 'test2'] for name in names: self.calc.add_person(name) self.assertEqual(self.calc.get_names(), names) def test_reset(self) -> None: self.calc.add_person('test1', 10) self.assertEqual(len(self.calc.persons), 1) self.calc.reset() self.assertEqual(len(self.calc.persons), 0) def test_get_person_by_name(self) -> None: params = (('test1', 0.0), ('test2', 10.0), ('test3', 50.0)) for param in params: self.calc.add_person(*param) self.assertRaises(ValueError, 
self.calc._get_person_by_name, 'test4') for name, balance in params: person = self.calc._get_person_by_name(name) self.assertEqual(person.name, name) self.assertEqual(person.balance, balance) def test_set_person_balance(self) -> None: self.calc.add_person('test1') self.calc.add_person('test2', 10.0) self.assertRaises(ValueError, self.calc.set_person_balance, 'test3', 10) self.assertEqual(self.calc.persons[0].balance, 0.0) self.calc.set_person_balance('test1', 5.0) self.assertEqual(self.calc.persons[0].balance, 5.0) self.assertEqual(self.calc.persons[1].balance, 10.0) self.calc.set_person_balance('test2', 7.0) self.assertEqual(self.calc.persons[1].balance, 7.0) def test_change_person_name(self) -> None: self.calc.add_person('test1') self.calc.add_person('test2') self.assertEqual(self.calc.get_names(), ['test1', 'test2']) self.assertRaises(ValueError, self.calc.change_person_name, 'test3', 'test4') self.assertIsNone(self.calc.change_person_name('test2', 'test2')) self.calc.change_person_name('test2', 'test3') self.assertEqual(self.calc.get_names(), ['test1', 'test3']) self.assertRaises(ValueError, self.calc.change_person_name, 'test1', 'test3') def test_get_payments_sum(self) -> None: self.assertEqual(self.calc.get_payments_sum(), 0.0) self.calc.add_person('test1', 10.0) self.calc.add_person('test2', 20.0) self.calc.add_person('test3', 5.0) self.assertEqual(self.calc.get_payments_sum(), 35.0) def test_calculate_payments(self) -> None: self.calc.add_person('test1', 30.0) self.calc.add_person('test2', 5.0) self.calc.add_person('test3', 25.0) for person in self.calc.persons: self.assertEqual(person.need_to_pay, 0.0) self.calc.calculate_payments() self.assertEqual(self.calc.each_pay, 20.0) self.assertEqual(self.calc.persons[0].need_to_pay, -10.0) self.assertEqual(self.calc.persons[1].need_to_pay, 15.0) self.assertEqual(self.calc.persons[2].need_to_pay, -5.0) def test_to_list(self): self.assertFalse(self.calc.to_list()) reference = [('test_2', 15.0), ('test_1', 10.0), ('test_3', 12.0)] for data in reference: self.calc.add_person(*data) self.assertEqual(reference, self.calc.to_list()) if __name__ == "__main__": unittest.main()
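# Hedged usage sketch, inferred only from the assertions in the tests above
# (nothing here is taken from calculator.py itself; the person names are
# placeholders). It is defined after the unittest entry point and is never
# called by the test runner.
def _party_calculator_usage_sketch() -> None:
    calc = PartyCalculator()
    calc.add_person('alice', 30.0)   # already paid 30
    calc.add_person('bob', 5.0)      # already paid 5
    calc.add_person('carol', 25.0)   # already paid 25
    calc.calculate_payments()
    # The total of 60 is split three ways, so each_pay is 20.0; a positive
    # need_to_pay means the person still owes money, a negative one means
    # they overpaid and should be reimbursed.
    assert calc.each_pay == 20.0
    assert [p.need_to_pay for p in calc.persons] == [-10.0, 15.0, -5.0]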
python
from django.db import models # Create your models here. class Permission(models.Model): """ 权限表 """ hide_type_choices = ( (1, 'FALSE'), (2, 'TRUE'), ) route_url = models.CharField(max_length=64, verbose_name="路由路径", unique=True) route_name = models.CharField(max_length=32, verbose_name="路由名称") route_title = models.CharField(max_length=32, verbose_name="路由标题") route_hide = models.IntegerField(choices=hide_type_choices) parents_id = models.IntegerField() create_time = models.DateTimeField(verbose_name='创建时间',auto_now_add=True,blank=True,null=True)#添加时的时间,更新对象时不会有变动。 upload_time = models.DateTimeField(verbose_name='更新时间',auto_now=True,null=True,blank=True)#无论是你添加还是修改对象,时间为你添加或者修改的时间。 def __str__(self): return self.route_title class Role(models.Model): """ 角色表 """ title = models.CharField(verbose_name='角色名称',max_length=32,unique=True) # 自建第三张表参数through='表名字',througth_fields=('列名','xx') r_desc = models.CharField(verbose_name='角色描述',max_length=64,blank=True) permissions = models.ManyToManyField(verbose_name='拥有的所有权限',to='Permission',blank=True) create_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True,null=True,blank=True) # 添加时的时间,更新对象时不会有变动。 upload_time = models.DateTimeField(verbose_name='更新时间', auto_now=True,null=True,blank=True) # 无论是你添加还是修改对象,时间为你添加或者修改的时间。 def __str__(self): return self.title class UserInfo(models.Model): """ 用户表 """ gender_type_choice = ( (0,"男"), (1,"女"), ) real_name = models.CharField(verbose_name='昵称',max_length=32,default="") username = models.CharField(verbose_name='用户名',max_length=32,unique=True) password = models.CharField(verbose_name='密码',max_length=64) nick = models.CharField(max_length=64,verbose_name="昵称",blank=True,null=True) gender = models.IntegerField(choices=gender_type_choice,blank=True,null=True) birthday = models.CharField(max_length=32,verbose_name="生日",null=True,blank=True) city = models.CharField(max_length=32,verbose_name="现居城市",null=True,blank=True) avatar = models.TextField(verbose_name="头像地址",null=True,blank=True) phone = models.IntegerField(verbose_name="手机号",null=True,blank=True) vertify = models.IntegerField(verbose_name="验证码",null=True,blank=True) roles = models.ManyToManyField(verbose_name='拥有所以的角色',to='Role',blank=True) create_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True,null=True,blank=True) # 添加时的时间,更新对象时不会有变动。 upload_time = models.DateTimeField(verbose_name='更新时间', auto_now=True,null=True,blank=True) # 无论是你添加还是修改对象,时间为你添加或者修改的时间。 def __str__(self): return self.real_name #============================================================================================================== class InvitationType(models.Model): """ 帖子分类表 """ type_title = models.CharField(max_length=32,unique=True,verbose_name="分类名称") type_desc = models.CharField(max_length=128,verbose_name="分类描述") status = models.CharField(max_length=64,verbose_name="分类状态") create_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True, null=True, blank=True) # 添加时的时间,更新对象时不会有变动。 upload_time = models.DateTimeField(verbose_name='更新时间', auto_now=True, null=True, blank=True) # 无论是你添加还是修改对象,时间为你添加或者修改的时间。 def __str__(self): return self.type_title class Invitation(models.Model): """ 帖子表 """ type_choices = ( (0, "是"), (1, "否"), ) invitation_title = models.CharField(max_length=64,unique=True,verbose_name="帖子标题") invitation_content = models.TextField(verbose_name="文字内容") invitation_type = models.CharField(max_length=32,verbose_name="帖子类型") invitation_label = models.CharField(max_length=64,verbose_name="帖子标签") page_views = 
models.PositiveIntegerField(verbose_name="浏览量") user_id = models.IntegerField(verbose_name="创建人id") top = models.IntegerField(choices=type_choices,verbose_name="是否置顶") cream = models.IntegerField(choices=type_choices,verbose_name="是否精华") recommend = models.IntegerField(choices=type_choices,verbose_name="是否推荐") create_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True, null=True, blank=True) # 添加时的时间,更新对象时不会有变动。 upload_time = models.DateTimeField(verbose_name='更新时间', auto_now=True, null=True, blank=True) # 无论是你添加还是修改对象,时间为你添加或者修改的时间。 def __str__(self): return self.invitation_title class InvitationImg(models.Model): """ 帖子图片表 """ img_url = models.TextField(verbose_name="图片路径") img_content = models.TextField(verbose_name="图片内容") news = models.ForeignKey(verbose_name="帖子id",to="Invitation",on_delete=models.CASCADE) create_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True, null=True, blank=True) # 添加时的时间,更新对象时不会有变动。 upload_time = models.DateTimeField(verbose_name='更新时间', auto_now=True, null=True, blank=True) # 无论是你添加还是修改对象,时间为你添加或者修改的时间。 def __str__(self): return self.img_content class Comment(models.Model): """ 评论表 """ news = models.ForeignKey(verbose_name="帖子id",to="Invitation",on_delete=models.CASCADE) comment_content = models.CharField(max_length=255,verbose_name="回复内容") user_id = models.ForeignKey(verbose_name="评论者id",to="UserInfo",on_delete=models.CASCADE) reply = models.ForeignKey(verbose_name="回复",to="self",null=True,blank=True,on_delete=models.CASCADE,related_name="replys") depth = models.PositiveIntegerField(verbose_name="评论层级",default=1) root = models.ForeignKey(verbose_name="根评论",to="self",null=True,blank=True,on_delete=models.CASCADE,related_name="roots") # favor_count = models.PositiveIntegerField(verbose_name="赞数",default=0) create_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True, null=True, blank=True) # 添加时的时间,更新对象时不会有变动。 upload_time = models.DateTimeField(verbose_name='更新时间', auto_now=True, null=True, blank=True) # 无论是你添加还是修改对象,时间为你添加或者修改的时间。 def __str__(self): return self.comment_content class Notice(models.Model): """ 聊天室公告表 """ notice_content = models.TextField(verbose_name="公告内容") status = models.CharField(max_length=64) create_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True, null=True, blank=True) # 添加时的时间,更新对象时不会有变动。 upload_time = models.DateTimeField(verbose_name='更新时间', auto_now=True, null=True, blank=True) # 无论是你添加还是修改对象,时间为你添加或者修改的时间。 def __str__(self): return self.notice_content class ForbiddenLexicon(models.Model): """ 违禁词库 """ name = models.CharField(max_length=64,verbose_name="违禁词名称",unique=True) create_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True, null=True, blank=True) # 添加时的时间,更新对象时不会有变动。 upload_time = models.DateTimeField(verbose_name='更新时间', auto_now=True, null=True, blank=True) # 无论是你添加还是修改对象,时间为你添加或者修改的时间。 def __str__(self): return self.name class Advertisement(models.Model): """ 广告表 """ advertising_space = models.CharField(max_length=64,verbose_name="广告位") advertising_img = models.TextField(verbose_name="广告图片") advertising_url = models.TextField(verbose_name="广告链接") enable_time = models.DateField(verbose_name="启用时间") end_time = models.DateField(verbose_name="结束时间") status = models.CharField(max_length=64,verbose_name="广告状态") flow = models.PositiveIntegerField(verbose_name="广告流量") create_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True, null=True, blank=True) # 添加时的时间,更新对象时不会有变动。 upload_time = models.DateTimeField(verbose_name='更新时间', 
auto_now=True, null=True, blank=True) # 无论是你添加还是修改对象,时间为你添加或者修改的时间。 def __str__(self): return self.advertising_space class SystemNotice(models.Model): """ 系统通知表 """ sn_content = models.TextField(verbose_name="通知内容") not_range = models.TextField(verbose_name="通知范围") status = models.CharField(max_length=64,verbose_name="状态") start_time = models.DateTimeField(verbose_name='创建时间', auto_now_add=True, null=True, blank=True) # 添加时的时间,更新对象时不会有变动。 end_time = models.DateTimeField(verbose_name='更新时间', auto_now=True, null=True, blank=True) # 无论是你添加还是修改对象,时间为你添加或者修改的时间。 def __str__(self): return self.sn_content
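# ---------------------------------------------------------------------------
# Hedged usage sketch (comments only, not executed): how the Permission ->
# Role -> UserInfo many-to-many chain defined above would typically be wired
# up from a shell or data migration. All literal values are placeholders and
# the password would normally be hashed via Django's auth utilities.
#
#   perm = Permission.objects.create(route_url='/admin/users', route_name='users',
#                                    route_title='User admin', route_hide=1, parents_id=0)
#   role = Role.objects.create(title='admin', r_desc='site administrator')
#   role.permissions.add(perm)
#   user = UserInfo.objects.create(username='demo', password='<hashed>', real_name='Demo')
#   user.roles.add(role)
#   user.roles.filter(permissions__route_url='/admin/users').exists()   # -> True
# ---------------------------------------------------------------------------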
python
from flask import Flask from config import app_config from app.database import create_tables from app.api.views.user_endpoints import user from app.api.views.meetup_endpoints import meetup from app.api.utils.errors import err, bad_request, internal_server_error, not_found, method_not_allowed def create_app(config_name): """Function to create the flask app""" app = Flask(__name__, instance_relative_config=True) app.config.from_object(app_config.get(config_name)) app.register_blueprint(err) app.register_blueprint(user) app.register_blueprint(meetup) app.register_error_handler(404, not_found) app.register_error_handler(400, bad_request) app.register_error_handler(405, method_not_allowed) app.register_error_handler(500, internal_server_error) create_tables() return app
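# Hedged sketch of a minimal entry point for the factory above. The config
# name 'development' is an assumption about the keys exposed by app_config in
# config.py; the guard keeps this block inert when the module is imported.
if __name__ == '__main__':
    app = create_app('development')
    app.run(debug=True)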
python
import unittest from camera_trap_classifier.data.importer import DatasetImporter class ImportFromCSVSingleImageTester(unittest.TestCase): """ Test Import from CSV """ def setUp(self): path = './test/test_files/dataset_single_image_multi_species.csv' source_type = 'csv' params = {'path': path, 'image_path_col_list': 'image', 'capture_id_col': 'capture_id', 'attributes_col_list': ['species', 'count', 'standing']} self.importer = DatasetImporter().create( source_type, params) self.data = self.importer.import_from_source() def testNormalCase(self): self.assertEqual(self.data["ele_1_0"], {'labels': [{'species': 'Elephant', 'count': '1', 'standing': '0'}], 'images': ["/path/capture_ele.jpg"]}) self.assertEqual(self.data["zebra_2_1"], {'labels': [{'species': 'Zebra', 'count': '2', 'standing': '1'}], 'images': ["/path/capture_zebra.jpg"]}) def testCountCategoryImporters(self): self.assertEqual(self.data["wild_1050_0"], {'labels': [{'species': 'Wildebeest', 'count': '10-50', 'standing': '0'}], 'images': ["/path/capture_wilde.jpg"]}) self.assertEqual(self.data["wild_50+_1"], {'labels': [{'species': 'Wildebeest', 'count': '50+', 'standing': '1'}], 'images': ["/path/capture_wilde2.jpg"]}) def testMultiSpeciesCase(self): self.assertEqual(self.data["ele_lion"], {'labels': [{'species': 'Elephant', 'count': '1', 'standing': '0'}, {'species': 'Lion', 'count': '2', 'standing': '1'}], 'images': ["/path/capture_ele_lion.jpg"]}) def testMissingImage(self): """ Test Removal of Records with missing fields """ self.assertNotIn('no_image', self.data) def testConvertMissingLabels(self): """ Test Removal of Records with missing fields """ self.assertEqual(self.data['no_species']['labels'][0]['species'], '-1') self.assertEqual(self.data['no_count']['labels'][0]['count'], '-1') self.assertEqual(self.data['no_standing']['labels'][0]['standing'], '-1') def testInconsistendImage(self): self.assertEqual(self.data["ele_zebra_diff_image"], {'labels': [{'species': 'Elephant', 'count': '2', 'standing': '0'}, {'species': 'Zebra', 'count': '3', 'standing': '0'}], 'images': ["/path/capture_ele_zebra.jpg"]}) class ImportFromCSVMultiImageTester(unittest.TestCase): """ Test Import from CSV """ def setUp(self): path = './test/test_files/dataset_multi_image_multi_species.csv' source_type = 'csv' params = {'path': path, 'image_path_col_list': ['image1', 'image2', 'image3'], 'capture_id_col': 'capture_id', 'attributes_col_list': ['species', 'count', 'standing']} self.importer = DatasetImporter().create( source_type, params) self.data = self.importer.import_from_source() def testNormalCase(self): self.assertEqual(self.data["ele_1_0"], {'labels': [{'species': 'Elephant', 'count': '1', 'standing': '0'}], 'images': ["/path/capture_ele1.jpg", "/path/capture_ele2.jpg", "/path/capture_ele3.jpg"]}) def testMultiSpeciesCase(self): self.assertEqual(self.data["ele_lion"], {'labels': [{'species': 'Elephant', 'count': '1', 'standing': '0'}, {'species': 'Lion', 'count': '2', 'standing': '1'}], 'images': ["/path/capture_ele_lion1.jpg", "/path/capture_ele_lion2.jpg", "/path/capture_ele_lion3.jpg"]}) def testNotAllImages(self): self.assertEqual(self.data["only_one_image"], {'labels': [{'species': 'Elephant', 'count': '1', 'standing': '0'}], 'images': ["/path/capture_ele3.jpg"]}) self.assertEqual(self.data["only_two_images"], {'labels': [{'species': 'Elephant', 'count': '1', 'standing': '0'}], 'images': ["/path/capture_ele1.jpg", "/path/capture_ele2.jpg"]}) if __name__ == '__main__': unittest.main()
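# ---------------------------------------------------------------------------
# Hedged sketch of the single-image CSV layout implied by the importer
# parameters above (capture_id_col='capture_id', image_path_col_list='image',
# attributes_col_list=['species', 'count', 'standing']). The rows are
# illustrative reconstructions from the expected records in the tests, not a
# copy of the real fixture file; a multi-species capture simply repeats the
# capture_id and image path on additional rows.
#
#   capture_id,image,species,count,standing
#   ele_1_0,/path/capture_ele.jpg,Elephant,1,0
#   zebra_2_1,/path/capture_zebra.jpg,Zebra,2,1
#   ele_lion,/path/capture_ele_lion.jpg,Elephant,1,0
#   ele_lion,/path/capture_ele_lion.jpg,Lion,2,1
# ---------------------------------------------------------------------------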
python
#! /usr/bin/env python # # Licensed to Elasticsearch under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import re import io from setuptools import find_namespace_packages, setup import pathlib with io.open("dorothy/__init__.py", "rt", encoding="utf8") as f: __version__ = re.search(r'__version__ = "(.*?)"', f.read()).group(1) CWD = pathlib.Path(__file__).parent README = (CWD / "README.md").read_text() setup( name="dorothy", version=__version__, description="Dorothy is a tool to test security monitoring and detection for Okta environments", long_description=README, long_description_content_type="text/markdown", url="https://github.com/elastic/dorothy", author="David French", author_email="[email protected]", maintainer="Elastic", license="Apache License 2.0", classifiers=[ "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Topic :: Security", ], packages=find_namespace_packages(include=["dorothy*"]), include_package_data=True, install_requires=open("requirements.txt", "r").read(), entry_points={ "console_scripts": [ "dorothy=dorothy.main:dorothy_shell", # this registers a command line tool "dorothy" ], }, )
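# Hedged install-and-run sketch: the entry_points block above registers a
# console script named "dorothy" that launches dorothy.main:dorothy_shell, so
# a local install makes the shell available directly. Shown as comments only;
# exact commands and flags depend on your environment.
#
#   pip install -e .
#   dorothy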
python
import os import pdb from django import forms from django.conf import settings from django.contrib.auth import forms as auth_forms from django.contrib.auth import password_validation from django.core.files.uploadedfile import UploadedFile from django.forms.widgets import FileInput from django.utils import timezone from django.utils.crypto import get_random_string from django.utils.translation import ugettext_lazy from django.db import transaction from project.models import PublishedProject from user.models import AssociatedEmail, User, Profile, CredentialApplication, CloudInformation from user.trainingreport import (find_training_report_url, TrainingCertificateError) from user.widgets import ProfilePhotoInput from user.validators import UsernameValidator, validate_name class AssociatedEmailChoiceForm(forms.Form): """ For letting users choose one of their AssociatedEmails. E.g. primary email, public email, corresponding email """ associated_email = forms.ModelChoiceField(queryset=None, to_field_name='email', label='Email') def __init__(self, user, selection_type, author=None, *args, **kwargs): # Email choices are those belonging to a user super(AssociatedEmailChoiceForm, self).__init__(*args, **kwargs) associated_emails = user.associated_emails.filter(is_verified=True).order_by('-is_primary_email') self.fields['associated_email'].queryset = associated_emails if selection_type == 'primary': self.fields['associated_email'].empty_label = None self.fields['associated_email'].initial = associated_emails.filter( is_primary_email=True).first() elif selection_type == 'public': # This might be None self.fields['associated_email'].initial = associated_emails.filter( is_public=True).first() self.fields['associated_email'].required = False elif selection_type == 'corresponding': self.fields['associated_email'].empty_label = None self.fields['associated_email'].initial = author.corresponding_email class AddEmailForm(forms.ModelForm): """ For adding new associated emails """ class Meta: model = AssociatedEmail fields = ('email',) widgets = { 'email': forms.EmailInput( attrs={'class': 'form-control dropemail'}), } def clean_email(self): """ Check that the email is unique for the user. Make the email lowercase """ data = self.cleaned_data['email'].lower() if AssociatedEmail.objects.filter(email=data).exists(): raise forms.ValidationError( 'The email is already registered') return data class LoginForm(auth_forms.AuthenticationForm): """ Form for logging in. """ username = auth_forms.UsernameField( label='Email or Username', max_length=254, widget=forms.TextInput(attrs={'autofocus': True, 'class': 'form-control', 'placeholder': 'Email or Username'}), ) password = forms.CharField( label='Password', strip=False, widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password'}), ) remember = forms.BooleanField(label='Remember Me', required=False) error_messages = { 'invalid_login': ugettext_lazy( "Please enter a correct username/email and password. Note that the password " "field is case-sensitive." ), 'inactive': ugettext_lazy("This account has not been activated. Please check your " "email for the activation link."), } class UserChangeForm(forms.ModelForm): """A form for updating user objects in the admin interface. Includes all fields on the user, but replaces the password field with the password hash display field. Use the admin interface to change passwords. 
""" password = auth_forms.ReadOnlyPasswordHashField() class Meta: model = User fields = ('email', 'password', 'is_active', 'is_admin') def clean_password(self): # Regardless of what the user provides, return the initial value. # This is done here, rather than on the field, because the # field does not have access to the initial value return self.initial["password"] class UsernameChangeForm(forms.ModelForm): """ Updating the username filed """ class Meta: model = User fields = ('username',) widgets = { 'username':forms.TextInput(attrs={'class': 'form-control', 'validators':[UsernameValidator]}), } def clean_username(self): "Record the original username in case it is needed" self.old_username = self.instance.username self.old_file_root = self.instance.file_root() if User.objects.filter(username__iexact=self.cleaned_data['username']): raise forms.ValidationError("A user with that username already exists.") return self.cleaned_data['username'].lower() def save(self): """ Change the media file directory name and photo name if any, to match the new username """ new_username = self.cleaned_data['username'] if self.old_username != new_username: with transaction.atomic(): super().save() profile = self.instance.profile if profile.photo: name_components = profile.photo.name.split('/') name_components[1] = new_username profile.photo.name = '/'.join(name_components) profile.save() if os.path.exists(self.old_file_root): os.rename(self.old_file_root, self.instance.file_root()) class SaferImageField(forms.ImageField): """ A field for uploaded image files. This wraps Django's django.forms.fields.ImageField (not to be confused with django.db.models.fields.files.ImageField!) When a file is uploaded, it is required to be a valid JPEG or PNG image file. The filename specified by the client is ignored; the file is renamed to either 'image.png' or 'image.jpg' according to the detected type. The type is enforced both by checking the magic number before passing the file to ImageField.to_python (which invokes PIL.Image.open), and by checking the content type that Pillow reports. Since we check the magic number before calling PIL.Image.open, this means we avoid calling many of the possible image format parsers, which are historically sources of countless security bugs. Note, however, that this does not avoid calling *all* undesired parsers. If one parser fails, then Pillow will try again with the next one in the list. Most of the Pillow parsers will immediately reject files that don't start with an appropriate magic number, but some parsers may not. 
""" ACCEPT_TYPES = ['image/jpeg', 'image/png'] TYPE_SUFFIX = { 'image/jpeg': '.jpg', 'image/png': '.png', } TYPE_SIGNATURE = { 'image/jpeg': b'\xff\xd8', 'image/png': b'\x89PNG\x0d\x0a\x1a\x0a', } def to_python(self, data): if data in self.empty_values: return None if hasattr(data, 'temporary_file_path'): path = data.temporary_file_path() with open(path, 'rb') as f: signature = f.read(16) else: signature = data.read(16) data.seek(0) for content_type in self.ACCEPT_TYPES: if signature.startswith(self.TYPE_SIGNATURE[content_type]): break else: raise forms.ValidationError('Not a valid JPEG or PNG image file.') result = super().to_python(data) # check that the content type is what we expected if result.content_type != content_type: raise forms.ValidationError('Not a valid JPEG or PNG image file.') # set the name according to the content type result.name = 'image' + self.TYPE_SUFFIX[content_type] return result def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if isinstance(widget, FileInput): attrs['accept'] = ','.join(self.ACCEPT_TYPES) return attrs class ProfileForm(forms.ModelForm): """ For editing the profile """ photo = SaferImageField(required=False, widget=ProfilePhotoInput( attrs={'template_name': 'user/profile_photo_input.html'})) class Meta: model = Profile fields = ('first_names', 'last_name', 'affiliation', 'location', 'website', 'photo') def clean_photo(self): data = self.cleaned_data['photo'] # Check size if file is being uploaded if data and isinstance(data, UploadedFile): if data.size > Profile.MAX_PHOTO_SIZE: raise forms.ValidationError('Exceeded maximum size: {0}'.format(Profile.MAX_PHOTO_SIZE)) # Save the existing file path in case it needs to be deleted. # After is_valid runs, the instance photo is already updated. if self.instance.photo: self.old_photo_path = self.instance.photo.path return data def save(self): # Delete the old photo if the user is uploading a new photo, and # they already had one (before saving the new photo) if 'photo' in self.changed_data and hasattr(self, 'old_photo_path'): os.remove(self.old_photo_path) super(ProfileForm, self).save() class RegistrationForm(forms.ModelForm): """A form for creating new users. Includes all the required fields, plus a repeated password. 
""" first_names = forms.CharField(max_length=100, label='First Names', widget=forms.TextInput(attrs={'class': 'form-control'}), validators=[validate_name]) last_name = forms.CharField(max_length=50, label='Last Name', widget=forms.TextInput(attrs={'class': 'form-control'}), validators=[validate_name]) class Meta: model = User fields = ('email','username',) widgets = { 'email': forms.EmailInput(attrs={'class': 'form-control'}), 'username': forms.TextInput(attrs={'class': 'form-control'}), } def clean_username(self): "Record the original username in case it is needed" if User.objects.filter(username__iexact=self.cleaned_data['username']): raise forms.ValidationError("A user with that username already exists.") return self.cleaned_data['username'].lower() def save(self): """ Process the registration form """ if self.errors: return user = super(RegistrationForm, self).save(commit=False) user.email = user.email.lower() with transaction.atomic(): user.save() # Save additional fields in Profile model Profile.objects.create(user=user, first_names=self.cleaned_data['first_names'], last_name=self.cleaned_data['last_name']) return user # Split the credential application forms into multiple forms class PersonalCAF(forms.ModelForm): """ Credential application form personal attributes """ class Meta: model = CredentialApplication fields = ('first_names', 'last_name', 'suffix', 'researcher_category', 'organization_name', 'job_title', 'city', 'state_province', 'zip_code', 'country', 'webpage') help_texts = { 'first_names': """Your first name(s). This can be edited in your profile settings.""", 'last_name': """Your last (family) name. This can be edited in your profile settings.""", 'suffix': """Please leave the suffix blank if your name does not include a suffix like "Jr." or "III". Do not list degrees. Do not put a prefix like "Mr" or "Ms". Do not put "not applicable".""", 'researcher_category': "Your research status.", 'organization_name': """Your employer or primary affiliation. Put "None" if you are an independent researcher.""", 'job_title': """Your job title or position (e.g., student) within your institution or organization.""", 'city': "The city where you live.", 'state_province': "The state or province where you live. (Required for residents of Canada or the US.)", 'zip_code': "The zip code of the city where you live.", 'country': "The country where you live.", 'webpage': """Please include a link to a webpage with your biography or other personal details (ORCID, LinkedIn, Github, etc.).""", 'research_summary': """Brief description of your proposed research. 
                If you will be using the data for a class, please include
                course name and number in your description.""",
        }
        widgets = {
            'research_summary': forms.Textarea(attrs={'rows': 3}),
            'suffix': forms.TextInput(attrs={'autocomplete': 'off'}),
        }
        labels = {
            'state_province': 'State/Province',
            'first_names': 'First (given) name(s)',
            'last_name': 'Last (family) name(s)',
            'suffix': 'Suffix, if applicable:',
            'job_title': 'Job title or position',
            'zip_code': 'ZIP/postal code',
        }

    def __init__(self, user, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.user = user
        self.profile = user.profile
        self.fields['first_names'].disabled = True
        self.fields['last_name'].disabled = True
        self.initial = {'first_names': self.profile.first_names,
                        'last_name': self.profile.last_name,
                        'organization_name': self.profile.affiliation,
                        'webpage': self.profile.website}


class ResearchCAF(forms.ModelForm):
    """
    Credential application form research attributes
    """
    class Meta:
        model = CredentialApplication
        fields = ('research_summary',)
        help_texts = {
            'research_summary': """Brief description of your research. If you
                will be using the data for a class, please include course name
                and number in your description.""",
        }
        widgets = {
            'research_summary': forms.Textarea(attrs={'rows': 2}),
        }
        labels = {
            'research_summary': 'Research Topic',
        }


class TrainingCAF(forms.ModelForm):
    """
    Credential application form training course attributes
    """
    class Meta:
        model = CredentialApplication
        fields = ('training_completion_report',)
        help_texts = {
            'training_completion_report': """Do not upload the completion
                certificate. Upload the completion report from the CITI
                'Data or Specimens Only Research' training program, which
                lists all modules completed, with dates and scores. Expired
                reports will not be accepted.""",
        }

    def clean_training_completion_report(self):
        reportfile = self.cleaned_data['training_completion_report']
        if reportfile and isinstance(reportfile, UploadedFile):
            if reportfile.size > CredentialApplication.MAX_REPORT_SIZE:
                raise forms.ValidationError(
                    'Completion report exceeds size limit')
        return reportfile


class ReferenceCAF(forms.ModelForm):
    """
    Credential application form reference attributes
    """
    class Meta:
        model = CredentialApplication
        fields = ('reference_category', 'reference_name', 'reference_email',
                  'reference_organization', 'reference_title')
        help_texts = {
            'reference_category': """Your reference's relationship to you. If
                you are a student or postdoc, this must be your supervisor.
                Otherwise, you may list a colleague. Do not list yourself or
                another student as reference. Remind your reference to respond
                promptly, as long response times will prevent approval of your
                application.""",
            'reference_name': 'The full name of your reference.',
            'reference_email': """The email address of your reference. It is
                strongly recommended that this be an institutional email
                address.""",
            'reference_organization': """Your reference's employer or primary
                affiliation.""",
            'reference_title': "Your reference's professional title or position.",
        }
        labels = {
            'reference_title': 'Reference job title or position',
        }

    def __init__(self, user, *args, **kwargs):
        """
        This form is only for processing post requests.
        """
        super().__init__(*args, **kwargs)
        self.user = user

    def clean_reference_name(self):
        reference_name = self.cleaned_data.get('reference_name')
        if reference_name:
            return reference_name.strip()

    def clean_reference_email(self):
        reference_email = self.cleaned_data.get('reference_email')
        if reference_email:
            if reference_email in self.user.get_emails():
                raise forms.ValidationError(
                    'You cannot put yourself as a reference.')
            return reference_email.strip()

    def clean_reference_title(self):
        reference_title = self.cleaned_data.get('reference_title')
        if reference_title:
            return reference_title.strip()


class CredentialApplicationForm(forms.ModelForm):
    """
    Form to apply for PhysioNet credentialling
    """
    class Meta:
        model = CredentialApplication
        fields = (
            # Personal
            'first_names', 'last_name', 'suffix', 'researcher_category',
            'organization_name', 'job_title', 'city', 'state_province',
            'zip_code', 'country', 'webpage',
            # Training course
            'training_course_name', 'training_completion_date',
            'training_completion_report',
            # Reference
            'reference_category', 'reference_name', 'reference_email',
            'reference_organization', 'reference_title',
            # Research area
            'research_summary')

    def __init__(self, user, *args, **kwargs):
        """
        This form is only for processing post requests.
        """
        super().__init__(*args, **kwargs)
        self.user = user
        self.profile = user.profile
        self.fields['first_names'].disabled = True
        self.fields['last_name'].disabled = True
        self.initial = {'first_names': self.profile.first_names,
                        'last_name': self.profile.last_name}

    def clean(self):
        data = self.cleaned_data

        if any(self.errors):
            return

        ref_details = [data['reference_category'] is not None,
                       data['reference_name'], data['reference_email'],
                       data['reference_organization'], data['reference_title']]

        ref_required = data['researcher_category'] in [0, 1, 6, 7]
        supervisor_required = data['researcher_category'] in [0, 1, 7]
        state_required = data['country'] in ['US', 'CA']

        # Students and postdocs must provide their supervisor as a reference
        if supervisor_required and data['reference_category'] != 0:
            raise forms.ValidationError("""If you are a student or postdoc,
                you must provide your supervisor as a reference.""")

        # Check that the full reference details are provided where required
        if ref_required and not all(ref_details):
            raise forms.ValidationError("""A reference is required. Please
                provide full contact details, including a reference
                category.""")

        # If any reference field is filled in, all of them must be completed
        if any(ref_details) and not all(ref_details):
            raise forms.ValidationError("""Please provide full details for
                your reference, including the reference category.""")

        # Applicants from the US or Canada must provide their state/province
        if state_required and not data['state_province']:
            raise forms.ValidationError('Please add your state or province.')

        if not self.instance and CredentialApplication.objects.filter(
                user=self.user, status=0):
            raise forms.ValidationError('Outstanding application exists.')

        # Check for a recognized CITI verification link.
        try:
            reportfile = data['training_completion_report']
            self.report_url = find_training_report_url(reportfile)
        except TrainingCertificateError:
            raise forms.ValidationError(
                'Please upload the "Completion Report" file, '
                'not the "Completion Certificate".')

    def save(self):
        credential_application = super().save(commit=False)

        # Generate a unique slug for the application
        slug = get_random_string(20)
        while CredentialApplication.objects.filter(slug=slug):
            slug = get_random_string(20)

        credential_application.user = self.user
        credential_application.slug = slug
        credential_application.training_completion_report_url = self.report_url
        credential_application.save()
        return credential_application


class CredentialReferenceForm(forms.ModelForm):
    """
    Form for a reference to respond to a PhysioNet credentialling
    application. The name must match.
    """
    class Meta:
        model = CredentialApplication
        fields = ('reference_response', 'reference_response_text')
        labels = {
            'reference_response': 'I am familiar with the research and support this request.',
            'reference_response_text': 'Please briefly describe your working relationship with the applicant.',
        }
        widgets = {
            'reference_response_text': forms.Textarea(attrs={'rows': 3}),
        }

    def save(self):
        """
        Process the reference's decision
        """
        application = super().save(commit=False)

        # Deny
        if self.cleaned_data['reference_response'] == 1:
            application.status = 1

        application.reference_response_datetime = timezone.now()
        application.reference_response_text = self.cleaned_data['reference_response_text']
        application.save()
        return application


class ContactForm(forms.Form):
    """
    For contacting PhysioNet support
    """
    name = forms.CharField(max_length=100, widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Name *'}))
    email = forms.EmailField(max_length=100, widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Email *'}))
    subject = forms.CharField(max_length=100, widget=forms.TextInput(
        attrs={'class': 'form-control', 'placeholder': 'Subject *'}))
    message = forms.CharField(max_length=2000, widget=forms.Textarea(
        attrs={'class': 'form-control', 'placeholder': 'Message *'}))

    def clean_email(self):
        # Disallow addresses that look like they come from this machine
        addr = self.cleaned_data['email'].lower()
        for domain in settings.EMAIL_FROM_DOMAINS:
            if addr.endswith('@' + domain) or addr.endswith('.' + domain):
                raise forms.ValidationError('Please enter your email address.')
        return self.cleaned_data['email']


class CloudForm(forms.ModelForm):
    """
    Form to store the AWS ID, and point to the Google GCP email.
    """
    class Meta:
        model = CloudInformation
        fields = ('gcp_email', 'aws_id')
        labels = {
            'gcp_email': 'Google (Email)',
            'aws_id': 'Amazon (ID)',
        }

    def __init__(self, *args, **kwargs):
        # Email choices are those belonging to the user
        super().__init__(*args, **kwargs)
        associated_emails = self.instance.user.associated_emails.filter(
            is_verified=True)
        self.fields['gcp_email'].queryset = associated_emails
        self.fields['gcp_email'].required = False


# class ActivationForm(forms.ModelForm):
class ActivationForm(forms.Form):
    """
    A form for activating a newly registered account by setting its password
    (entered twice for confirmation).
    """
    username = forms.CharField(disabled=True, widget=forms.TextInput(
        attrs={'class': 'form-control'}))
    email = forms.EmailField(disabled=True, widget=forms.TextInput(
        attrs={'class': 'form-control'}))
    password1 = forms.CharField(label='Password',
        widget=forms.PasswordInput(attrs={'class': 'form-control'}))
    password2 = forms.CharField(label='Password Confirmation',
        widget=forms.PasswordInput(attrs={'class': 'form-control'}))

    def __init__(self, user, *args, **kwargs):
        """
        This form is only for processing post requests.
        """
        super().__init__(*args, **kwargs)
        self.fields['username'].initial = user.username
        self.fields['email'].initial = user.email
        self.user = user

    def clean_password2(self):
        # Check that the two password entries match
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("The passwords don't match")
        password_validation.validate_password(
            self.cleaned_data.get('password1'), user=self.user)
        return password1
python
'''
Author: 邱少一
Features:
    1.0: simulate throwing a single die
    2.0: simulate throwing two dice
    3.0: visualize the results of throwing two dice
    4.0: histogram plotting and statistics tool: matplotlib
    5.0: scientific computing tool: numpy
Version: 1.0
Date: 2017/12/1
Notes:
    1. Random numbers: random() gives a float in [0, 1); uniform(a, b) gives a random
       float between a and b; randint(a, b) gives a random integer between a and b;
       choice(<list>) picks one element from a list; shuffle(<list>) shuffles a list
       in place; sample(<list>, k) picks k random elements from a list.
    2. Installing matplotlib: run `pip3 install matplotlib` in a terminal
       (pip3 is the package manager that ships with Python 3).
'''
import random

import matplotlib.pyplot as pltqsy
import numpy as npqsy

# Fix Chinese label rendering (option 1): matplotlib lacks Chinese fonts such as SimHei,
# so register the font manually
pltqsy.rcParams['font.sans-serif'] = ['SimHei']
pltqsy.rcParams['axes.unicode_minus'] = False  # keep the minus sign from rendering as a box


def random_roll():
    roll = random.randint(1, 6)
    return roll


def main():
    '''
    Main function
    '''
    # '''
    # Results of throwing a single die
    # '''
    # throw_times = 100
    # result_list = [0] * 6  # counts how often each face appears; the list index maps to the face value
    #
    # for i in range(throw_times):
    #     roll = random_roll()
    #     print('摇的数字:', roll)
    #     for j in range(1, 7):
    #         if roll == j:
    #             result_list[j - 1] += 1
    #
    # for i, result in enumerate(result_list):
    #     print('点数{}的次数{},概率{}'.format(i + 1, result, result / throw_times))

    # '''
    # Results of throwing two dice
    # '''
    # throw_total_times = 10
    # result_count_list = [0] * 12
    # result_num_list = list(range(2, 13))
    # result_dict = dict(zip(result_num_list, result_count_list))
    # # Record the outcome of every throw
    # roll1_list = []
    # roll2_list = []
    # roll_total_list = []
    # for i in range(throw_total_times):
    #     roll1 = random.randint(1, 6)
    #     roll2 = random.randint(1, 6)
    #     total_roll = roll1 + roll2
    #
    #     # Record the rolls
    #     roll1_list.append(roll1)
    #     roll2_list.append(roll2)
    #     roll_total_list.append(total_roll)
    #
    #     for key, value in result_dict.items():
    #         if key == total_roll:
    #             result_dict[key] += 1  # gotcha: `value += 1` only rebinds the local name, it does not update the dict
    #
    # for key, value in result_dict.items():
    #     print('点数之和{}出现的次数{},出现的概率为{}'.format(key, value, value / throw_total_times))

    # Scatter plot of the raw rolls: pltqsy.scatter(x, y)
    # x = range(1, throw_total_times + 1)
    # pltqsy.scatter(x, roll1_list, c='red', alpha=0.5)
    # pltqsy.scatter(x, roll2_list, c='green', alpha=0.5)
    # pltqsy.show()

    # Histogram: pltqsy.hist(data, bins=<list of bin edges>)
    # pltqsy.hist(roll_total_list, bins=range(2, 14), density=True, edgecolor='white', linewidth=1)
    # pltqsy.title('骰子点数统计')
    # pltqsy.xlabel('点数')
    # pltqsy.ylabel('频率')
    # pltqsy.show()

    # Scientific computing: simulate the dice throws with numpy random numbers
    throw_total_times = 10000
    roll1_arr = npqsy.random.randint(1, 7, size=throw_total_times)  # array of shape `size` with integers in [1, 7)
    roll2_arr = npqsy.random.randint(1, 7, size=throw_total_times)
    result_arr = roll1_arr + roll2_arr  # numpy adds element-wise (vectorized data)

    # Read off the counts: hist holds the frequencies, bins the corresponding sums of the two dice
    # hist, bins = npqsy.histogram(result_arr, bins=range(2, 14))
    # print(hist, '\n==========', bins)

    # Visualization (`density=True` replaces the old `normed=1` argument, which
    # current matplotlib versions no longer accept)
    pltqsy.hist(result_arr, bins=range(2, 14), density=True, edgecolor='white',
                linewidth=1, rwidth=0.8)

    # Relabel the x axis: map tick_labels onto the bin centers
    tick_labels = [(str(i) + '点') for i in range(2, 13)]
    tick_pos = npqsy.arange(2, 13) + 0.5
    pltqsy.xticks(tick_pos, tick_labels)
    pltqsy.title('骰子点数统计')
    pltqsy.xlabel('点数')
    pltqsy.ylabel('频率')
    pltqsy.show()


if __name__ == '__main__':
    main()
python