#!/usr/bin/env python3
import os
import sys
import json
import zipfile
import datetime
import shutil

from wearebeautiful import model_params as param

MAX_SCREENSHOT_SIZE = 256000  # 256Kb is enough!
bundles_json_file = "bundles.json"


def bundle_setup(bundle_dir_arg):
    ''' Make the bundle dir, in case it doesn't exist '''
    global bundle_dir
    bundle_dir = bundle_dir_arg
    try:
        os.makedirs(bundle_dir)
    except FileExistsError:
        pass


def create_bundle_index():
    ''' Iterate the bundles directory and read the manifest files '''
    bundles = []
    for path in os.listdir(bundle_dir):
        if path[0:6].isdigit() and path[6] == '-':
            with open(os.path.join(bundle_dir, path, "manifest.json"), "r") as f:
                manifest = json.loads(f.read())
                bundles.append(manifest)
    with open(os.path.join(bundle_dir, bundles_json_file), "w") as out:
        out.write(json.dumps(bundles))
    return bundles


def load_bundle_data_into_redis(app):
    ''' Read the bundles.json file and load into ram '''
    redis = app.redis
    loaded_bundles = []
    try:
        with open(os.path.join(bundle_dir, bundles_json_file), "r") as f:
            loaded_bundles = json.loads(f.read())
    except IOError as err:
        print("ERROR: Cannot read bundles.json.", err)
    except ValueError as err:
        print("ERROR: Cannot read bundles.json.", err)

    # Clean up old redis keys
    for k in redis.scan_iter("m:*"):
        redis.delete(k)
    redis.delete("m:ids")
    redis.delete("b:index")

    # Now add new redis keys
    bundles = []
    ids = {}
    for bundle in loaded_bundles:
        redis.set("m:%s:%s:%s" % (bundle['id'], bundle['bodypart'], bundle['pose']), json.dumps(bundle))
        data = {
            'id': bundle['id'],
            'bodypart': bundle['bodypart'],
            'pose': bundle['pose']
        }
        bundles.append(data)
        if not bundle['id'] in ids:
            ids[bundle['id']] = []
        ids[bundle['id']].append(data)

    redis.set("b:index", json.dumps(bundles))
    redis.set("m:ids", json.dumps(ids))

    return len(bundles)


def get_bundle_id_list(redis):
    """ Get the list of current ids """
    bundles = redis.get("b:index") or "[]"
    return json.loads(bundles)


def get_model_id_list(redis):
    """ Get the list of model ids """
    ids = redis.get("m:ids") or "{}"
    return json.loads(ids)


def get_bundle(redis, id, bodypart, pose):
    """ Get the manifest of the given bundle """
    manifest = redis.get("m:%s:%s:%s" % (id, bodypart, pose))
    return json.loads(manifest)


def import_bundle(bundle_file):
    """ unzip and read bundle file """
    allowed_files = ['manifest.json', 'surface-low.stl', 'surface-medium.stl',
                     'solid.stl', 'surface-orig.stl', 'screenshot.jpg']

    try:
        zipf = zipfile.ZipFile(bundle_file)
    except zipfile.BadZipFile:
        return "Invalid zip file."

    files = zipf.namelist()
    for f in files:
        if not f in allowed_files:
            return "file %s is not part of a normal bundle." % f

    try:
        rmanifest = zipf.read("manifest.json")
    except IOError:
        return "Cannot read manifest.json"

    try:
        manifest = json.loads(rmanifest)
    except json.decoder.JSONDecodeError as err:
        return err

    err = validate_manifest(manifest)
    if err:
        return err

    # The bundle looks ok, copy it into place
    dest_dir = os.path.join(bundle_dir, "%s-%s-%s" % (manifest['id'], manifest['bodypart'], manifest['pose']))
    while True:
        try:
            os.mkdir(dest_dir)
            break
        except FileExistsError:
            try:
                shutil.rmtree(dest_dir)
            except IOError as err:
                print("Failed to erase old bundle.", err)
                return err

    try:
        for member in allowed_files:
            print(os.path.join(dest_dir, member))
            zipf.extract(member, dest_dir)
    except IOError as err:
        print("IO error: ", err)
        return err

    return ""


def validate_date(date, partial=False):
    if partial:
        try:
            date_obj = datetime.datetime.strptime(date, '%Y-%m')
        except ValueError as err:
            print("Invalid date format. Must be YYYY-MM. (%s)" % err)
            return False
    else:
        try:
            date_obj = datetime.datetime.strptime(date, '%Y-%m-%d')
        except ValueError as err:
            print("Invalid date format. Must be YYYY-MM-DD. (%s)" % err)
            return False

    if date_obj.year < 2019 or date_obj.year > datetime.datetime.now().year:
        print("Invalid year.")
        return False

    return True


def validate_manifest(manifest):
    if manifest['version'] != param.FORMAT_VERSION:
        return "Incorrect format version. This script can only accept version %s" % param.FORMAT_VERSION

    if not set(param.REQUIRED_KEYS).issubset(manifest.keys()):
        missing = list(set(param.REQUIRED_KEYS) - set(manifest.keys()))
        return "Some top level fields are missing. %s\n" % ",".join(missing)

    if len(manifest['id']) != 6 or not manifest['id'].isdigit():
        return "Incorrect ID length or non digits in ID."

    if not validate_date(manifest['created'], partial=True):
        return "Incorrect created date. Must be in YYYY-MM format and minimally specify year and month."

    if not validate_date(manifest['released']):
        return "Incorrect released date. Must be in YYYY-MM-DD format"

    try:
        id = int(manifest['id'])
    except ValueError:
        return "Incorrect ID format. Must be a 6 digit number."

    if manifest['gender'] not in param.GENDERS:
        return "Invalid gender. Must be one of: %s" % str(param.GENDERS)

    if manifest['bodypart'] not in param.BODYPART:
        return "Invalid bodypart. Must be one of: %s" % str(param.BODYPART)

    if manifest['pose'] not in param.POSE:
        return "Invalid pose. Must be one of: %s" % str(param.POSE)

    if manifest['pose'] == 'variant':
        if 'pose_variant' not in manifest:
            return "pose_variant field required for variant poses."
        if len(manifest['pose_variant']) < param.MIN_FREETEXT_FIELD_LEN:
            return "pose_variant field too short. Must be at least %d characters." % param.MIN_FREETEXT_FIELD_LEN

    if manifest['pose'] != 'variant':
        if 'pose_variant' in manifest:
            return "pose_variant field must be blank when pose is not variant."

    if len(manifest['country']) != 2:
        return "Incorrect country code length."

    if manifest['country'] not in param.COUNTRIES:
        return "Invalid country. Must be one of %s" % str(param.COUNTRIES)

    try:
        age = int(manifest['age'])
    except ValueError:
        return "Cannot parse age."

    if age < 18 or age > 200:
        return "Invalid age. Must be 18-200"

    if manifest['body_type'] not in param.BODY_TYPES:
        return "Invalid body type. Must be one of %s" % str(param.BODY_TYPES)

    if manifest['mother'] not in param.MOTHER:
        return "Invalid value for the field mother. Must be one of %s" % str(param.MOTHER)

    if len(manifest['ethnicity']) < param.MIN_FREETEXT_FIELD_LEN:
        return "ethnicity field too short. Must be at least %d characters." % param.MIN_FREETEXT_FIELD_LEN

    if 'modification' in manifest:
        if type(manifest['modification']) != list:
            return "modification must be a list."
        if len(manifest['modification']) > 0 and not set(manifest['modification']).issubset(param.MODIFICATIONS):
            return "modification must be one of: %s" % str(param.MODIFICATIONS)

    return ""
python
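For quick reference, a minimal sketch of exercising the date validator defined above; the example dates are made up.

# Hypothetical quick check of validate_date (no external data needed).
print(validate_date("2019-06", partial=True))   # True: year-month form accepted
print(validate_date("2019-06-15"))              # True: full date accepted
print(validate_date("2018-01-01"))              # False: years before 2019 are rejected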
from flask import render_template, flash, redirect, url_for, session, Markup
from flask_login import login_user, logout_user, login_required

from app import app, db, lm
from app.models.forms import *
from app.models.tables import *


@lm.user_loader
def load_user(id):
    return Usuario.query.filter_by(id=id).first()


@app.route("/index")
@app.route("/")
def index():
    return render_template('index.html')


# Login/logout section
@app.route("/login", methods=["GET", "POST"])
def login():
    form = LoginForm()
    if form.validate_on_submit():
        user = Usuario.query.filter_by(username=form.username.data).first()
        if user and user.password == form.password.data:
            login_user(user)
            flash("Usuário logado")
            return redirect(url_for("index"))
        else:
            flash("Login inválido")
    # else:
    #     return "erro no login"
    return render_template('login.html', form=form)


@app.route("/logout")
@login_required
def logout():
    logout_user()
    flash("Usuário Deslogado")
    return redirect(url_for("index"))


# Orders section
@app.route("/pedidos", methods=["GET", "POST"])
@login_required
def pedidos():
    form = PedidoForm()
    if form.validate_on_submit():
        i = Pedido(form.servico.data, form.observacao.data, form.data_pedido.data,
                   form.quantidade.data, form.preco.data, form.status_conclusao.data)
        db.session.add(i)
        db.session.commit()
        flash("Pedido adicionado com sucesso!!")
    return render_template('pedidos.html', form=form)


@app.route("/visualizar", methods=["GET", "POST"])
@login_required
def visualizar():
    pedidos_ativos = Pedido.query.filter_by(status_conclusao=False).all()
    pedidos_concluidos = Pedido.query.filter_by(status_conclusao=True).all()
    return render_template('visualizar.html', pedidos_ativos=pedidos_ativos,
                           pedidos_concluidos=pedidos_concluidos)


@app.route('/visualizar/complete/<id>')
@login_required
def complete(id):
    pedido = Pedido.query.filter_by(id=int(id)).first_or_404()
    pedido.status_conclusao = True
    db.session.commit()
    return redirect(url_for('visualizar'))


@app.route("/visualizar/delete/<id>")
@login_required
def delete(id):
    pedido = Pedido.query.filter_by(id=int(id)).first_or_404()
    db.session.delete(pedido)
    db.session.commit()
    return redirect(url_for('visualizar'))


@app.route("/visualizar/confirmacao/<id>")
@login_required
def confirmacao(id):
    flash(Markup("Confirma a exclusão do pedido?</br></br>"
                 "<a href='/visualizar/delete/" + str(id) + "' class='btn btn-outline-danger btn-sm mr-3 ml-3'>Sim</a>"
                 "<a href='/visualizar' class='btn btn-outline-primary btn-sm ml-3 mr-3'>Não</a>"))
    return redirect(url_for('visualizar'))


# Stock control section
@app.route('/estoque')
@login_required
def estoque():
    estoque = Estoque.query.order_by(Estoque.id).all()
    return render_template('estoque.html', estoque=estoque)


@app.route('/estoque/adicionar', methods=["GET", "POST"])
@login_required
def adicionarEstoque():
    form = EstoqueForm()
    if form.validate_on_submit():
        i = Estoque(form.nome_item.data, form.quantidade_estoque.data,
                    form.quantidade_minimo.data, form.data_atualizacao.data)
        db.session.add(i)
        db.session.commit()
        flash("Item adicionado com sucesso!!")
    return render_template('adicionar_estoque.html', form=form)


@app.route('/estoque/atualizar/<id>', methods=["GET", "POST"])
@login_required
def atualizarItem(id):
    item = Estoque.query.filter_by(id=int(id)).first()
    form = EstoqueForm()
    if form.validate_on_submit():
        item.nome_item = form.nome_item.data
        item.quantidade_estoque = form.quantidade_estoque.data
        item.quantidade_minimo = form.quantidade_minimo.data
        item.data_atualizacao = form.data_atualizacao.data
        db.session.commit()
        flash("Atualização concluída..")
        return redirect(url_for('estoque'))
    return render_template('atualizar_estoque.html', form=form, item=item)


@app.route("/estoque/delete/<id>")
@login_required
def deleteItem(id):
    item = Estoque.query.filter_by(id=int(id)).first_or_404()
    db.session.delete(item)
    db.session.commit()
    return redirect(url_for('estoque'))


@app.route("/estoque/confirmacao/<id>")
@login_required
def confirmacaoEstoque(id):
    flash(Markup("Confirma a exclusão do item?</br></br>"
                 "<a href='/estoque/delete/" + str(id) + "' class='btn btn-outline-danger btn-sm mr-3 ml-3'>Sim</a>"
                 "<a href='/estoque' class='btn btn-outline-primary btn-sm ml-3 mr-3'>Não</a>"))
    return redirect(url_for('estoque'))
python
import logging
import time

import paho.mqtt.client as mqtt

# from utils_intern.messageLogger import MessageLogger

logging.basicConfig(format='%(asctime)s %(levelname)s %(name)s: %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__file__)


class MQTTClient:
    def __init__(self, host, mqttPort, client_id, keepalive=60, username=None, password=None,
                 ca_cert_path=None, set_insecure=False, id=None):
        # self.logger = MessageLogger.get_logger(__file__, id)
        self.host = host
        self.port = int(mqttPort)
        self.keepalive = keepalive
        self.receivedMessages = []
        self.topic_sub_ack = []
        self.callback_function = None
        self.client_id = client_id
        self.id = id
        self.connected = False
        self.messages = ""
        self.client = mqtt.Client(client_id, clean_session=False)
        if username is not None and password is not None:
            logger.debug("u " + username + " p " + password)
            self.client.username_pw_set(username, password)
        if ca_cert_path is not None and len(ca_cert_path) > 0:
            logger.debug("ca " + ca_cert_path)
            self.client.tls_set(ca_certs=ca_cert_path)
            logger.debug("insec " + str(set_insecure))
            if not isinstance(set_insecure, bool):
                set_insecure = bool(set_insecure)
            self.client.tls_insecure_set(set_insecure)
        self.client.on_message = self.on_message
        self.client.on_publish = self.on_publish
        self.client.on_connect = self.on_connect
        self.client.on_subscribe = self.on_subscribe
        self.client.on_disconnect = self.on_disconnect
        logger.info("Trying to connect to the MQTT broker " + str(self.host) + " " + str(self.port))
        try:
            self.client.connect(self.host, self.port, self.keepalive)
        except Exception as e:
            self.connected = False
            msg = "Invalid MQTT host " + str(self.host) + " " + str(self.port)
            logger.error("Error connecting client " + str(self.host) + " " + str(self.port) + " " + str(e))
            raise InvalidMQTTHostException(msg)
        # self.client.loop_forever()
        # loop_start() runs a background thread that processes network traffic,
        # dispatches callbacks and handles reconnecting.
        self.client.loop_start()

    def on_connect(self, client, userdata, flags, rc):
        logger.info("Connected with result code " + str(rc))
        if rc == 0:
            self.connected = True
            client.connected_Flag = True
            logger.info("Connected to the broker")
        else:
            logger.error("Error connecting to broker " + str(rc))

    def on_disconnect(self, *args):
        logger.error("Disconnected from broker")
        logger.info(str(args))

    def on_message(self, client, userdata, message):
        self.callback_function(message.payload.decode())

    def sendResults(self, topic, data, qos):
        try:
            if self.connected:
                logger.debug("Sending results to this topic: " + topic)
                self.publish(topic, data, qos=qos)
                logger.debug("Results published")
        except Exception as e:
            logger.error(e)

    def publish(self, topic, message, waitForAck=False, qos=2):
        if self.connected:
            mid = self.client.publish(topic, message, qos)[1]
            if waitForAck:
                while mid not in self.receivedMessages:
                    logger.debug("waiting for pub ack for topic " + str(topic))
                    time.sleep(0.25)

    def on_publish(self, client, userdata, mid):
        self.receivedMessages.append(mid)

    def MQTTExit(self):
        logger.debug("Disconnecting MQTT")
        self.client.disconnect()
        logger.debug("Disconnected from the MQTT clients")
        self.client.loop_stop()
        logger.debug("MQTT service disconnected")

    def subscribe_to_topics(self, topics_qos, callback_function):
        count = 0
        while not self.connected:
            time.sleep(1)
            count += 1
            if count > 15:
                raise Exception("Timed out waiting for MQTT connection")
        mid = self.subscribe(topics_qos, callback_function)
        while not self.subscribe_ack_wait(mid):
            mid = self.subscribe(topics_qos, callback_function)
            logger.error("Topic subscribe missing ack")

    def subscribe(self, topics_qos, callback_function):
        # topics_qos is a list of tuples, e.g. [("topic", 0)]
        try:
            if self.connected:
                logger.info("Subscribing to topics with qos: " + str(topics_qos))
                result, mid = self.client.subscribe(topics_qos)
                if result == 0:
                    logger.debug("Subscribed to topics: " + str(topics_qos) +
                                 " result = " + str(result) + " , mid = " + str(mid))
                    self.callback_function = callback_function
                    return mid
                else:
                    logger.info("error on subscribing " + str(result))
                    return -1
        except Exception as e:
            logger.error(e)
            return -1

    def on_subscribe(self, client, userdata, mid, granted_qos):
        """check mid values from topic ack list"""
        self.topic_sub_ack.append(mid)

    def subscribe_ack_wait(self, mid):
        if mid < 0:
            return False
        count = 0
        if self.connected:
            while count < 15:
                if mid in self.topic_sub_ack:
                    return True
                else:
                    logger.info("topic sub ack len = " + str(len(self.topic_sub_ack)))
                    time.sleep(1)
                    count += 1
        self.topic_sub_ack.remove(mid)
        return False


class InvalidMQTTHostException(Exception):
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return repr(self.msg)
python
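A minimal connection sketch for the MQTTClient class above; the broker address, topic, and client id are placeholders, and a reachable broker is assumed.

# Hypothetical usage of MQTTClient; "localhost", the topic and the client id are placeholders.
def handle_message(payload):
    print("received:", payload)

client = MQTTClient(host="localhost", mqttPort=1883, client_id="example-client")
client.subscribe_to_topics([("sensors/temperature", 1)], handle_message)
client.publish("sensors/temperature", "21.5", qos=1)
client.MQTTExit()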
from gym_trafficnetwork.envs.parallel_network import Cell
import numpy as np


# For the simplest road type
def homogeneous_road(num_cells, vfkph, cell_length, num_lanes):
    r = []
    for _ in range(num_cells):
        r.append(Cell(vfkph, cell_length, num_lanes))
    return r


# For roads whose cells have lane counts like n-n-n-m-n
def road_with_single_bottleneck(num_cells, vfkph, cell_length, num_lanes, bottleneck_id, bottleneck_num_lanes):
    # bottleneck_id is the id of the cell that has bottleneck_num_lanes-many lanes
    # (0 is the first cell, and num_cells-1 is the last).
    # Guard against the off-by-one case where callers pass bottleneck_id == num_cells for the last cell.
    if bottleneck_id >= num_cells:
        import warnings
        warnings.warn("bottleneck_id is invalid! Setting it to be the last cell.")
        import time
        time.sleep(5)
        bottleneck_id = num_cells - 1
    r = []
    for _ in range(num_cells - 1):
        r.append(Cell(vfkph, cell_length, num_lanes))
    r.insert(bottleneck_id, Cell(vfkph, cell_length, bottleneck_num_lanes))
    return r


# For roads whose cells have lane counts like n-n-n-m-m
def two_partition_road(firstpart_num_cells, secondpart_num_cells, vfkph, cell_length,
                       firstpart_num_lanes, secondpart_num_lanes):
    r = []
    for _ in range(firstpart_num_cells):
        r.append(Cell(vfkph, cell_length, firstpart_num_lanes))
    for _ in range(secondpart_num_cells):
        r.append(Cell(vfkph, cell_length, secondpart_num_lanes))
    return r


# Generalization of two_partition_road (and homogeneous_road) to n-partition roads.
# Every parameter may be either an array or a scalar.
def n_partition_road(num_cells, vfkph, cell_length, num_lanes):
    if not isinstance(num_cells, (list, np.ndarray)):
        num_cells = [num_cells]
    if not isinstance(vfkph, (list, np.ndarray)):
        vfkph = [vfkph]
    if not isinstance(cell_length, (list, np.ndarray)):
        cell_length = [cell_length]
    if not isinstance(num_lanes, (list, np.ndarray)):
        num_lanes = [num_lanes]
    num_partitions = np.max([len(num_cells), len(vfkph), len(cell_length), len(num_lanes)])
    if len(num_cells) == 1:
        num_cells = [num_cells[0]] * num_partitions
    if len(vfkph) == 1:
        vfkph = [vfkph[0]] * num_partitions
    if len(cell_length) == 1:
        cell_length = [cell_length[0]] * num_partitions
    if len(num_lanes) == 1:
        num_lanes = [num_lanes[0]] * num_partitions
    r = []
    for i in range(len(num_cells)):
        for _ in range(num_cells[i]):
            r.append(Cell(vfkph[i], cell_length[i], num_lanes[i]))
    return r
python
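For illustration, a short sketch of how these builders might be called; the numeric values are arbitrary and Cell's constructor signature is taken from the calls above.

# Hypothetical example values: a 5-cell road at 100 km/h with 0.5 km cells.
plain = homogeneous_road(num_cells=5, vfkph=100.0, cell_length=0.5, num_lanes=3)

# Same road, but the 4th cell (index 3) narrows to 2 lanes.
bottleneck = road_with_single_bottleneck(5, 100.0, 0.5, 3, bottleneck_id=3, bottleneck_num_lanes=2)

# Three partitions of 2 cells each, with 3, 2 and 3 lanes respectively (scalars are broadcast).
mixed = n_partition_road(num_cells=2, vfkph=100.0, cell_length=0.5, num_lanes=[3, 2, 3])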
import re
import typing as tp
from time import time

from loguru import logger


def time_execution(func: tp.Any) -> tp.Any:
    """This decorator logs the execution time of the function object passed"""

    def wrap_func(*args: tp.Any, **kwargs: tp.Any) -> tp.Any:
        t1 = time()
        result = func(*args, **kwargs)
        t2 = time()
        logger.debug(f"Function {func.__name__!r} executed in {(t2 - t1):.4f}s")
        return result

    return wrap_func


def get_headers(rfid_card_id: str) -> tp.Dict[str, str]:
    """Return a dict with all the headers required for using the backend"""
    return {"rfid-card-id": rfid_card_id}


def is_a_ean13_barcode(string: str) -> bool:
    """Determine whether the barcode scanner input is a valid EAN13 barcode"""
    return bool(re.fullmatch(r"\d{13}", string))
python
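A small usage sketch of the helpers above; the decorated lookup function and its inputs are hypothetical.

# Hypothetical usage of the helpers above.
@time_execution
def lookup_item(barcode: str) -> bool:
    return is_a_ean13_barcode(barcode)

print(lookup_item("4006381333931"))   # True (13 digits), and the execution time is logged
print(get_headers("1234567890"))      # {'rfid-card-id': '1234567890'}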
import os
import time
import argparse
import numpy as np
import cv2
from datetime import datetime

import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import nnabla.logger as logger
import nnabla.utils.save as save
from nnabla.monitor import Monitor, MonitorSeries, MonitorImageTile

from dataset import prepare_dataloader
from model import depth_cnn_model, l1_loss
from auxiliary import convert_depth2colormap


def main(args):
    from numpy.random import seed
    seed(46)

    # Get context.
    from nnabla.ext_utils import get_extension_context
    ctx = get_extension_context('cudnn', device_id='0', type_config='float')
    nn.set_default_context(ctx)

    # Create CNN network
    # === TRAIN ===
    # Create input variables.
    image = nn.Variable([args.batch_size, 3, args.img_height, args.img_width])
    label = nn.Variable([args.batch_size, 1, args.img_height, args.img_width])
    # Create prediction graph.
    pred = depth_cnn_model(image, test=False)
    pred.persistent = True
    # Create loss function.
    loss = l1_loss(pred, label)

    # === VAL ===
    # vimage = nn.Variable([args.batch_size, 3, args.img_height, args.img_width])
    # vlabel = nn.Variable([args.batch_size, 1, args.img_height, args.img_width])
    # vpred = depth_cnn_model(vimage, test=True)
    # vloss = l1_loss(vpred, vlabel)

    # Prepare monitors.
    monitor = Monitor(os.path.join(args.log_dir, 'nnmonitor'))
    monitors = {
        'train_epoch_loss': MonitorSeries('Train epoch loss', monitor, interval=1),
        'train_itr_loss': MonitorSeries('Train itr loss', monitor, interval=100),
        # 'val_epoch_loss': MonitorSeries('Val epoch loss', monitor, interval=1),
        'train_viz': MonitorImageTile('Train images', monitor, interval=1000, num_images=4)
    }

    # Create Solver. If training from checkpoint, load the info.
    if args.optimizer == "adam":
        solver = S.Adam(alpha=args.learning_rate, beta1=0.9, beta2=0.999)
    elif args.optimizer == "sgd":
        solver = S.Momentum(lr=args.learning_rate, momentum=0.9)
    solver.set_parameters(nn.get_parameters())

    # Initialize DataIterator
    data_dic = prepare_dataloader(args.dataset_path, datatype_list=['train', 'val'],
                                  batch_size=args.batch_size,
                                  img_size=(args.img_height, args.img_width))

    # Training loop.
    logger.info("Start training!!!")
    total_itr_index = 0
    for epoch in range(1, args.epochs + 1):
        # === training ===
        total_train_loss = 0
        index = 0
        while index < data_dic['train']['size']:
            # Preprocess
            image.d, label.d = data_dic['train']['itr'].next()
            loss.forward(clear_no_need_grad=True)
            # Initialize gradients
            solver.zero_grad()
            # Backward execution
            loss.backward(clear_buffer=True)
            # Update parameters by computed gradients
            if args.optimizer == 'sgd':
                solver.weight_decay(1e-4)
            solver.update()

            # Update log
            index += 1
            total_itr_index += 1
            total_train_loss += loss.d

            # Pass to monitor
            monitors['train_itr_loss'].add(total_itr_index, loss.d)

            # Visualization
            pred.forward(clear_buffer=True)
            train_viz = np.concatenate([image.d,
                                        convert_depth2colormap(label.d),
                                        convert_depth2colormap(pred.d)], axis=3)
            monitors['train_viz'].add(total_itr_index, train_viz)

            # Logger
            logger.info("[{}] {}/{} Train Loss {} ({})".format(
                epoch, index, data_dic['train']['size'], total_train_loss / index, loss.d))

        # Pass training loss to a monitor.
        train_error = total_train_loss / data_dic['train']['size']
        monitors['train_epoch_loss'].add(epoch, train_error)

        # Save Parameter
        out_param_file = os.path.join(args.log_dir, 'checkpoint' + str(epoch) + '.h5')
        nn.save_parameters(out_param_file)

        # === Validation ===
        # total_val_loss = 0.0
        # val_index = 0
        # while val_index < data_dic['val']['size']:
        #     # Inference
        #     vimage.d, vlabel.d = data_dic['val']['itr'].next()
        #     vpred.forward(clear_buffer=True)
        #     vloss.forward(clear_buffer=True)
        #     total_val_loss += vloss.d
        #     val_index += 1
        #     break
        # Pass validation loss to a monitor.
        # val_error = total_val_loss / data_dic['val']['size']
        # monitors['val_epoch_loss'].add(epoch, val_error)


if __name__ == "__main__":
    parser = argparse.ArgumentParser('depth-cnn-nnabla')
    parser.add_argument('--dataset-path', type=str, default="~/datasets/nyudepthv2")
    parser.add_argument('--batch-size', type=int, default=8)
    parser.add_argument('--img-height', type=int, default=228)
    parser.add_argument('--img-width', type=int, default=304)
    parser.add_argument('--optimizer', type=str, default='sgd')
    parser.add_argument('--learning-rate', type=float, default=1e-3)
    parser.add_argument('--epochs', type=int, default=30)
    parser.add_argument('--log-dir', default='./log')
    args = parser.parse_args()
    main(args)
python
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: get_app_health_config_v2.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from monitor_sdk.model.monitor_config import app_health_config_pb2 as monitor__sdk_dot_model_dot_monitor__config_dot_app__health__config__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='get_app_health_config_v2.proto', package='app_health', syntax='proto3', serialized_options=None, serialized_pb=_b('\n\x1eget_app_health_config_v2.proto\x12\napp_health\x1a\x38monitor_sdk/model/monitor_config/app_health_config.proto\"-\n\x1bGetAppHealthConfigV2Request\x12\x0e\n\x06\x61pp_id\x18\x01 \x01(\t\"h\n\x1cGetAppHealthConfigV2Response\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0b\n\x03msg\x18\x02 \x01(\t\x12-\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x1f.monitor_config.AppHealthConfig\"\x8f\x01\n#GetAppHealthConfigV2ResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x36\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32(.app_health.GetAppHealthConfigV2Responseb\x06proto3') , dependencies=[monitor__sdk_dot_model_dot_monitor__config_dot_app__health__config__pb2.DESCRIPTOR,]) _GETAPPHEALTHCONFIGV2REQUEST = _descriptor.Descriptor( name='GetAppHealthConfigV2Request', full_name='app_health.GetAppHealthConfigV2Request', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='app_id', full_name='app_health.GetAppHealthConfigV2Request.app_id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=104, serialized_end=149, ) _GETAPPHEALTHCONFIGV2RESPONSE = _descriptor.Descriptor( name='GetAppHealthConfigV2Response', full_name='app_health.GetAppHealthConfigV2Response', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='app_health.GetAppHealthConfigV2Response.code', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='msg', full_name='app_health.GetAppHealthConfigV2Response.msg', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='app_health.GetAppHealthConfigV2Response.data', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ 
], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=151, serialized_end=255, ) _GETAPPHEALTHCONFIGV2RESPONSEWRAPPER = _descriptor.Descriptor( name='GetAppHealthConfigV2ResponseWrapper', full_name='app_health.GetAppHealthConfigV2ResponseWrapper', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='app_health.GetAppHealthConfigV2ResponseWrapper.code', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='codeExplain', full_name='app_health.GetAppHealthConfigV2ResponseWrapper.codeExplain', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='app_health.GetAppHealthConfigV2ResponseWrapper.error', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='app_health.GetAppHealthConfigV2ResponseWrapper.data', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=258, serialized_end=401, ) _GETAPPHEALTHCONFIGV2RESPONSE.fields_by_name['data'].message_type = monitor__sdk_dot_model_dot_monitor__config_dot_app__health__config__pb2._APPHEALTHCONFIG _GETAPPHEALTHCONFIGV2RESPONSEWRAPPER.fields_by_name['data'].message_type = _GETAPPHEALTHCONFIGV2RESPONSE DESCRIPTOR.message_types_by_name['GetAppHealthConfigV2Request'] = _GETAPPHEALTHCONFIGV2REQUEST DESCRIPTOR.message_types_by_name['GetAppHealthConfigV2Response'] = _GETAPPHEALTHCONFIGV2RESPONSE DESCRIPTOR.message_types_by_name['GetAppHealthConfigV2ResponseWrapper'] = _GETAPPHEALTHCONFIGV2RESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) GetAppHealthConfigV2Request = _reflection.GeneratedProtocolMessageType('GetAppHealthConfigV2Request', (_message.Message,), { 'DESCRIPTOR' : _GETAPPHEALTHCONFIGV2REQUEST, '__module__' : 'get_app_health_config_v2_pb2' # @@protoc_insertion_point(class_scope:app_health.GetAppHealthConfigV2Request) }) _sym_db.RegisterMessage(GetAppHealthConfigV2Request) GetAppHealthConfigV2Response = _reflection.GeneratedProtocolMessageType('GetAppHealthConfigV2Response', (_message.Message,), { 'DESCRIPTOR' : _GETAPPHEALTHCONFIGV2RESPONSE, '__module__' : 'get_app_health_config_v2_pb2' # @@protoc_insertion_point(class_scope:app_health.GetAppHealthConfigV2Response) }) _sym_db.RegisterMessage(GetAppHealthConfigV2Response) GetAppHealthConfigV2ResponseWrapper = _reflection.GeneratedProtocolMessageType('GetAppHealthConfigV2ResponseWrapper', (_message.Message,), { 'DESCRIPTOR' : _GETAPPHEALTHCONFIGV2RESPONSEWRAPPER, '__module__' : 
'get_app_health_config_v2_pb2' # @@protoc_insertion_point(class_scope:app_health.GetAppHealthConfigV2ResponseWrapper) }) _sym_db.RegisterMessage(GetAppHealthConfigV2ResponseWrapper) # @@protoc_insertion_point(module_scope)
python
#!/usr/bin/env python3
import os
import sys
import time

import visa


def main():
    """Ramp the output voltage of the attached supply from 0 V to 59 V in 1 V steps."""
    rm = visa.ResourceManager()
    print(rm.list_resources())
    instr1 = rm.open_resource('USB0::0x05E6::0x2280::4106469::INSTR')
    print(instr1.query("*IDN?"))
    for i in range(60):
        print("output voltage %sV" % i)
        instr1.write(":VOLTage %s" % i)
        time.sleep(0.5)
    print("OK")


if __name__ == '__main__':
    main()
python
import os, time from this import d import numpy as np import pandas as pd import tensorflow as tf from sklearn.preprocessing import MinMaxScaler from datetime import datetime, timedelta from detector import detect_anomaly from decomposition import load_STL_results, decompose_model from models import * from data_loader import _create_sequences, _decreate_sequences, _count_anomaly_segments, _wavelet from data_loader import convert_datetime, get_dummies, add_temporal_info os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ['CUDA_VISIBLE_DEVICES']= "-1" def get_dataset_name(column_names): # 9 -> IoT, 10 -> samsung, 4 -> kpi, 43 -> energy n_columns = len(column_names) dataset_names = {9: 'IoT', 10: 'samsung', 4: 'kpi', 34: 'energy'} return dataset_names[n_columns] def preprocess_uploaded_file(filepath): if filepath.split('.')[-1] == 'csv': df = pd.read_csv(filepath) dataset_name = get_dataset_name(df.columns) anomaly_scores, th = run_detector(df) anomaly_scores = _decreate_sequences(anomaly_scores) print('Threshold ==>', th) chart_data = [] if dataset_name == 'samsung': columns = df.columns[1:] for col in columns[:-2]: for i in range(df.shape[0]): chart_data.append({ 'date': df['date'].iloc[i][:-3], 'value': float(df[col].iloc[i]), 'column': col, 'score': float(anomaly_scores[i]), 'label': int(df['label'].iloc[i]) }) display_columns = columns[:-2].tolist() elif dataset_name == 'energy': columns = df.columns[1:] for col in columns[:-2]: for i in range(df.shape[0]): chart_data.append({ 'date': df['date'].iloc[i][:-3], 'value': float(df[col].iloc[i]), 'column': col, 'score': float(anomaly_scores[i]), 'label': int(df['label'].iloc[i]) }) display_columns = columns[:-2].tolist() os.remove(filepath) return {'status': 200, 'data': chart_data, 'columns': display_columns, 'anomaly_scores': anomaly_scores, 'threshold': th} else: return {'status': 400, 'message': 'upsupported file type'} def preprocess_samsung_file(df, seq_length, stride, weight, wavelet_num, historical=False, temporal=False, decomposition=False, segmentation=False): x_test, y_test = [], [] y_segment_test = [] x_test_resid = [] label_seq, test_seq = [], [] # Samsung test_df = df if temporal == True: test_df = np.array(add_temporal_info('samsung', test_df, test_df.date)) test_df = test_df[:, 6:-1].astype(float) else: if decomposition == True: test_holiday = np.array(add_temporal_info('samsung', test_df, test_df.date)['holiday']) test_weekend = np.array(add_temporal_info('samsung', test_df, test_df.date)['is_weekend']) test_temporal = (test_holiday + test_weekend).reshape(-1, 1) test_df = np.array(test_df) labels = test_df[:, -1].astype(int) test_df = test_df[:, 1:-1].astype(float) scaler = MinMaxScaler() test_df = scaler.fit_transform(test_df) if decomposition == True: stl_loader = load_STL_results(test_df) test_seasonal = stl_loader['test_seasonal'] test_trend = stl_loader['test_trend'] test_normal = test_seasonal + test_trend x_test_normal = _create_sequences(test_normal, seq_length, stride, historical) print("#"*10, "Deep Decomposer Generating...", "#"*10) deep_pattern = decompose_model(x_test_normal, 'samsung') deep_test = deep_pattern['rec_test'] deep_test_pattern = _decreate_sequences(deep_test) test_resid = (test_df - deep_test_pattern) * (1 + weight * test_temporal) # Wavelet transformation test_resid_wav = _wavelet(test_resid) test_resid_wavelet = _wavelet(test_resid_wav) for _ in range(wavelet_num): test_resid_wavelet = _wavelet(test_resid_wavelet) if temporal == True: if seq_length > 0: 
x_test.append(_create_sequences(test_df, seq_length, stride, historical)) else: x_test.append(test_df) else: if seq_length > 0: x_test.append(_create_sequences(test_df, seq_length, stride, historical)) y_test.append(_create_sequences(labels, seq_length, stride, historical)) else: x_test.append(test_df) y_test.append(labels) if decomposition == True: x_test_resid.append(_create_sequences(test_resid_wavelet, seq_length, stride, historical)) y_segment_test.append(_count_anomaly_segments(labels)[1]) label_seq.append(labels) # For plot traffic raw data test_seq.append(test_df) # Only return temporal auxiliary information if temporal == True: return {'x_test': x_test} # There are four cases. # 1) Decompose time series and evaluate through traditional metrics if (decomposition == True) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'x_test_resid': x_test_resid, 'label_seq': label_seq, 'test_seq': test_seq} # 2) Decompose time series and evalutate new metrics elif (decomposition == True) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test, 'x_test_resid': x_test_resid} # 3) Evaluate through new metrics with common methods elif (decomposition == False) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test} # 4) Evaluate through traditional metrics with common methods elif (decomposition == False) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'label_seq': label_seq, 'test_seq': test_seq} def preprocess_iot_file(df, seq_length, stride, weight, wavelet_num, historical=False, temporal=False, decomposition=False, segmentation=False): x_test, y_test = [], [] y_segment_test = [] x_test_resid = [] label_seq, test_seq = [], [] # IoT Modbus date_format = '%d-%b-%y' time_format = '%H:%M:%S' df['date'] = [datetime.strptime(date, date_format) for date in df['date']] df['date'] = df['date'].dt.date df['time'] = df['time'].str.strip() df['time'] = pd.to_datetime(df['time'], format=time_format).dt.time datetimes = ['date', 'time'] df['timestamp'] =df[datetimes].apply(lambda row: ' '.join(row.values.astype(str)), axis=1) df.insert(0, 'timestamp', df.pop('timestamp')) df['timestamp'] = pd.to_datetime(df['timestamp'], format='%Y-%m-%d %H:%M:%S') df.sort_values('timestamp', inplace=True) df.reset_index(drop=True, inplace=True) drop_list = ['ts', 'date', 'time', 'type'] df = df.drop(drop_list, axis=1) if temporal == True: test_df = df test_df = add_temporal_info('IoT_modbus', test_df, test_df.timestamp) test_df.set_index(test_df['timestamp'], inplace=True) test_df = np.array(test_df.drop(['timestamp'], axis=1)) test_df = test_df[:, 3:-1].astype(float) labels = test_df[:, -1].astype(int) else: if decomposition == True: test_df = df test_holiday = np.array(add_temporal_info('IoT_modbus', test_df, test_df.timestamp)['holiday']) test_weekend = np.array(add_temporal_info('IoT_modbus', test_df, test_df.timestamp)['is_weekend']) test_temporal = (test_holiday + test_weekend).reshape(-1, 1) test_df = np.array(df) labels = test_df[:, -1].astype(int) test_df = test_df[:, 1:-1].astype(float) scaler = MinMaxScaler(feature_range=(0, 1)) test_df = scaler.fit_transform(test_df) if decomposition == True: stl_loader = load_STL_results(test_df) test_seasonal = stl_loader['test_seasonal'] test_trend = stl_loader['test_trend'] test_normal = test_seasonal + test_trend x_test_normal = _create_sequences(test_normal, seq_length, stride, historical) print("#"*10, "Deep Decomposer 
Generating...", "#"*10) start_time = time.time() deep_pattern = decompose_model(x_test_normal, seq_length) deep_test = deep_pattern['rec_test'] deep_test_pattern = _decreate_sequences(deep_test) print(f"Deep Decomposer Taken: {time.time() - start_time}") test_resid = (test_df - deep_test_pattern) * (1 + weight * test_temporal) # Wavelet transformation start_time = time.time() print('Start Wavelet Transform') test_resid_wav = _wavelet(test_resid) test_resid_wavelet = _wavelet(test_resid_wav) for _ in range(wavelet_num): test_resid_wavelet = _wavelet(test_resid_wavelet) print(f'Wavelet Transform Taken: {time.time() - start_time}') if temporal == True: if seq_length > 0: x_test.append(_create_sequences(test_df, seq_length, stride, historical)) else: x_test.append(test_df) else: if seq_length > 0: x_test.append(_create_sequences(test_df, seq_length, stride, historical)) y_test.append(_create_sequences(labels, seq_length, stride, historical)) else: x_test.append(test_df) y_test.append(labels) if decomposition == True: x_test_resid.append(_create_sequences(test_resid_wavelet, seq_length, stride, historical)) y_segment_test.append(_count_anomaly_segments(labels)[1]) label_seq.append(labels) # For plot traffic raw data test_seq.append(test_df) # Only return temporal auxiliary information if temporal == True: return {'x_test': x_test} # There are four cases. # 1) Decompose time series and evaluate through traditional metrics if (decomposition == True) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'x_test_resid': x_test_resid, 'label_seq': label_seq, 'test_seq': test_seq} # 2) Decompose time series and evalutate new metrics elif (decomposition == True) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test, 'x_test_resid': x_test_resid} # 3) Evaluate through new metrics with common methods elif (decomposition == False) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test} # 4) Evaluate through traditional metrics with common methods elif (decomposition == False) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'label_seq': label_seq, 'test_seq': test_seq} def preprocess_kpi_file(df, seq_length, stride, weight, wavelet_num, historical=False, temporal=False, decomposition=False, segmentation=False): x_train, x_test, y_test = [], [], [] y_segment_test = [] x_train_resid, x_test_resid = [], [] label_seq, test_seq = [], [] # for avoid RuntimeWarning: invalid value encountered in true_divide (wavelet) df['value'] = df['value'] * 1e+6 if temporal == True: test_df = df[['timestamp', 'value', 'label']] test_df['timestamp'] = pd.to_datetime(test_df['timestamp'], unit='s') test_df = np.array(add_temporal_info('kpi', test_df, test_df.timestamp)) test_df = test_df[:, 2:-1].astype(float) else: if decomposition == True: test_df = df[['timestamp', 'value', 'label']] test_df['timestamp'] = pd.to_datetime(test_df['timestamp'], unit='s') test_holiday = np.array(add_temporal_info('kpi', test_df, test_df.timestamp)['holiday']) test_weekend = np.array(add_temporal_info('kpi', test_df, test_df.timestamp)['is_weekend']) test_temporal = (test_holiday + test_weekend).reshape(-1, 1) test_df = df['value'].values.reshape(-1, 1) labels = df['label'].values.astype(int) scaler = MinMaxScaler(feature_range=(0, 1)) test_df = scaler.fit_transform(test_df) if decomposition == True: stl_loader = load_STL_results(test_df) test_seasonal = stl_loader['test_seasonal'] test_trend = 
stl_loader['test_trend'] test_normal = test_seasonal + test_trend x_test_normal = _create_sequences(test_normal, seq_length, stride, historical) print("#"*10, "Deep Decomposer Generating...", "#"*10) deep_pattern = decompose_model(x_test_normal, 'kpi') deep_test = deep_pattern['rec_test'] deep_test_pattern = _decreate_sequences(deep_test) test_resid = (test_df - deep_test_pattern) * (1 + weight * test_temporal) if temporal == True: if seq_length > 0: x_test.append(_create_sequences(test_df, seq_length, stride, historical)) else: x_test.append(test_df) else: if seq_length > 0: x_test.append(_create_sequences(test_df, seq_length, stride, historical)) y_test.append(_create_sequences(labels, seq_length, stride, historical)) else: x_test.append(test_df) y_test.append(labels) if decomposition == True: x_test_resid.append(_create_sequences(test_resid, seq_length, stride, historical)) y_segment_test.append(_count_anomaly_segments(labels)[1]) label_seq.append(labels) # For plot traffic raw data test_seq.append(test_df) # Only return temporal auxiliary information if temporal == True: return {'x_test': x_test} # There are four cases. # 1) Decompose time series and evaluate through traditional metrics if (decomposition == True) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'x_test_resid': x_test_resid, 'label_seq': label_seq, 'test_seq': test_seq} # 2) Decompose time series and evalutate new metrics elif (decomposition == True) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test, 'x_test_resid': x_test_resid} # 3) Evaluate through new metrics with common methods elif (decomposition == False) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test} # 4) Evaluate through traditional metrics with common methods elif (decomposition == False) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'label_seq': label_seq, 'test_seq': test_seq} def preprocess_energy_file(df, seq_length, stride, weight, wavelet_num, historical=False, temporal=False, decomposition=False, segmentation=False): x_train, x_test, y_test = [], [], [] y_segment_test = [] x_train_resid, x_test_resid = [], [] label_seq, test_seq = [], [] test_df = df # test_df['date'] = pd.to_datetime(test_df['date'], format='%Y-%m-%d %H:%M:%S') if temporal == True: test_df = np.array(add_temporal_info('energy', test_df, test_df.date)) test_df = test_df[:, 1:-1].astype(float) labels = test_df[:, -1].astype(int) else: if decomposition == True: test_holiday = np.array(add_temporal_info('energy', test_df, test_df.date)['holiday']) test_weekend = np.array(add_temporal_info('energy', test_df, test_df.date)['is_weekend']) test_temporal = (test_holiday + test_weekend).reshape(-1, 1) test_df = np.array(test_df) labels = test_df[:, -1].astype(int) test_df = test_df[:, 1:-1].astype(float) scaler = MinMaxScaler(feature_range=(0, 1)) test_df = scaler.fit_transform(test_df) if decomposition == True: stl_loader = load_STL_results(test_df) test_seasonal = stl_loader['test_seasonal'] test_trend = stl_loader['test_trend'] test_normal = test_seasonal + test_trend x_test_normal = _create_sequences(test_normal, seq_length, stride, historical) print("#"*10, "Deep Decomposer Generating...", "#"*10) deep_pattern = decompose_model(x_test_normal, 'energy') deep_test = deep_pattern['rec_test'] deep_test_pattern = _decreate_sequences(deep_test) test_resid = (test_df - deep_test_pattern) * (1 + weight * test_temporal) # Wavelet 
transformation test_resid_wav = _wavelet(test_resid) test_resid_wavelet = _wavelet(test_resid_wav) for iter in range(wavelet_num): test_resid_wavelet = _wavelet(test_resid_wavelet) if temporal == True: if seq_length > 0: x_test.append(_create_sequences(test_df, seq_length, stride, historical)) else: x_test.append(test_df) else: if seq_length > 0: x_test.append(_create_sequences(test_df, seq_length, stride, historical)) y_test.append(_create_sequences(labels, seq_length, stride, historical)) else: x_test.append(test_df) y_test.append(labels) if decomposition == True: x_test_resid.append(_create_sequences(test_resid_wavelet, seq_length, stride, historical)) y_segment_test.append(_count_anomaly_segments(labels)[1]) label_seq.append(labels) # For plot traffic raw data test_seq.append(test_df) # Only return temporal auxiliary information if temporal == True: return {'x_test': x_test} # There are four cases. # 1) Decompose time series and evaluate through traditional metrics if (decomposition == True) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'x_test_resid': x_test_resid, 'label_seq': label_seq, 'test_seq': test_seq} # 2) Decompose time series and evalutate new metrics elif (decomposition == True) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test, 'x_test_resid': x_test_resid} # 3) Evaluate through new metrics with common methods elif (decomposition == False) and (segmentation == True): return {'x_test': x_test, 'y_test': label_seq, 'y_segment_test': y_segment_test} # 4) Evaluate through traditional metrics with common methods elif (decomposition == False) and (segmentation == False): return {'x_test': x_test, 'y_test': y_test, 'label_seq': label_seq, 'test_seq': test_seq} def load_detector(dataset_name): return tf.keras.models.load_model(f'pretrained_models/AD_{dataset_name}') # tf.keras.models.load_model('pretrained_models/Samsung') def run_detector(upload_data): stride = 1 SEED = 0 MODEL = "Bi-GRU" TEMPORAL = 0 DECOMPOSITION = 1 # 0 SEGMENTATION = 1 # 0 lamda_t = -0.7 wavelet_num = 3 dataset_name = get_dataset_name(upload_data.columns) detector = load_detector(dataset_name) # implicitly check dataset_name by feature number ? column names ? etc ? if dataset_name == "IoT": preprocessor = preprocess_iot_file seq_length = 60 elif dataset_name == 'samsung': preprocessor = preprocess_samsung_file seq_length = 36 elif dataset_name == 'energy': preprocessor = preprocess_energy_file seq_length = 60 aux_data = None if TEMPORAL: aux_data = preprocessor(upload_data, seq_length, stride, lamda_t, wavelet_num, temporal=TEMPORAL) data = preprocessor(upload_data, seq_length, stride, lamda_t, wavelet_num, decomposition=DECOMPOSITION, segmentation=SEGMENTATION) # preprocess file print('start detection phase') start_time = time.time() anomaly_scores, th = detect_anomaly(data, aux_data, detector, detector, MODEL, TEMPORAL, DECOMPOSITION, SEGMENTATION) print(f'dection phase taken {time.time() - start_time}') return anomaly_scores, th
python
import datetime
import time

from open_publishing.core.enums import EventTarget, EventAction, EventType


class Events(object):
    def __init__(self, ctx):
        self._ctx = ctx

    def get(self, references=None, target=None, action=None, type=None, filters=None,
            since=None, till=None, history=False):
        """Return specified events.

        The since parameter filters all events since the given timestamp.
        The till parameter filters all events till the given timestamp.
        If history is set to False (default), only the latest event per object is returned.
        If history is set to True, all events are returned.
        """
        event_types = self._get_event_types(target, action, type, filters)
        references = self._normalize_references(references)
        from_timestamp = self._normalize_timestamp(since)
        to_timestamp = self._normalize_timestamp(till)
        method = 'history' if history else 'list_status'
        response = self._ctx.gjp.fetch_events(method=method,
                                              event_types=event_types,
                                              references=references,
                                              from_timestamp=from_timestamp,
                                              to_timestamp=to_timestamp)
        execution_timestamp = datetime.datetime.fromtimestamp(response['execution_timestamp'])
        result = EventsList(execution_timestamp)

        def add_items(items):
            for item in items:
                timestamp = None
                if 'last_modified' in item:
                    timestamp = item['last_modified']
                if 'log_time' in item:
                    timestamp = item['log_time']
                result.append(EventsList.Event(target=EventTarget.from_id(item['target']),
                                               action=EventAction.from_id(item['action']),
                                               type=EventType.from_id(item['type']),
                                               timestamp=datetime.datetime.fromtimestamp(timestamp),
                                               guid=(item['source_type'] + '.' + str(item['reference_id'])).lower(),
                                               app=item.get('app', None),
                                               uuid=item.get('uuid', None)))

        add_items(response['items'])
        while 'resumption_token' in response:
            response = self._ctx.gjp.fetch_events(method=method, resumption_token=response['resumption_token'])
            add_items(response['items'])
        result.sort(key=lambda a: a.timestamp)
        return result

    def last_event(self, references, target=None, action=None, type=None, filters=None):
        event_types = self._get_event_types(target, action, type, filters)
        if isinstance(references, (list, tuple)):
            str_references = ','.join(set(references))
        else:
            raise TypeError('references: expected list or tuple, got: {0}'.format(type(references)))
        events = {}

        def add_items(items):
            for item in items:
                guid = (item['source_type'] + '.' + str(item['reference_id'])).lower()
                if guid not in events or events[guid]['last_modified'] < item['last_modified']:
                    events[guid] = item

        response = self._ctx.gjp.fetch_events(method='list_status',
                                              event_types=event_types,
                                              references=str_references)
        execution_timestamp = datetime.datetime.fromtimestamp(response['execution_timestamp'])
        add_items(response['items'])
        while 'resumption_token' in response:
            response = self._ctx.gjp.fetch_events('list_status', resumption_token=response['resumption_token'])
            add_items(response['items'])

        result = EventsList(execution_timestamp)
        for ref in references:
            guid = ref.lower()
            if guid in events:
                result.append(EventsList.Event(target=EventTarget.from_id(events[guid]['target']),
                                               action=EventAction.from_id(events[guid]['action']),
                                               type=EventType.from_id(events[guid]['type']),
                                               timestamp=datetime.datetime.fromtimestamp(events[guid]['last_modified']),
                                               guid=guid))
            else:
                result.append(None)
        return result

    def _get_event_types(self, target, action, type, filters):
        if target is not None or action is not None or type is not None:
            if filters is not None:
                raise KeyError('filters or target/action/type should be set, not both')
            elif ((target is not None and target not in EventTarget)
                  or (action is not None and action not in EventAction)
                  or (type is not None and type not in EventType)):
                raise ValueError('target/action/type should be None or from op.events.target/action/type respectively, '
                                 'got: {0}, {1}, {2}'.format(target, action, type))
            else:
                event_types = '({target},{action},{type})'.format(target=target if target is not None else '',
                                                                  action=action if action is not None else '',
                                                                  type=type if type is not None else '')
        else:
            if filters is None:
                event_types = '(,,)'  # All events
            else:
                if not isinstance(filters, list):
                    raise ValueError('filters should be list of tuples of (op.events.target, op.events.action, op.event.type), got: {0}'.format(filters))
                event_types = []
                for target, action, type in filters:
                    if ((target is not None and target not in EventTarget)
                            or (action is not None and action not in EventAction)
                            or (type is not None and type not in EventType)):
                        raise ValueError('filters should be list of tuples of (op.events.target|None, op.events.action|None, op.event.type|None), got: {0}'.format(filters))
                    else:
                        event_types.append('({target},{action},{type})'.format(target=target if target is not None else '',
                                                                               action=action if action is not None else '',
                                                                               type=type if type is not None else ''))
                event_types = ';'.join(event_types)
        return event_types

    @staticmethod
    def _normalize_timestamp(timestamp):
        """Normalize timestamp to the format needed by the API."""
        if timestamp is None:
            return None
        if not isinstance(timestamp, (datetime.datetime, datetime.date)):
            raise TypeError('since should be datetime.datetime or datetime.date, got {0}'.format(timestamp))
        return int(time.mktime(timestamp.timetuple()))

    @staticmethod
    def _normalize_references(references):
        if references is None:
            return None
        if not isinstance(references, (list, tuple)):
            raise TypeError('references: expected list or tuple, got: {0}'.format(type(references)))
        return ','.join(references)


class EventsList(list):
    """List of Open Publishing Events."""

    class Event(object):
        """Open Publishing Event object."""

        def __init__(self, target, action, type, timestamp, guid, app=None, uuid=None):
            self._target = target
            self._action = action
            self._type = type
            self._timestamp = timestamp
            self._guid = guid
            self._app = app
            self._uuid = uuid

        @property
        def target(self):
            return self._target

        @property
        def action(self):
            return self._action

        @property
        def type(self):
            return self._type

        @property
        def tuple(self):
            return (self.target, self.action, self.type)

        @property
        def timestamp(self):
            return self._timestamp

        @property
        def guid(self):
            return self._guid

        @property
        def app(self):
            return self._app

        @property
        def uuid(self):
            return self._uuid

        def __repr__(self):
            '''Returns representation of the object'''
            return "{}(guid={}, target={}, action={}, type={}, app={})".format(
                self.__class__.__name__, self.guid, self.target, self.action, self.type, self.app)

    def __init__(self, execution_timestamp):
        super(EventsList, self).__init__([])
        self._execution_timestamp = execution_timestamp

    @property
    def execution_timestamp(self):
        return self._execution_timestamp
python
def add(x, y):
    return x + y


def double(x):
    return x + x
python
import math
import datetime

block_size = 0.5


def block_name(lat, lon):
    discretized_lat = (math.floor(lat / block_size) + 0.5) * block_size
    discretized_lon = (math.floor(lon / block_size) + 0.5) * block_size
    return (discretized_lat, discretized_lon)


def inside_polygon(x, y, points):
    """
    Return True if a coordinate (x, y) is inside a polygon defined by
    a list of vertices [(x1, y1), (x2, y2), ... , (xN, yN)].

    Reference: http://www.ariel.com.au/a/python-point-int-poly.html
    """
    n = len(points)
    inside = False
    p1y, p1x = points[0]
    for i in range(1, n + 1):
        p2y, p2x = points[i % n]
        if y > min(p1y, p2y):
            if y <= max(p1y, p2y):
                if x <= max(p1x, p2x):
                    if p1y != p2y:
                        xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
                    if p1x == p2x or x <= xinters:
                        inside = not inside
        p1x, p1y = p2x, p2y
    return inside


def get_covered_blocks(polygon):
    if polygon[0] != polygon[-1]:
        polygon.append(polygon[0])
    lats = [pos[1] for pos in polygon]
    max_lat = max(lats)
    min_lat = min(lats)
    longs = [pos[0] for pos in polygon]
    max_long = max(longs)
    min_long = min(longs)
    max_block = block_name(max_lat, max_long)
    min_block = block_name(min_lat, min_long)
    covered_blocks = []
    for lat_i in range(int((max_block[0] - min_block[0]) / block_size)):
        for long_i in range(int((max_block[1] - min_block[1]) / block_size)):
            la, lo = min_block[0] + lat_i * block_size, min_block[1] + long_i * block_size
            if inside_polygon(la, lo, polygon):
                covered_blocks.append((la, lo))
    return covered_blocks


def add_1_day(string):
    new_date = datetime.datetime.strptime(string, "%Y%m%d") + datetime.timedelta(days=1)
    return datetime.datetime.strftime(new_date, '%Y%m%d')


def sub_1_day(string):
    new_date = datetime.datetime.strptime(string, "%Y%m%d") - datetime.timedelta(days=1)
    return datetime.datetime.strftime(new_date, '%Y%m%d')


def wx_json_2_timestamp(string):
    return int(datetime.datetime.strftime(datetime.datetime.strptime(string, "%Y-%m-%dT%H:%M:%SZ"), '%s')) * 1000
python
""" Results represent Prefect Task inputs and outputs. In particular, anytime a Task runs, its output is encapsulated in a `Result` object. This object retains information about what the data is, and how to "handle" it if it needs to be saved / retrieved at a later time (for example, if this Task requests for its outputs to be cached or checkpointed). An instantiated Result object has the following attributes: - a `value`: the value of a Result represents a single piece of data - a `safe_value`: this attribute maintains a reference to a `SafeResult` object which contains a "safe" representation of the `value`; for example, the `value` of a `SafeResult` might be a URI or filename pointing to where the raw data lives - a `result_handler` that holds onto the `ResultHandler` used to read / write the value to / from its handled representation To distinguish between a Task that runs but does not return output from a Task that has yet to run, Prefect also provides a `NoResult` object representing the _absence_ of computation / data. This is in contrast to a `Result` whose value is `None`. """ from typing import Any from prefect.engine.result_handlers import ResultHandler class ResultInterface: """ A necessary evil so that Results can store SafeResults and NoResults in its attributes without pickle recursion problems. """ def __eq__(self, other: Any) -> bool: if type(self) == type(other): eq = True for attr in self.__dict__: if attr.startswith("_"): continue eq &= getattr(self, attr, object()) == getattr(other, attr, object()) return eq return False def __repr__(self) -> str: val = self.value # type: ignore return "<{type}: {val}>".format(type=type(self).__name__, val=repr(val)) def to_result(self, result_handler: ResultHandler = None) -> "ResultInterface": """ If no result handler provided, returns self. If a ResultHandler is provided, however, it will become the new result handler for this result. Args: - result_handler (optional): an optional result handler to override the current handler Returns: - ResultInterface: a potentially new Result object """ if result_handler is not None: self.result_handler = result_handler return self def store_safe_value(self) -> None: """Performs no computation.""" class Result(ResultInterface): """ A representation of the result of a Prefect task; this class contains information about the value of a task's result, a result handler specifying how to serialize or store this value securely, and a `safe_value` attribute which holds information about the current "safe" representation of this result. 
Args: - value (Any): the value of the result - result_handler (ResultHandler, optional): the result handler to use when storing / serializing this result's value; required if you intend on persisting this result in some way """ def __init__(self, value: Any, result_handler: ResultHandler = None): self.value = value self.safe_value = NoResult # type: SafeResult self.result_handler = result_handler # type: ignore def store_safe_value(self) -> None: """ Populate the `safe_value` attribute with a `SafeResult` using the result handler """ # don't bother with `None` values if self.value is None: return if self.safe_value == NoResult: assert isinstance( self.result_handler, ResultHandler ), "Result has no ResultHandler" # mypy assert value = self.result_handler.write(self.value) self.safe_value = SafeResult( value=value, result_handler=self.result_handler ) class SafeResult(ResultInterface): """ A _safe_ representation of the result of a Prefect task; this class contains information about the serialized value of a task's result, and a result handler specifying how to deserialize this value Args: - value (Any): the safe represenation of a value - result_handler (ResultHandler): the result handler to use when reading this result's value """ def __init__(self, value: Any, result_handler: ResultHandler): self.value = value self.result_handler = result_handler @property def safe_value(self) -> "SafeResult": return self def to_result(self, result_handler: ResultHandler = None) -> "ResultInterface": """ Read the value of this result using the result handler and return a fully hydrated Result. If a new ResultHandler is provided, it will instead be used to read the underlying value and the `result_handler` attribute of this result will be reset accordingly. Args: - result_handler (optional): an optional result handler to override the current handler Returns: - ResultInterface: a potentially new Result object """ if result_handler is not None: self.result_handler = result_handler value = self.result_handler.read(self.value) res = Result(value=value, result_handler=self.result_handler) res.safe_value = self return res class NoResultType(SafeResult): """ A `SafeResult` subclass representing the _absence_ of computation / output. A `NoResult` object returns itself for its `value` and its `safe_value`. """ def __init__(self) -> None: super().__init__(value=None, result_handler=ResultHandler()) def __eq__(self, other: Any) -> bool: if type(self) == type(other): return True else: return False def __repr__(self) -> str: return "<No result>" def __str__(self) -> str: return "NoResult" def to_result(self, result_handler: ResultHandler = None) -> "ResultInterface": """ Performs no computation and returns self. Args: - result_handler (optional): a passthrough for interface compatibility """ return self NoResult = NoResultType()
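# A rough usage sketch of the classes above. DummyHandler is invented here for
# illustration only; real handlers (e.g. ones writing to cloud storage) live in
# prefect.engine.result_handlers.
class DummyHandler(ResultHandler):
    def write(self, value):
        return repr(value)       # pretend this is a URI / filename

    def read(self, uri):
        return eval(uri)         # pretend this fetches the raw data back


r = Result(value=[1, 2, 3], result_handler=DummyHandler())
assert r.safe_value is NoResult          # nothing has been stored yet

r.store_safe_value()
print(r.safe_value)                      # <SafeResult: '[1, 2, 3]'>
print(r.safe_value.to_result().value)    # [1, 2, 3], re-hydrated via the handler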
python
import numpy import pytest from pauxy.systems.ueg import UEG from pauxy.estimators.ueg import fock_ueg, local_energy_ueg from pauxy.estimators.greens_function import gab from pauxy.utils.testing import get_random_wavefunction from pauxy.utils.misc import timeit @pytest.mark.unit def test_fock_build(): sys = UEG({'rs': 2.0, 'ecut': 2, 'nup': 7, 'ndown': 7, 'thermal': True}) numpy.random.seed(7) psi = get_random_wavefunction(sys.nelec, sys.nbasis).real trial = numpy.eye(sys.nbasis, sys.nelec[0]) G = numpy.array([gab(psi[:,:sys.nup], psi[:,:sys.nup]), gab(psi[:,sys.nup:], psi[:,sys.nup:])]).astype(numpy.complex128) nb = sys.nbasis # from pyscf import gto, scf, ao2mo # mol = gto.M() # mol.nelec = sys.nelec # mf = scf.UHF(mol) # U = sys.compute_real_transformation() # h1_8 = numpy.dot(U.conj().T, numpy.dot(sys.H1[0], U)) # mf.get_hcore = lambda *args: h1_8 # mf.get_ovlp = lambda *args: numpy.eye(nb) # mf._eri = sys.eri_8() # mf._eri = ao2mo.restore(8, eri_8, nb) # veff = mf.get_veff(dm=dm) eris = sys.eri_4() F = fock_ueg(sys, G) vj = numpy.einsum('pqrs,xqp->xrs', eris, G) vk = numpy.einsum('pqrs,xqr->xps', eris, G) fock = numpy.zeros((2,33,33), dtype=numpy.complex128) fock[0] = sys.H1[0] + vj[0] + vj[1] - vk[0] fock[1] = sys.H1[1] + vj[0] + vj[1] - vk[1] assert numpy.linalg.norm(fock - F) == pytest.approx(0.0) @pytest.mark.unit def test_build_J(): sys = UEG({'rs': 2.0, 'ecut': 2.0, 'nup': 7, 'ndown': 7, 'thermal': True}) Gkpq = numpy.zeros((2,len(sys.qvecs)), dtype=numpy.complex128) Gpmq = numpy.zeros((2,len(sys.qvecs)), dtype=numpy.complex128) psi = get_random_wavefunction(sys.nelec, sys.nbasis).real trial = numpy.eye(sys.nbasis, sys.nelec[0]) G = numpy.array([gab(psi[:,:sys.nup], psi[:,:sys.nup]), gab(psi[:,sys.nup:], psi[:,sys.nup:])]) from pauxy.estimators.ueg import coulomb_greens_function for s in [0,1]: coulomb_greens_function(len(sys.qvecs), sys.ikpq_i, sys.ikpq_kpq, sys.ipmq_i, sys.ipmq_pmq, Gkpq[s], Gpmq[s], G[s]) from pauxy.estimators.ueg import build_J J1 = timeit(build_J)(sys, Gpmq, Gkpq) from pauxy.estimators.ueg_kernels import build_J_opt J2 = timeit(build_J_opt)(len(sys.qvecs), sys.vqvec, sys.vol, sys.nbasis, sys.ikpq_i, sys.ikpq_kpq, sys.ipmq_i, sys.ipmq_pmq, Gkpq, Gpmq) assert numpy.linalg.norm(J1-J2) == pytest.approx(0.0) @pytest.mark.unit def test_build_K(): sys = UEG({'rs': 2.0, 'ecut': 2.0, 'nup': 7, 'ndown': 7, 'thermal': True}) Gkpq = numpy.zeros((2,len(sys.qvecs)), dtype=numpy.complex128) Gpmq = numpy.zeros((2,len(sys.qvecs)), dtype=numpy.complex128) psi = get_random_wavefunction(sys.nelec, sys.nbasis).real trial = numpy.eye(sys.nbasis, sys.nelec[0]) G = numpy.array([gab(psi[:,:sys.nup], psi[:,:sys.nup]), gab(psi[:,sys.nup:], psi[:,sys.nup:])]).astype(numpy.complex128) from pauxy.estimators.ueg import build_K from pauxy.estimators.ueg_kernels import build_K_opt K1 = timeit(build_K)(sys, G) K2 = timeit(build_K_opt)(len(sys.qvecs), sys.vqvec, sys.vol, sys.nbasis, sys.ikpq_i, sys.ikpq_kpq, sys.ipmq_i, sys.ipmq_pmq, G) assert numpy.linalg.norm(K1-K2) == pytest.approx(0.0)
python
import subprocess
import re

host = ["www.google.com", "192.0.0.25"]
rounds = 32

ping = subprocess.Popen(
    ["ping", "-c", str(rounds), host[1]],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE
)
out, error = ping.communicate()
print "Output : %s" % out

# Parse the rtt summary line, e.g. "rtt min/avg/max/mdev = 0.042/0.051/0.063/0.007 ms"
matcher = re.compile(r"rtt min/avg/max/mdev = (\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+)")
values = matcher.search(out).groups()

print "Min : %s" % values[0]
print "Average: %s" % values[1]
print "Maximum: %s" % values[2]
print "MDeviation: %s" % values[3]
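# Quick sanity check of the rtt-parsing regex against a fabricated summary line
# (the numbers below are made up for illustration).
import re

sample = "rtt min/avg/max/mdev = 9.123/11.456/15.789/1.234 ms"
pattern = re.compile(r"rtt min/avg/max/mdev = (\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+)/(\d+\.\d+)")
print(pattern.search(sample).groups())   # ('9.123', '11.456', '15.789', '1.234')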
python
#!/usr/bin/env python3
# This script is used to avoid issues with `xcopy.exe` under Windows Server 2016 (https://github.com/moby/moby/issues/38425)
import glob, os, shutil, sys

# If the destination is an existing directory then expand wildcards in the source
destination = sys.argv[2]
if os.path.isdir(destination):
    sources = glob.glob(sys.argv[1])
else:
    sources = [sys.argv[1]]

# Copy each of our source files/directories
for source in sources:
    if os.path.isdir(source):
        dest = os.path.join(destination, os.path.basename(source))
        shutil.copytree(source, dest)
    else:
        shutil.copy2(source, destination)
    print('Copied {} to {}.'.format(source, destination), file=sys.stderr)
python
from django.urls import reverse from rest_framework import status from rest_framework.test import APITestCase class TicketTests(APITestCase): def setUp(self): """ Configurations to be made available before each individual test case inheriting from this class. """ url = reverse('account-registration') data = { "username": "Adenike", "email": "[email protected]", "password": "dayo" } self.response = self.client.post(url, data, format='json') url = reverse('create-flight') data = { "flight_type": "economy", "to_location": "Abuja", "from_location": "Lagos", "departure_date": "2019-08-22T14:47:05Z", "return_date": "2019-08-27T14:47:05Z", "total_seats": 50, "available_seats": 37, } token = 'Bearer ' + self.response['Authorization'] self.client.post(url, data, HTTP_AUTHORIZATION=token, format='json') url = '/ticket/flight/13/' data = { 'cost': 67 } self.client.post(url, data, HTTP_AUTHORIZATION=token, format='json') def test_ticket_is_created_successfully(self): """ Ensure a ticket is successfully created """ url = '/ticket/flight/12/' data = {"ticket_class":"BS","cost":0} token = 'Bearer ' + self.response['Authorization'] response = self.client.post(url, data, HTTP_AUTHORIZATION=token, format='json') self.assertEqual(response.status_code, status.HTTP_201_CREATED) def test_all_tickets_created_successfully(self): """ Ensure all tickets are gotten """ url = '/ticket/' token = 'Bearer ' + self.response['Authorization'] response = self.client.get(url, HTTP_AUTHORIZATION=token, format='json') self.assertEqual(response.status_code, status.HTTP_200_OK)
python
from interface import create_new_user
import unittest
from passlock import User, Credentials


class TestClass(unittest.TestCase):
    '''
    A Test class that defines test cases for the user behaviour
    '''

    def setUp(self):
        '''
        set up method that runs before each test case
        '''
        self.new_user = User('mark', 'mark002.')
        return super().setUp()

    def test_init(self):
        '''
        test case that checks if the object has been initialized correctly
        '''
        self.assertEqual(self.new_user.username, 'mark')
        self.assertEqual(self.new_user.password, 'mark002.')

    def test_save_user(self):
        '''
        test case to check if the user object is saved in the user list
        '''
        self.new_user.save_user()
        self.assertEqual(len(User.user_list), 1)


class TestCredentials(unittest.TestCase):
    '''
    A Test class that defines test cases for the credentials
    '''

    def setUp(self):
        '''
        method that runs before each test case
        '''
        self.new_credentials = Credentials('Gmail', 'mark_kk', 'mark002.')

    def test_init(self):
        '''
        test case that checks if credential instances have been initialized correctly
        '''
        self.assertEqual(self.new_credentials.account, 'Gmail')
        self.assertEqual(self.new_credentials.userName, 'mark_kk')
        self.assertEqual(self.new_credentials.password, 'mark002.')

    def test_save_credentials(self):
        '''
        test case to check if the credentials object is saved in the credentials list
        '''
        self.new_credentials.save_details()
        self.assertEqual(len(Credentials.Credentials_list), 1)

    def tearDown(self):
        '''
        method that cleans up after each test case has run
        '''
        Credentials.Credentials_list = []

    def test_save_many_accounts(self):
        '''
        test to check if many credentials can be saved in the credentials list
        '''
        self.new_credentials.save_details()
        test_credentials = Credentials('mark', 'mark002', 'markoo2')
        test_credentials.save_details()
        self.assertEqual(len(Credentials.Credentials_list), 2)


if __name__ == '__main__':
    unittest.main()
python
import unittest

import numpy as np
import theano
import theano.tensor as T

from daps.model import weigthed_binary_crossentropy


class test_loss_functions(unittest.TestCase):
    def test_weigthed_binary_crossentropy(self):
        w0_val, w1_val = 0.5, 1.0
        x_val, y_val = np.random.rand(5, 3), np.random.randint(0, 2, (5, 3))
        expected_val = -(w1_val * y_val * np.log(x_val) +
                         w0_val * (1 - y_val) * np.log(1 - x_val))

        w0, w1 = T.constant(w0_val), T.constant(w1_val)
        x, y = T.matrix('pred'), T.matrix('true')
        loss = weigthed_binary_crossentropy(x, y, w0, w1)
        f = theano.function([x, y], loss, allow_input_downcast=True)
        np.testing.assert_array_almost_equal(expected_val, f(x_val, y_val))
python
''' Filters that operate on ImageStim inputs. ''' import numpy as np from PIL import Image from PIL import ImageFilter as PillowFilter from pliers.stimuli.image import ImageStim from .base import Filter class ImageFilter(Filter): ''' Base class for all ImageFilters. ''' _input_type = ImageStim class ImageCroppingFilter(ImageFilter): ''' Crops an image. Args: box (tuple): a 4-length tuple containing the left, upper, right, and lower coordinates for the desired region of the image. If none is specified, crops out black borders from the image. ''' _log_attributes = ('box',) VERSION = '1.0' def __init__(self, box=None): self.box = box super().__init__() def _filter(self, stim): if self.box: x0, y0, x1, y1 = self.box else: pillow_img = Image.fromarray(stim.data) x0, y0, x1, y1 = pillow_img.getbbox() new_img = stim.data[y0:y1, x0:x1] return ImageStim(stim.filename, data=new_img) class ImageResizingFilter(ImageFilter): ''' Resizes an image, while optionally maintaining aspect ratio. Args: size (tuple of two ints): new size of the image. maintain_aspect_ratio (boolean): if true, resize the image while maintaining aspect ratio, and pad the rest with zero values. Otherwise, potentially distort the image during resizing to fit the new size. resample str: resampling method. One of 'nearest', 'bilinear', 'bicubic', 'lanczos', 'box', and 'hamming'. See https://pillow.readthedocs.io/en/5.1.x/handbook/concepts.html#concept-filters for more information. ''' _log_attributes = ('size', 'maintain_aspect_ratio', 'resample') VERSION = '1.0' def __init__(self, size, maintain_aspect_ratio=False, resample='bicubic'): self.size = size self.maintain_aspect_ratio = maintain_aspect_ratio resampling_mapping = { 'nearest': Image.NEAREST, 'bilinear': Image.BILINEAR, 'bicubic': Image.BICUBIC, 'lanczos': Image.LANCZOS, 'box': Image.BOX, 'hamming': Image.HAMMING, } if resample.lower() not in resampling_mapping.keys(): raise ValueError( "Unknown resampling method '{}'. Allowed values are '{}'" .format(resample, "', '".join(resampling_mapping.keys()))) self.resample = resampling_mapping[resample] super().__init__() def _filter(self, stim): pillow_img = Image.fromarray(stim.data) if not self.maintain_aspect_ratio: new_img = np.array( pillow_img.resize(self.size, resample=self.resample)) else: # Resize the image to the requested size in one of the dimensions. # We then create a black image of the requested size and paste the # resized image into the middle of this new image. The effect is # that there is a black border on the top and bottom or the left # and right of the resized image. orig_size = pillow_img.size ratio = max(self.size) / max(orig_size) inter_size = (np.array(orig_size) * ratio).astype(np.int32) inter_img = pillow_img.resize(inter_size, resample=self.resample) new_img = Image.new('RGB', self.size) upper_left = ( (self.size[0] - inter_size[0]) // 2, (self.size[1] - inter_size[1]) // 2) new_img.paste(inter_img, box=upper_left) new_img = np.array(new_img) return ImageStim(stim.filename, data=new_img) class PillowImageFilter(ImageFilter): ''' Uses the ImageFilter module from PIL to run a pre-defined image enhancement filter on an ImageStim. Sample of available filters: BLUR, CONTOUR, DETAIL, EDGE_ENHANCE, EDGE_ENHANCE_MORE, EMBOSS, FIND_EDGES, SMOOTH, SMOOTH_MORE, SHARPEN Args: image_filter (str or type or ImageFilter): specific name or type of the filter to be used, with supporting *args and **kwargs. 
Also accepted to directly pass an instance of PIL's ImageFilter.Filter args, kwargs: Optional positional and keyword arguments passed onto the pillow ImageFilter initializer. ''' _log_attributes = ('filter',) def __init__(self, image_filter=None, *args, **kwargs): if image_filter is None: pillow_url = "http://pillow.readthedocs.io/en/3.4.x/reference/" "ImageFilter.html#filters" raise ValueError("Must enter a valid filter to use. See %s" "for a list of valid PIL filters." % pillow_url) if isinstance(image_filter, type): image_filter = image_filter(*args, **kwargs) if isinstance(image_filter, PillowFilter.Filter): self.filter = image_filter elif isinstance(image_filter, str): self.filter = getattr(PillowFilter, image_filter)(*args, **kwargs) else: raise ValueError("Must provide an image_filter as a string, type, " "or ImageFilter object. ") super().__init__() def _filter(self, stim): pillow_img = Image.fromarray(stim.data) new_img = np.array(pillow_img.filter(self.filter)) return ImageStim(stim.filename, data=new_img)
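# A hedged usage sketch of the filters defined above, assuming a local image
# file named 'photo.jpg' (the filename is illustrative, not part of pliers):
from pliers.stimuli.image import ImageStim

stim = ImageStim('photo.jpg')

cropped = ImageCroppingFilter().transform(stim)           # auto-crop black borders
resized = ImageResizingFilter(size=(224, 224),
                              maintain_aspect_ratio=True).transform(stim)
blurred = PillowImageFilter('BLUR').transform(stim)

print(resized.data.shape)   # (224, 224, 3), zero-padded where needed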
python
#
# @lc app=leetcode.cn id=206 lang=python3
#
# [206] Reverse Linked List
#
# https://leetcode-cn.com/problems/reverse-linked-list/description/
#
# algorithms
# Easy (58.01%)
# Total Accepted:    38.9K
# Total Submissions: 66.5K
# Testcase Example:  '[1,2,3,4,5]'
#
# Reverse a singly linked list.
#
# Example:
#
# Input:  1->2->3->4->5->NULL
# Output: 5->4->3->2->1->NULL
#
# Follow up:
# A linked list can be reversed either iteratively or recursively.
# Can you solve it both ways?
#

# Definition for singly-linked list.
# class ListNode:
#     def __init__(self, x):
#         self.val = x
#         self.next = None


class Solution1:
    # Iterative reversal: walk the list and re-point each node at its predecessor.
    def reverseList(self, head: ListNode) -> ListNode:
        node, last = head, None
        while node:
            tmp_last = last
            last = node
            tmp_node_next = node.next
            last.next = tmp_last
            node = tmp_node_next
        return last


class Solution2:
    # Recursive reversal.
    def reverseList(self, head: ListNode) -> ListNode:
        last = self._reverseList(head, None)
        return last

    def _reverseList(self, node, last):
        if not node:
            return last
        next_node = node.next
        node.next = last
        last = node
        return self._reverseList(next_node, last)


class Solution:
    # Iterative reversal using tuple assignment.
    def reverseList(self, head: ListNode) -> ListNode:
        node, last = head, None
        while node:
            last, last.next, node = node, last, node.next
        return last
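# A small hypothetical driver for the solutions above; ListNode is redefined
# here only so the snippet is self-contained.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None


def build(values):
    head = None
    for v in reversed(values):
        node = ListNode(v)
        node.next = head
        head = node
    return head


def to_list(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out


print(to_list(Solution().reverseList(build([1, 2, 3, 4, 5]))))  # [5, 4, 3, 2, 1]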
python
# encoding: utf-8
"""Test utility functions."""
from unittest import TestCase
import os

from viltolyckor.utils import parse_result_page
from requests.exceptions import HTTPError

DATA_DIR = "tests/data"


class TestUtils(TestCase):

    def setUp(self):
        pass

    def test_parse_result_page(self):
        file_path = os.path.join(DATA_DIR, "result_page.html")
        with open(file_path) as f:
            content = f.read()
        data = [x for x in parse_result_page(content)]

        assert len(data) == 13 * 14

        result = data[0]
        assert "year" in result
        assert "viltslag" in result
        assert "month" in result
        assert "value" in result
        assert isinstance(result["value"], int)
python
# -*- coding: utf-8 -*- """ *** Same as its parent apart that text baselines are reflected as a LineString (instead of its centroid) DU task for ABP Table: doing jointly row BIO and near horizontal cuts SIO block2line edges do not cross another block. The cut are based on baselines of text blocks, with some positive or negative inclination. - the labels of cuts are SIO Copyright Naver Labs Europe(C) 2018 JL Meunier Developed for the EU project READ. The READ project has received funding from the European Union's Horizon 2020 research and innovation programme under grant agreement No 674943. """ import sys, os import math try: #to ease the use without proper Python installation import TranskribusDU_version except ImportError: sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) ) import TranskribusDU_version TranskribusDU_version from common.trace import traceln from tasks import _exit from tasks.DU_CRF_Task import DU_CRF_Task from tasks.DU_Table.DU_ABPTableSkewed import GraphSkewedCut, main from tasks.DU_Table.DU_ABPTableSkewed_CutAnnotator import SkewedCutAnnotator from tasks.DU_Table.DU_ABPTableSkewed_txtBIO_sepSIO_line import DU_ABPTableSkewedRowCutLine from tasks.DU_Table.DU_ABPTableSkewed_txtBIOH_sepSIO_line import DU_ABPTableSkewedRowCutLine_BIOH # ---------------------------------------------------------------------------- if __name__ == "__main__": version = "v.01" usage, description, parser = DU_CRF_Task.getBasicTrnTstRunOptionParser(sys.argv[0], version) # parser.add_option("--annotate", dest='bAnnotate', action="store_true",default=False, help="Annotate the textlines with BIES labels") #FOR GCN # parser.add_option("--revertEdges", dest='bRevertEdges', action="store_true", help="Revert the direction of the edges") parser.add_option("--detail", dest='bDetailedReport', action="store_true", default=False,help="Display detailed reporting (score per document)") parser.add_option("--baseline", dest='bBaseline', action="store_true", default=False, help="report baseline method") parser.add_option("--line_see_line", dest='iLineVisibility', action="store", type=int, default=GraphSkewedCut.iLineVisibility, help="seeline2line: how far in pixel can a line see another cut line?") parser.add_option("--block_see_line", dest='iBlockVisibility', action="store", type=int, default=GraphSkewedCut.iBlockVisibility, help="seeblock2line: how far in pixel can a block see a cut line?") parser.add_option("--height", dest="fCutHeight", default=GraphSkewedCut.fCutHeight , action="store", type=float, help="Minimal height of a cut") # parser.add_option("--cut-above", dest='bCutAbove', action="store_true", default=False # ,help="Each object defines one or several cuts above it (instead of below as by default)") parser.add_option("--angle", dest='lsAngle' , action="store", type="string", default="-1,0,+1" ,help="Allowed cutting angles, in degree, comma-separated") parser.add_option("--graph", dest='bGraph', action="store_true", help="Store the graph in the XML for displaying it") parser.add_option("--bioh", "--BIOH", dest='bBIOH', action="store_true", help="Text are categorised along BIOH instead of BIO") parser.add_option("--text", "--txt", dest='bTxt', action="store_true", help="Use textual features.") # --- #parse the command line (options, args) = parser.parse_args() options.bCutAbove = True # Forcing this! 
if options.bBIOH: DU_CLASS = DU_ABPTableSkewedRowCutLine_BIOH else: DU_CLASS = DU_ABPTableSkewedRowCutLine if options.bGraph: import os.path # hack DU_CLASS.bCutAbove = options.bCutAbove traceln("\t%s.bCutAbove=" % DU_CLASS.__name__, DU_CLASS.bCutAbove) DU_CLASS.lRadAngle = [math.radians(v) for v in [float(s) for s in options.lsAngle.split(",")]] traceln("\t%s.lRadAngle=" % DU_CLASS.__name__, DU_CLASS.lRadAngle) for sInputFilename in args: sp, sf = os.path.split(sInputFilename) sOutFilename = os.path.join(sp, "graph-" + sf) doer = DU_CLASS("debug", "." , iBlockVisibility=options.iBlockVisibility , iLineVisibility=options.iLineVisibility , fCutHeight=options.fCutHeight , bCutAbove=options.bCutAbove , lRadAngle=[math.radians(float(s)) for s in options.lsAngle.split(",")] , bTxt=options.bTxt) o = doer.cGraphClass() o.parseDocFile(sInputFilename, 9) o.addEdgeToDoc() print('Graph edges added to %s'%sOutFilename) o.doc.write(sOutFilename, encoding='utf-8',pretty_print=True,xml_declaration=True) SkewedCutAnnotator.gtStatReport() exit(0) # --- try: sModelDir, sModelName = args except Exception as e: traceln("Specify a model folder and a model name!") _exit(usage, 1, e) main(DU_CLASS, sModelDir, sModelName, options)
python
import pytest from pybatfish.client.session import Session from pybatfish.datamodel import PathConstraints, HeaderConstraints from test_suite.sot_utils import (SoT, BLOCKED_PREFIXES, SNAPSHOT_NODES_SPEC, OPEN_CLIENT_PORTS) @pytest.mark.network_independent def test_no_forwarding_loops(bf: Session) -> None: """Check that there are no forwarding loops in the network.""" looping_flows = bf.q.detectLoops().answer().frame() assert looping_flows.empty, \ "Found flows that loop: {}".format(looping_flows.to_dict(orient="records")) @pytest.mark.network_independent def test_subnet_multipath_consistency(bf: Session) -> None: """ Check that all flows between all pairs are multipath consistent. Searches across all flows between subnets that are treated differently (i.e., dropped versus forwarded) by different paths in the network and returns example flows. """ multipath_inconsistent_flows = bf.q.subnetMultipathConsistency().answer().frame() assert multipath_inconsistent_flows.empty, \ "Found flows that are multipath inconsistent: {}".format(multipath_inconsistent_flows.to_dict(orient="records")) def test_public_services(bf: Session, sot: SoT) -> None: """Check that all public services are accessible from the Internet.""" for service in sot.public_services: failed_flows = bf.q.reachability( pathConstraints=PathConstraints(startLocation="internet"), headers=HeaderConstraints( srcIps='0.0.0.0/0 \\ ({})'.format(",".join(BLOCKED_PREFIXES)), srcPorts=OPEN_CLIENT_PORTS, dstIps=",".join(service["ips"]), applications=",".join(service["applications"])), actions="failure").answer().frame() assert failed_flows.empty, \ "Some flows to public service '{}' fail: {}".format(service["description"], failed_flows["Flow"]) def test_private_services(bf: Session, sot: SoT) -> None: """Check that all private services are inaccessible from the Internet.""" for service in sot.private_services: allowed_flows = bf.q.reachability( pathConstraints=PathConstraints(startLocation="internet"), headers=HeaderConstraints( dstIps=",".join(service["ips"]), applications=",".join(service["applications"])), actions="success").answer().frame() assert allowed_flows.empty, \ "Some traffic to private service {} is allowed: {}".format(service["description"], allowed_flows["Flow"]) def test_external_services(bf: Session, sot: SoT) -> None: """Check that all external services are accessible from all leaf routers.""" for service in sot.external_services: failed_flows = bf.q.reachability( pathConstraints=PathConstraints(startLocation="/leaf.*/"), headers=HeaderConstraints( dstIps=",".join(service["ips"]), applications=",".join(service["applications"])), actions="failure").answer().frame() assert failed_flows.empty, \ "Some flows to external service {} fail: {}".format(service["description"], failed_flows["Flow"]) def test_all_svi_prefixes_are_on_all_leafs(bf: Session, sot: SoT): """Check that all SVI prefixes are on all leafs.""" all_leafs = set(sot.inventory.get_groups_dict()['leaf']) # for each prefix set on each vlan interface for svi_prefixes in bf.q.interfaceProperties(interfaces="/vlan.*/").answer().frame()['All_Prefixes']: for prefix in svi_prefixes: # each vlan prefix should be present on each leaf leafs_with_prefix = set(bf.q.routes(nodes="/leaf.*/", network=prefix).answer().frame()["Node"].unique()) assert all_leafs == leafs_with_prefix def test_default_route_presence(bf: Session, sot: SoT): """Check that all routers have the default route.""" all_nodes = {host.get_name() for host in sot.inventory.get_hosts()} nodes_with_default = 
set(bf.q.routes(nodes=SNAPSHOT_NODES_SPEC, network="0.0.0.0/0").answer().frame()["Node"].unique()) assert all_nodes == nodes_with_default
python
"""Externalized strings for better structure and easier localization""" setup_greeting = """Dwarf - First run configuration Insert your bot's token, or enter 'cancel' to cancel the setup:""" not_a_token = "Invalid input. Restart Dwarf and repeat the configuration process." choose_prefix = """Choose a prefix. A prefix is what you type before a command. A typical prefix would be the exclamation mark. Can be multiple characters. You will be able to change it later and add more of them. Choose your prefix:""" confirm_prefix = """Are you sure you want {0} as your prefix? You will be able to issue commands like this: {0}help Type yes to confirm or no to change it""" no_prefix_set = "No prefix set. Defaulting to !" setup_finished = """ The configuration is done. Do not exit this session to keep your bot online. All commands will have to be issued via Discord, this session will now be read only. Press enter to continue""" logging_into_discord = "Logging into Discord..." invalid_credentials = """Invalid login credentials. If they worked before Discord might be having temporary technical issues. In this case, press enter and try again later. Otherwise you can type 'reset' to delete the current configuration and redo the setup process again the next start. > """ keep_updated = "Make sure to keep Dwarf updated by using the {}update command." official_server = "Official server: {}" invite_link = "https://discord.gg/rAHwvyE" bot_is_online = "{} is now online." connected_to = "Connected to:" connected_to_servers = "{} servers" connected_to_channels = "{} channels" connected_to_users = "{} users" prefix_singular = "Prefix" prefix_plural = "Prefixes" use_this_url = "Use this URL to bring your bot to a server:" update_the_api = """\nYou are using an outdated discord.py.\n Update using pip3 install -U discord.py""" command_not_found = "No command called {} found." command_disabled = "That command is disabled." exception_in_command = "Exception in command '{}'" error_in_command = "Error in command '{}' - {}: {}" not_available_in_dm = "That command is not available in DMs." command_has_no_subcommands = "Command {0.name} has no subcommands." group_help = "{} command group" owner_recognized = "{} has been recognized and set as owner." user_registered = """{}, thanks for using my commands! I just registered you in my database so you can use all my features. I hope that's okay for you. If it isn't, please use the `unregister` command. That will remove all of the data I store about you. The only thing I will still keep is your ID so I don't forget that you don't want data about you to be stored. Keep in mind that if I'm not allowed to store data about you, you won't be able to use many of my commands. If you ever change your mind about this, use the `register` command. Whatever your decision looks like, I wish you lots of fun on Discord."""
python
#coding=utf-8
from django.conf.urls import patterns, url, include

from cmdb.views import contract

urlpatterns = patterns('',
    url(r'^$', contract.list_contract, name='contract_index'),
    url(r'add/$', contract.add_contract, name='add_contract'),
    url(r'del/(?P<contract_id>\d+)/$', contract.del_contract, name='del_contract'),
    url(r'(?P<contract_id>\d+)/$', contract.edit_contract, name='edit_contract'),
    url(r'list/$', contract.list_contract, name='list_contract'),
)
python
## Problem: Finding Numbers in a Haystack

# use regex library
import re

# open file for reading
# save the file into the same directory
textfile_handle = open("regex_sum_42.txt")

# list of all numbers found so far
num_all_list = list()

# read through and parse a file with text and numbers
# loop over every line of file
for line in textfile_handle:
    line = line.rstrip()

    # random numbers are inserted throughout the text
    # numbers can appear anywhere in the line. there can be any number of numbers in each line (including none)
    # extract any number from line and put it into list of numbers of that line
    # use regular expressions
    num_line_list = re.findall('[0-9]+', line)

    # skip lines without any number
    if len(num_line_list) == 0:
        continue

    # convert extracted strings to integers
    # append list of all numbers found so far with list of numbers of that line
    for num in num_line_list:
        num_all_list.append(int(num))

# sum up all integers in list of all numbers found
sum_num_all = sum(num_all_list)
print(sum_num_all)

# Example: http://py4e-data.dr-chuck.net/regex_sum_42.txt
# (There are 90 values with a sum=445833)
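# Quick illustration of the extraction step on a made-up line of text:
import re

line = "Why should you learn to write programs? 7746 12 1929 8827"
print(re.findall('[0-9]+', line))                        # ['7746', '12', '1929', '8827']
print(sum(int(n) for n in re.findall('[0-9]+', line)))   # 18514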
python
from numpy import asarray from datetime import datetime, timedelta from PyQt5.QtCore import Qt from PyQt5.QtChart import QChart, QLineSeries, QBarCategoryAxis, QValueAxis from PyQt5.QtGui import QPainter from core import AppCore from widget.GeometryRestoreWidget import GeometryRestoreWidget from gen.ui_AnalysisWidget import Ui_AnalysisWidget from qt.ChartWidget import ChartWidget from widget.LinearTestWidget import LinearTestWidget class AnalysisWidget(GeometryRestoreWidget): """ Widget to display analysis graphs from transformations performed on DB data """ core = AppCore() ui = None linear_analysis_widget = None linear_test_widget = None linear_analysis_chart = None linear_analyis_series = QLineSeries() def __init__(self, parent=None): """ Create analysis widget :param parent: Parent widget """ # Restore geometry super().__init__("AnalysisWidget", parent) # Load UI self.ui = Ui_AnalysisWidget() self.ui.setupUi(self) # Setup analysis widget self.linear_analysis_widget = ChartWidget() # Setup analysis chart self.linear_analysis_chart = QChart() self.linear_analysis_chart.setTheme(QChart.ChartThemeBlueCerulean) self.linear_analysis_chart.setBackgroundVisible(False) self.linear_analysis_chart.setAnimationOptions(QChart.SeriesAnimations) self.linear_analysis_chart.legend().setVisible(True) self.linear_analysis_chart.legend().setAlignment(Qt.AlignBottom) self.linear_analysis_widget.ui.chartView.setRenderHint(QPainter.Antialiasing) self.linear_analysis_widget.ui.chartView.setChart(self.linear_analysis_chart) # Add to display self.ui.linearRegTab.layout().addWidget(self.linear_analysis_widget) # Create test widget self.linear_test_widget = LinearTestWidget() self.ui.linearTestTab.layout().addWidget(self.linear_test_widget) # Update analysis from test model config changes self.linear_test_widget.model_updated.connect(self.update_linear_analysis) def update_linear_analysis(self): """ Populate the linear analysis for N days using the configuration from the test widget """ # Load most recent open value query = "SELECT open FROM time_series_daily_adjusted WHERE symbol = " + \ '\'' + self.linear_test_widget.symbol + '\'' + " ORDER BY timestamp DESC LIMIT 1" self.core.data_store.cursor.execute(query) value = self.core.data_store.cursor.fetchall() if len(value) == 0: # Some error return # Create a chart using the values, clear # any existing series from chart if len(self.linear_analysis_chart.series()) > 0: self.linear_analysis_chart.removeAllSeries() self.linear_analyis_series = QLineSeries() x_axis = self.linear_analysis_chart.axes(Qt.Horizontal)[0] y_axis = self.linear_analysis_chart.axes(Qt.Vertical)[0] self.linear_analysis_chart.removeAxis(y_axis) self.linear_analysis_chart.removeAxis(x_axis) # Predict 7 days ahead using the model generated # through the configuration widget for training and # test, starting with the current open value value = value[0][0] n = 0 categories = [] max = value min = value self.linear_analyis_series.append(n, value) categories.append((datetime.utcnow() + timedelta(days=n)).strftime("%Y-%m-%d")) while n < 7: n += 1 prediction = self.linear_test_widget.model.predict(asarray(value).reshape(-1, 1)) value = prediction.flatten()[0] categories.append((datetime.utcnow() + timedelta(days=n)).strftime("%Y-%m-%d")) self.linear_analyis_series.append(n, value) if value > max: max = value if value < min: min = value # Series names self.linear_analyis_series.setName("Forecast close values") self.linear_analysis_chart.setTitle(self.linear_test_widget.symbol + " Linear regression 7-day 
forecast") # Add series self.linear_analysis_chart.addSeries(self.linear_analyis_series) # Axis setup x_axis = QBarCategoryAxis() x_axis.setTitleText("Date") x_axis.setLabelsAngle(-90) x_axis.setCategories(categories) self.linear_analysis_chart.addAxis(x_axis, Qt.AlignBottom) self.linear_analyis_series.attachAxis(x_axis) y_axis = QValueAxis() y_axis.setLabelFormat("%f") y_axis.setTitleText("Value (USD)") pad = max - min y_axis.setRange(min - pad, max + pad) self.linear_analysis_chart.addAxis(y_axis, Qt.AlignLeft) self.linear_analyis_series.attachAxis(y_axis) def update_analysis(self, symbol): """ Update the analysis configuration widget to let the user dynamically configure the parameter to use for linear regression training and display the test data """ # Update test/train display self.linear_test_widget.update_symbol(symbol) # Perform initial analysis self.update_linear_analysis()
python
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: task.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from easy_command_sdk.model.inspection import user_or_user_group_pb2 as easy__command__sdk_dot_model_dot_inspection_dot_user__or__user__group__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='task.proto', package='inspection', syntax='proto3', serialized_options=_b('ZDgo.easyops.local/contracts/protorepo-models/easyops/model/inspection'), serialized_pb=_b('\n\ntask.proto\x12\ninspection\x1a:easy_command_sdk/model/inspection/user_or_user_group.proto\"\xc8\x04\n\x0eInspectionTask\x12\x18\n\x10inspectionTaskId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x13\n\x0bisAllNotify\x18\x03 \x01(\x08\x12\x1c\n\x14notifyPassComparator\x18\x04 \x01(\t\x12\x13\n\x0bnotifyScore\x18\x05 \x01(\x02\x12-\n\x04\x61rgs\x18\x06 \x03(\x0b\x32\x1f.inspection.InspectionTask.Args\x12\x39\n\nnotifyUser\x18\x07 \x01(\x0b\x32%.inspection.InspectionUserOrUserGroup\x12>\n\x0fnotifyUserGroup\x18\x08 \x01(\x0b\x32%.inspection.InspectionUserOrUserGroup\x12\x10\n\x08taskType\x18\t \x01(\t\x12\x1a\n\x12performanceTargets\x18\n \x01(\t\x12\x17\n\x0fqueryStrategyId\x18\x0b \x01(\t\x12\x15\n\rtaskScheduler\x18\x0c \x01(\t\x12\x33\n\x07targets\x18\r \x03(\x0b\x32\".inspection.InspectionTask.Targets\x12\x0c\n\x04memo\x18\x0e \x01(\t\x12\x12\n\ntemplateId\x18\x0f \x01(\t\x12\x14\n\x0ctemplateName\x18\x10 \x01(\t\x1a\x32\n\x04\x41rgs\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12\x0e\n\x06source\x18\x03 \x01(\t\x1a\x1d\n\x07Targets\x12\x12\n\ninstanceId\x18\x01 \x01(\tBFZDgo.easyops.local/contracts/protorepo-models/easyops/model/inspectionb\x06proto3') , dependencies=[easy__command__sdk_dot_model_dot_inspection_dot_user__or__user__group__pb2.DESCRIPTOR,]) _INSPECTIONTASK_ARGS = _descriptor.Descriptor( name='Args', full_name='inspection.InspectionTask.Args', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='key', full_name='inspection.InspectionTask.Args.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='value', full_name='inspection.InspectionTask.Args.value', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='source', full_name='inspection.InspectionTask.Args.source', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=590, serialized_end=640, ) 
_INSPECTIONTASK_TARGETS = _descriptor.Descriptor( name='Targets', full_name='inspection.InspectionTask.Targets', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='instanceId', full_name='inspection.InspectionTask.Targets.instanceId', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=642, serialized_end=671, ) _INSPECTIONTASK = _descriptor.Descriptor( name='InspectionTask', full_name='inspection.InspectionTask', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='inspectionTaskId', full_name='inspection.InspectionTask.inspectionTaskId', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='name', full_name='inspection.InspectionTask.name', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='isAllNotify', full_name='inspection.InspectionTask.isAllNotify', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='notifyPassComparator', full_name='inspection.InspectionTask.notifyPassComparator', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='notifyScore', full_name='inspection.InspectionTask.notifyScore', index=4, number=5, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='args', full_name='inspection.InspectionTask.args', index=5, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='notifyUser', full_name='inspection.InspectionTask.notifyUser', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='notifyUserGroup', full_name='inspection.InspectionTask.notifyUserGroup', index=7, number=8, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='taskType', full_name='inspection.InspectionTask.taskType', index=8, number=9, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='performanceTargets', full_name='inspection.InspectionTask.performanceTargets', index=9, number=10, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='queryStrategyId', full_name='inspection.InspectionTask.queryStrategyId', index=10, number=11, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='taskScheduler', full_name='inspection.InspectionTask.taskScheduler', index=11, number=12, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='targets', full_name='inspection.InspectionTask.targets', index=12, number=13, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='memo', full_name='inspection.InspectionTask.memo', index=13, number=14, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='templateId', full_name='inspection.InspectionTask.templateId', index=14, number=15, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='templateName', full_name='inspection.InspectionTask.templateName', index=15, number=16, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[_INSPECTIONTASK_ARGS, _INSPECTIONTASK_TARGETS, ], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=87, serialized_end=671, ) _INSPECTIONTASK_ARGS.containing_type = _INSPECTIONTASK _INSPECTIONTASK_TARGETS.containing_type = _INSPECTIONTASK _INSPECTIONTASK.fields_by_name['args'].message_type = _INSPECTIONTASK_ARGS _INSPECTIONTASK.fields_by_name['notifyUser'].message_type = easy__command__sdk_dot_model_dot_inspection_dot_user__or__user__group__pb2._INSPECTIONUSERORUSERGROUP 
_INSPECTIONTASK.fields_by_name['notifyUserGroup'].message_type = easy__command__sdk_dot_model_dot_inspection_dot_user__or__user__group__pb2._INSPECTIONUSERORUSERGROUP _INSPECTIONTASK.fields_by_name['targets'].message_type = _INSPECTIONTASK_TARGETS DESCRIPTOR.message_types_by_name['InspectionTask'] = _INSPECTIONTASK _sym_db.RegisterFileDescriptor(DESCRIPTOR) InspectionTask = _reflection.GeneratedProtocolMessageType('InspectionTask', (_message.Message,), { 'Args' : _reflection.GeneratedProtocolMessageType('Args', (_message.Message,), { 'DESCRIPTOR' : _INSPECTIONTASK_ARGS, '__module__' : 'task_pb2' # @@protoc_insertion_point(class_scope:inspection.InspectionTask.Args) }) , 'Targets' : _reflection.GeneratedProtocolMessageType('Targets', (_message.Message,), { 'DESCRIPTOR' : _INSPECTIONTASK_TARGETS, '__module__' : 'task_pb2' # @@protoc_insertion_point(class_scope:inspection.InspectionTask.Targets) }) , 'DESCRIPTOR' : _INSPECTIONTASK, '__module__' : 'task_pb2' # @@protoc_insertion_point(class_scope:inspection.InspectionTask) }) _sym_db.RegisterMessage(InspectionTask) _sym_db.RegisterMessage(InspectionTask.Args) _sym_db.RegisterMessage(InspectionTask.Targets) DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope)
python
import os

DB_HOST = os.environ["REDIS_HOST"]
DB_PORT = int(os.environ["REDIS_PORT"])
DB_NAME = int(os.environ["REDIS_ID"])
DB_QUEUE = os.environ["INPUT_QUEUE"]

BATCH_SIZE = 16
SERVER_SLEEP = 0.25
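# A minimal sketch of how these settings might be consumed elsewhere; this
# assumes the redis-py client and is not part of the original module.
import redis

db = redis.StrictRedis(host=DB_HOST, port=DB_PORT, db=DB_NAME)
db.rpush(DB_QUEUE, '{"id": "example"}')   # producers append work items
item = db.lpop(DB_QUEUE)                  # a worker pops them in FIFO order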
python
#!/usr/bin/env python import roslib import rospy import math import time import numpy as np import os from std_msgs.msg import Int32MultiArray from std_msgs.msg import Int32 from rospy_tutorials.msg import Floats from gpiozero import PWMOutputDevice #initialize all variables #current positon Xc=0 Yc=0 #final position Xf=0 Yf=0 #goal angel Theta_g=0 #current angel Theta_c=0 #initial value of flag Flag =0 R=3.25 #radius of wheel cm L=18.5 #seperation distance cm #define publishers pub1 = rospy.Publisher('Wr_target_Rob3', Int32, queue_size=10) pub2 = rospy.Publisher('Wl_target_Rob3', Int32, queue_size=10) pub3 = rospy.Publisher('Flag_3', Int32, queue_size=10) #get data from the callbacks(goal angel, current and final positions) def callback1(data): try: global Theta_g Theta_g=data.data[0] except IndexError: pass def callback2(data): global Xc, Yc , Theta_c #C=current position Xc=data.data[0] Yc=data.data[1] Theta_c=data.data[2] def callback3(data): global Xf, Yf # f=final position Xf=data.data[0] Yf=data.data[1] #run the smooth function def callback4(data): smooth() #set the subscribers def listener(): rospy.init_node('SmoothController_Rob3') rospy.Subscriber('theta_goal_Rob3' ,Int32MultiArray,callback1) rospy.Subscriber('rob3_CurrentPose',Int32MultiArray,callback2) rospy.Subscriber('robot3_goal_cm' ,Int32MultiArray,callback3) rospy.Subscriber('len_route3' ,Int32,callback4) def smooth(): # get the error in the global reference frame if ((Xf > 0) and (Yf >0)) : global Flag errorX= Xf - Xc errorY= Yf - Yc error_th = Theta_c - Theta_g error_th_rad = error_th * (math.pi / 180) theta_rad= Theta_c * (math.pi / 180) #get error in the robot's ref frame gr_X=round( (errorX*math.cos(theta_rad))+(errorY*math.sin(theta_rad)),2) gr_Y=round( (-errorX*math.sin(theta_rad))+(errorY*math.cos(theta_rad)),2) #calculate Rho and alpha rho =round((math.sqrt(gr_X**2 + gr_Y**2)),2) alpha = round(error_th_rad,2) if alpha > math.pi: #alpha [ -pi , pi ] alpha = alpha - (math.pi*2) #define gains K_rho=0.14 K_alpha=0.3102 #calculate control commands while ((abs(gr_X) <= 4 ) and (abs(gr_Y) <= 4) ): print 'Reached The goal' #if reached goal set angular velocities zero and raise the flag WR= 0 WL =0 Flag =1 #publish angular velocities and raised flags pub1.publish(WR) pub2.publish(WL) pub3.publish(Flag) #updating the error listener() errorX= Xf - Xc errorY= Yf - Yc error_th = Theta_c - Theta_g theta_rad= Theta_c * (math.pi / 180) gr_X=round( (errorX*math.cos(theta_rad))+(errorY*math.sin(theta_rad)),2) gr_Y=round( (-errorX*math.sin(theta_rad))+(errorY*math.cos(theta_rad)),2) #reset flag Flag =0 pub3.publish(Flag) #calculate linear and angular velocity V=round((K_rho *rho),2) V=max(min(15,V),1.8) W= round((K_alpha *alpha),2) #kinematics WR = round(abs((V + ((W*L)/2)) /R)) #right_wheel_angular_vel WL = round(abs((V - ((W*L)/2)) /R)) #left_wheel_angular_vel pub1.publish(WR) pub2.publish(WL) listener() #print WR ,WL # os.system('clear') if __name__ == '__main__': while not rospy.is_shutdown(): listener() rospy.spin()
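# Standalone sketch of the differential-drive conversion used above
# (v: linear velocity in cm/s, w: angular velocity in rad/s,
#  L: wheel separation in cm, R: wheel radius in cm); the values are illustrative.
def wheel_speeds(v, w, L=18.5, R=3.25):
    w_right = (v + (w * L) / 2.0) / R
    w_left = (v - (w * L) / 2.0) / R
    return w_right, w_left

print(wheel_speeds(10.0, 0.2))  # approximately (3.65, 2.51) rad/s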
python
from datetime import datetime from django import forms from .models import Location class StrikeFilterForm(forms.Form): daterange = forms.CharField(label='Date', max_length=23) country__name = forms.ChoiceField(label='Country', choices=()) province = forms.CharField(label='Province', max_length=100, required=False) town = forms.CharField(label='City / Town', max_length=100, required=False) def __init__(self, *args, **kwargs): super(StrikeFilterForm, self).__init__(*args, **kwargs) country_choices = [ (l, l) for l in Location.objects.all().values_list( 'country__name', flat=True).distinct()] country_choices.append(('all', '')) self.fields['country__name'] = forms.ChoiceField(choices=country_choices) def get_values(self): """ Get filter unpackable values. """ if not self.is_valid(): return {} # Only retrieve existing data. data = {} for item in self.cleaned_data: if self.cleaned_data[item] not in ['', None]: data[item] = self.cleaned_data[item] # Set province key if 'province' in data: data['location'] = data.pop('province') # Set country default value if data.get('country__name', '') == 'all': data.pop('country__name') return data def clean_daterange(self): """ Parses and validates daterange string. """ error = forms.ValidationError("Date range must be 'mm/dd/yyyy - mm/dd/yyyy'.") if not self.is_valid(): raise error daterange = self.cleaned_data['daterange'] dates = daterange.split(' - ') if len(dates) != 2: raise error try: daterange = { 'date__gte': datetime.strptime(dates[0], '%m/%d/%Y').date(), 'date__lte': datetime.strptime(dates[1], '%m/%d/%Y').date(), } except ValueError: raise error return daterange
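# Hypothetical usage of StrikeFilterForm, e.g. inside a view; the field values
# below are invented for illustration.
form = StrikeFilterForm(data={
    "daterange": "01/01/2015 - 12/31/2015",
    "country__name": "all",
    "province": "Helmand",
    "town": "",
})
if form.is_valid():
    filters = form.get_values()
    # filters == {'daterange': {'date__gte': date(2015, 1, 1),
    #                           'date__lte': date(2015, 12, 31)},
    #             'location': 'Helmand'}
    # The 'daterange' sub-dict and the remaining keys can be unpacked into a
    # queryset .filter(...) call on whichever strike model the project uses.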
python
"""Model generation""" from abc import ABC from collections import namedtuple from copy import copy import functools import itertools import numpy as np from scipy.special import expit # pylint: disable = no-name-in-module import sympy from sympy.utilities.lambdify import lambdify from synmod import constants from synmod.aggregators import Aggregator, TabularAggregator Polynomial = namedtuple("Polynomial", ["relevant_feature_map", "sym_polynomial_fn", "polynomial_fn"]) # pylint: disable = invalid-name class Model(ABC): """Model base class""" def __init__(self, aggregator, polynomial, X=None): # pylint: disable = unused-argument self._aggregator = aggregator # object to perform aggregation over time and generate feature vector # relevant_feature-map: Mapping from frozensets containing one or more feature names to their polynomial coefficients self.relevant_feature_map, self.sym_polynomial_fn, self._polynomial_fn = polynomial @property def relevant_feature_names(self): """Convenience function to get feature names""" return list(functools.reduce(set.union, self.relevant_feature_map, set())) def predict(self, X, **kwargs): """Predict outputs on input instances""" class Classifier(Model): """Classification model""" def __init__(self, aggregator, polynomial, X): super().__init__(aggregator, polynomial) assert X is not None self._threshold = np.median(self._polynomial_fn(self._aggregator.operate(X).transpose(), 0)) def predict(self, X, **kwargs): """ Predict output probabilities on instances in X by aggregating features over time, applying a polynomial, thresholding, then applying a sigmoid. Parameters ---------- X: Matrix/tensor Instances to predict model outputs for labels: bool, optional, default False Flag to return output labels instead of probabilities noise: 1D float array, optional, default 0 Noise term(s) to add to polynomial before applying sigmoid """ labels = kwargs.get("labels", False) noise = kwargs.get("noise", 0) values = expit(self._polynomial_fn(self._aggregator.operate(X).transpose(), noise) - self._threshold) # Sigmoid output if labels: values = (values > 0.5).astype(np.int32) return values class Regressor(Model): """Regression model""" def predict(self, X, **kwargs): """ Predict outputs on instances in X by aggregating features over time and applying a polynomial Parameters ---------- X: Matrix/tensor Instances to predict model outputs for noise: 1D float array, optional, default 0 Noise term(s) to add to polynomial """ noise = kwargs.get("noise", 0) # TODO: this is the noise multiplier return self._polynomial_fn(self._aggregator.operate(X).transpose(), noise) def get_model(args, features, instances): """Generate and return model""" args = copy(args) args.rng = np.random.default_rng(args.seed) # Reset RNG for consistent model independent of instances # Select relevant features relevant_features = get_relevant_features(args) polynomial = gen_polynomial(args, relevant_features) if args.synthesis_type == constants.TABULAR: aggregator = TabularAggregator() else: # Select time window for each feature windows = [feature.window for feature in features] for fid, _ in enumerate(features): relevance = "relevant" if fid in relevant_features else "irrelevant" args.logger.info(f"Window for {relevance} feature id {fid}: ({windows[fid][0]}, {windows[fid][1]})") aggregator = Aggregator([feature.aggregation_fn for feature in features], windows, instances, args.standardize_features) # Select model model_class = {constants.CLASSIFIER: Classifier, constants.REGRESSOR: Regressor}[args.model_type] 
return model_class(aggregator, polynomial, instances) def get_window(args): """Randomly select appropriate window for model to operate in""" # TODO: allow soft-edged windows (smooth decay of influence of feature values outside window) right = args.sequence_length - 1 # Anchor half the windows on the right if args.rng.uniform() < 0.5: right = args.rng.choice(range(args.sequence_length // 2, args.sequence_length)) left = args.rng.choice(range(0, right)) return (left, right) def gen_polynomial(args, relevant_features): """Generate polynomial which decides the ground truth and noisy model""" # Note: using sympy to build function appears to be 1.5-2x slower than erstwhile raw numpy implementation (for linear terms) sym_features = sympy.symbols([f"x_{x}" for x in range(args.num_features)]) sym_noise = sympy.Symbol("beta", real=True) # multiplier for irrelevant features in approximate model relevant_feature_map = {} # map of relevant feature sets to coefficients # Generate polynomial expression # Pairwise interaction terms sym_polynomial_fn = 0 sym_polynomial_fn = update_interaction_terms(args, relevant_features, relevant_feature_map, sym_features, sym_polynomial_fn) # Linear terms sym_polynomial_fn = update_linear_terms(args, relevant_features, relevant_feature_map, sym_features, sym_noise, sym_polynomial_fn) args.logger.info(f"Ground truth polynomial:\ny = {sym_polynomial_fn}") # Generate model expression polynomial_fn = lambdify([sym_features, sym_noise], sym_polynomial_fn, "numpy") return Polynomial(relevant_feature_map, sym_polynomial_fn, polynomial_fn) def get_relevant_features(args): """Get set of relevant feature identifiers""" num_relevant_features = max(1, round(args.num_features * args.fraction_relevant_features)) coefficients = np.zeros(args.num_features) coefficients[:num_relevant_features] = 1 args.rng.shuffle(coefficients) relevant_features = {idx for idx in range(args.num_features) if coefficients[idx]} return relevant_features def update_interaction_terms(args, relevant_features, relevant_feature_map, sym_features, sym_polynomial_fn): """Pairwise interaction terms for polynomial""" # TODO: higher-order interactions num_relevant_features = len(relevant_features) num_interactions = min(args.num_interactions, num_relevant_features * (num_relevant_features - 1) / 2) if not num_interactions: return sym_polynomial_fn potential_pairs = list(itertools.combinations(sorted(relevant_features), 2)) potential_pairs_arr = np.empty(len(potential_pairs), dtype=np.object) potential_pairs_arr[:] = potential_pairs interaction_pairs = args.rng.choice(potential_pairs_arr, size=num_interactions, replace=False) for interaction_pair in interaction_pairs: coefficient = args.rng.uniform() if args.model_type == constants.CLASSIFIER: coefficient *= args.rng.choice([-1, 1]) # Randomly flip sign relevant_feature_map[frozenset(interaction_pair)] = coefficient sym_polynomial_fn += coefficient * functools.reduce(lambda sym_x, y: sym_x * sym_features[y], interaction_pair, 1) return sym_polynomial_fn # pylint: disable = too-many-arguments def update_linear_terms(args, relevant_features, relevant_feature_map, sym_features, sym_noise, sym_polynomial_fn): """Order one terms for polynomial""" interaction_features = set() for interaction in relevant_feature_map.keys(): interaction_features.update(interaction) # Let half the interaction features have nonzero interaction coefficients but zero linear coefficients interaction_only_features = [] if interaction_features and args.include_interaction_only_features: 
interaction_only_features = args.rng.choice(sorted(interaction_features), len(interaction_features) // 2, replace=False) linear_features = sorted(relevant_features.difference(interaction_only_features)) coefficients = sym_noise * np.ones(args.num_features) coefficients[list(relevant_features)] = 1 coefficients *= args.rng.uniform(-1, 1, size=args.num_features) for linear_feature in linear_features: relevant_feature_map[frozenset([linear_feature])] = coefficients[linear_feature] sym_polynomial_fn += coefficients.dot(sym_features) return sym_polynomial_fn
python
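The heart of gen_polynomial above is the sympy-to-numpy hand-off: a symbolic polynomial over per-feature symbols plus a noise multiplier gets lambdified into a vectorised numeric function. The sketch below is not part of synmod; the coefficients, shapes and variable names are made up purely to show that pipeline in isolation.

# Minimal sketch of the sympy -> lambdify pipeline used by gen_polynomial (illustrative values only)
import numpy as np
import sympy
from sympy.utilities.lambdify import lambdify

num_features = 3
sym_features = sympy.symbols([f"x_{i}" for i in range(num_features)])
sym_noise = sympy.Symbol("beta", real=True)

# y = 0.5*x_0 + beta*0.2*x_1 + 1.5*x_0*x_2  (arbitrary coefficients for the example)
sym_polynomial = (0.5 * sym_features[0]
                  + sym_noise * 0.2 * sym_features[1]
                  + 1.5 * sym_features[0] * sym_features[2])

polynomial_fn = lambdify([sym_features, sym_noise], sym_polynomial, "numpy")

X = np.random.default_rng(0).uniform(size=(num_features, 5))  # features x instances, as in the model code
print(polynomial_fn(X, 0))  # "ground truth": noise multiplier zero, irrelevant terms drop out
print(polynomial_fn(X, 1))  # "noisy" model: irrelevant terms contribute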
# -*- coding: UTF-8 -*- # Copyright 2011-2018 Rumma & Ko Ltd # License: BSD (see file COPYING for details) """Desktop UI for this plugin. Documentation is in :doc:`/specs/users` and :doc:`/dev/users` """ from __future__ import unicode_literals from textwrap import wrap from django.conf import settings from django.db import models from lino.api import dd, rt, _ from lino.core import actions from lino.core.roles import SiteAdmin, SiteUser from lino.core.utils import djangoname from .choicelists import UserTypes from .actions import SendWelcomeMail, SignIn, SignInWithSocialAuth def mywrap(t, ls=80): t = '\n'.join([ ln.strip() for ln in t.splitlines() if ln.strip()]) return '\n'.join(wrap(t, ls)) class UserDetail(dd.DetailLayout): box1 = """ username user_type:20 partner first_name last_name initials email language time_zone id created modified """ main = """ box1 #MembershipsByUser:20 remarks:40 AuthoritiesGiven:20 SocialAuthsByUser:30 """ main_m = """ username user_type partner first_name last_name initials email language time_zone id created modified remarks AuthoritiesGiven """ class UserInsertLayout(dd.InsertLayout): window_size = (60, 'auto') main = """ username email first_name last_name partner language user_type """ class Users(dd.Table): #~ debug_actions = True model = 'users.User' #~ order_by = "last_name first_name".split() order_by = ["username"] active_fields = 'partner' parameters = dict( user_type=UserTypes.field(blank=True)) simple_parameters = ['user_type'] #~ column_names = 'username first_name last_name is_active is_staff is_expert is_superuser *' column_names = 'username user_type first_name last_name *' detail_layout = 'users.UserDetail' insert_layout = UserInsertLayout() column_names_m = 'mobile_item *' @classmethod def render_list_item(cls, obj, ar): return "<p>{}</p>".format(obj.username) #~ @classmethod #~ def get_row_permission(cls,action,user,obj): #~ """ #~ Only system managers may edit other users. #~ See also :meth:`User.disabled_fields`. 
#~ """ #~ if not super(Users,cls).get_row_permission(action,user,obj): #~ return False #~ if user.level >= UserLevel.manager: return True #~ if action.readonly: return True #~ if user is not None and user == obj: return True #~ return False class AllUsers(Users): required_roles = dd.login_required(SiteAdmin) send_welcome_email = SendWelcomeMail() class UsersOverview(Users): required_roles = set([]) column_names = 'username user_type language' exclude = dict(user_type='') sign_in = SignIn() # if settings.SITE.social_auth_backends is None: # sign_in = SignIn() # else: # sign_in = SignInWithSocialAuth() class MySettings(Users): # use_as_default_table = False # hide_top_toolbar = True required_roles = dd.login_required() default_list_action_name = 'detail' # detail_layout = 'users.UserDetail' @classmethod def get_default_action(cls): return actions.ShowDetail(cls.detail_layout, hide_navigator=True) class Authorities(dd.Table): required_roles = dd.login_required(SiteAdmin) model = 'users.Authority' class AuthoritiesGiven(Authorities): required_roles = dd.login_required() master_key = 'user' label = _("Authorities given") column_names = 'authorized' auto_fit_column_widths = True class AuthoritiesTaken(Authorities): required_roles = dd.login_required() master_key = 'authorized' label = _("Authorities taken") column_names = 'user' auto_fit_column_widths = True if settings.SITE.social_auth_backends: try: import social_django except ImportError: raise Exception( "Sites with social_auth_backends must also install PSA " "into their environment: " "$ pip install social-auth-app-django") class SocialAuths(dd.Table): label = _("Third-party authorizations") required_roles = dd.login_required(SiteAdmin) model = 'social_django.UserSocialAuth' class SocialAuthsByUser(SocialAuths): required_roles = dd.login_required(SiteUser) master_key = 'user' else: class SocialAuthsByUser(dd.Dummy): pass class UserRoles(dd.VirtualTable): label = _("User roles") required_roles = dd.login_required(SiteAdmin) @classmethod def get_data_rows(self, ar): return settings.SITE.user_roles @dd.displayfield(_("Name")) def name(self, obj, ar): return djangoname(obj) @dd.displayfield(_("Description")) def description(self, obj, ar): return mywrap(obj.__doc__ or '', 40) @classmethod def setup_columns(cls): def w(ut): def func(fld, obj, ar): if isinstance(ut.role, obj): return "☑" return "" return func names = [] for ut in UserTypes.get_list_items(): name = "ut" + ut.value # vf = dd.VirtualField( # models.BooleanField(str(ut.value)), w(ut)) vf = dd.VirtualField( dd.DisplayField(str(ut.value)), w(ut)) cls.add_virtual_field(name, vf) names.append(name+":3") # cls.column_names = "name:20 description:40 " + ' '.join(names) cls.column_names = "name:20 " + ' '.join(names)
python
""" Checks the bam header: * to make sure all rgs have the same sample * enforce PL to be ILLUMINA Writes out a new header with the aliquot submitter id as the SM and/or PL as ILLUMINA as needed. @author: Kyle Hernandez """ import os import time import sys import pysam import argparse import logging PLATFORM = "ILLUMINA" def main(args: argparse.Namespace) -> None: """ Main wrapper for processing bam file headers. """ logger.info("Extracting bam header...") bam = pysam.AlignmentFile(args.input_bam, mode="rb") try: pass_sm = check_samples(bam) pass_pl = check_platforms(bam) conditionally_generate_new_header( bam, pass_sm, pass_pl, args.aliquot_id, args.output_header ) finally: bam.close() def check_samples(bam: pysam.AlignmentFile) -> bool: """ Checks the bam readgroups for missing SM fields and mismatched SMs. """ samples = [] for item in bam.header["RG"]: if not item.get("SM", "").strip(): logger.warn("Unable to find sample in rg {}".format(item)) return False else: samples.append(item["SM"]) if len(set(samples)) != 1: logger.warn("Found multiple sample IDs! {}".format(set(samples))) return False return True def check_platforms(bam: pysam.AlignmentFile) -> bool: """ Checks whether the bam rgs all have PL set to PLATFORM """ for item in bam.header["RG"]: if not item.get("PL", "").strip(): logger.warn("Unable to find platform in rg {}".format(item)) return False elif item["PL"] != PLATFORM: logger.warn( "Found readgroup with platform != '{}' - {}".format(PLATFORM, item) ) return False return True def conditionally_generate_new_header( bam: pysam.AlignmentFile, pass_sm: bool, pass_pl: bool, aliquot_id: str, out_file: str, ) -> None: """ If pass_sm or pass_pl are False, generates the new bam header, otherwise does nothing. """ if pass_sm and pass_pl: logger.info("No issues detected. No header written.") else: logger.info("Detected RG problems, will create new header.") fix_header = {} for key, vals in bam.header.items(): if key not in fix_header: fix_header[key] = [] if key == "RG": for item in vals: if not pass_sm: item["SM"] = aliquot_id if not pass_pl: item["PL"] = PLATFORM fix_header[key].append(item) else: fix_header[key] = vals obam = pysam.AlignmentFile(out_file, mode="w", header=fix_header) obam.close() def setup_logger(): """ Sets up the logger. """ logger = logging.getLogger("check_bam_header") LoggerFormat = "[%(levelname)s] [%(asctime)s] [%(name)s] - %(message)s" logger.setLevel(level=logging.INFO) handler = logging.StreamHandler(sys.stderr) formatter = logging.Formatter(LoggerFormat, datefmt="%Y%m%d %H:%M:%S") handler.setFormatter(formatter) logger.addHandler(handler) return logger if __name__ == "__main__": """ CLI Entrypoint. """ start = time.time() logger = setup_logger() logger.info("-" * 80) logger.info("check_bam_header_samples.py") logger.info("Program Args: {0}".format(" ".join(sys.argv))) logger.info("-" * 80) p = argparse.ArgumentParser( "Utility for checking samples in bam header and fixing if needed" ) p.add_argument("--input_bam", required=True, help="Input bam file.") p.add_argument( "--aliquot_id", required=True, help="Aliquot id to use for sample name if new header is needed.", ) p.add_argument( "--output_header", required=True, help="Output header file name if a new header is needed.", ) args = p.parse_args() # Process logger.info("Processing bam file {0}...".format(args.input_bam)) main(args) # Done logger.info("Finished, took {0} seconds.".format(time.time() - start))
python
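The read-group checks above boil down to simple dictionary validation once the header is parsed. The snippet below is a dependency-free sketch of the same SM/PL rules on a hand-written header dict (no pysam, example values invented), not the script's actual code path.

# Sketch of the SM/PL validation logic on a plain header dict
PLATFORM = "ILLUMINA"

header = {
    "RG": [
        {"ID": "rg1", "SM": "sampleA", "PL": "ILLUMINA"},
        {"ID": "rg2", "SM": "sampleA", "PL": "illumina "},  # wrong case / stray whitespace
    ]
}

samples = {rg.get("SM", "").strip() for rg in header["RG"]}
pass_sm = "" not in samples and len(samples) == 1          # every RG has an SM, and all SMs agree
pass_pl = all(rg.get("PL", "").strip() == PLATFORM for rg in header["RG"])

print(pass_sm, pass_pl)  # True False -> the script would emit a corrected header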
from AndroidFTPBackup.utils import FileHelper, ConfigHelper

configHelper: ConfigHelper = None
fileHelper: FileHelper = None
python
# python3 import sys, threading from collections import deque def compute_height_brute_force(n, parents): # Replace this code with a faster implementation max_height = 0 for vertex in range(n): height = 0 current = vertex while current != -1: height += 1 current = parents[current] max_height = max(max_height, height) return max_height class Tree: ''' a sample class to refresh your memories about a tree data structure ''' def __init__(self, value, children=[]): self._value = value self._children = children # a list of subtrees (recursive) def __str__(self): ans = "[" ans += str(self._value) for child in self._children: ans += ", " ans += str(child) return ans + "]" @property def value(self): return self._value def children(self): for child in self._children: yield child def height(self): height = 1 for child in self._children: height = max(height, child.height() + 1) return height def compute_height_recursive(n, parents): ''' this function only works for trees of medium size(number of nodes) such as 2,000, when applied on trees with more than 100,000 nodes, it definitely fails. To handle large inputs, recursion is always a very bad idea, even the memoization cannot save you. Whenever you expect the input data to be very huge, please find an alternative algorithm. ''' X = {} # height for each subtree, for memoization def build_tree_height(node): if node not in X: if node not in parents: # a leaf X[node] = 1 return X[node] children = [] for node_id, node_parent in enumerate(parents): if node_parent == node: if node_id not in X: X[node_id] = build_tree_height(node_id) children.append(X[node_id]) X[node] = max(children) + 1 return X[node] for node in range(n): if parents[node] == -1: root = node X[node] = build_tree_height(node) return X[root] def compute_height_BFS(n, parents): ''' In fact, trees are just a special form of undirected/directed graphs, depends on how you model it. all the graph algorithms you've learned can always be slightly modified and then applied on trees. for instance, to compute the height/depth of a tree, it's pretty much similar to computing the total number of layers for the breadth-first search algorithm to fully traverse a graph. Here we'll replace the tree recursion with a BFS traversal, since BFS has linear running time. To apply the BFS, we need to build a tree graph and avoid any recursion, so don't use Class Tree(). ''' G = {} # represent the tree graph by adjacency lists {parent:[children], ...} for child, parent in enumerate(parents): if child not in G: G[child] = [] if parent == -1: root = child if parent not in G: G[parent] = [child] else: G[parent].append(child) Q = deque([root]) layer = {root:1} while Q: node = Q.popleft() for child in G[node]: layer[child] = layer[node] + 1 Q.append(child) # print("G:", G) # for debugging # print("layer:", layer) # for debugging return max(layer.values()) def main(): n = int(input()) parents = list(map(int, input().split())) print(compute_height_BFS(n, parents)) # In Python, the default limit on recursion depth is rather low, # so raise it here for this problem. Note that to take advantage # of bigger stack, we have to launch the computation in a new thread. sys.setrecursionlimit(10**7) # max depth of recursion threading.stack_size(2**27) # new thread will get stack of such size threading.Thread(target=main).start()
python
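For a quick sanity check of the BFS approach above, the same idea fits in a few self-contained lines: build child lists from the parents array, then do a level-order traversal and keep the deepest level seen. The parents array below is made up for illustration.

from collections import deque

def tree_height_from_parents(parents):
    # parents[i] is the parent of node i; the root has parent -1
    children = {i: [] for i in range(len(parents))}
    root = None
    for node, parent in enumerate(parents):
        if parent == -1:
            root = node
        else:
            children[parent].append(node)
    depth = {root: 1}
    queue = deque([root])
    while queue:
        node = queue.popleft()
        for child in children[node]:
            depth[child] = depth[node] + 1
            queue.append(child)
    return max(depth.values())

print(tree_height_from_parents([4, -1, 4, 1, 1]))  # 3: node 1 is the root, nodes 0 and 2 sit two levels below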
import time
import pickle  # To work with cookies
import json

from selenium.webdriver.support.wait import WebDriverWait


class Login():

    def __init__(self, driver, profile, password):
        self.profile = profile
        self.driver = driver
        self.password = password

    def run(self):
        self.driver.get('https://www.instagram.com/')  # open and logging without cookies
        self.driver.implicitly_wait(20)
        print("Logging into instagram")
        self.driver.find_element_by_name('username').send_keys(self.profile)  # passing username to logging
        self.driver.find_element_by_name('password').send_keys(self.password)  # passing password to logging
        self.driver.find_element_by_xpath('//*[@id="loginForm"]/div/div[3]/button').click()
        print("Logging successfully completed")
        time.sleep(5)
python
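pickle is imported above "to work with cookies" but never used. One plausible way to wire it in, shown as a sketch with hypothetical helper names rather than anything from the original class, is to dump driver.get_cookies() after a successful login and replay them on later runs so the login form can be skipped.

# Hedged sketch: cookie persistence helpers (names are hypothetical, not from the class above)
import pickle

def save_cookies(driver, path="cookies.pkl"):
    with open(path, "wb") as f:
        pickle.dump(driver.get_cookies(), f)

def load_cookies(driver, path="cookies.pkl"):
    driver.get("https://www.instagram.com/")  # must be on the target domain before adding cookies
    with open(path, "rb") as f:
        for cookie in pickle.load(f):
            driver.add_cookie(cookie)
    driver.refresh()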
a = (2 ** 2)
b = (2 ** 2)
c = 2

print("a ** b ** c =", a ** b ** c)
python
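The point of the tiny script above is that ** associates right-to-left, so a ** b ** c is evaluated as a ** (b ** c). With the same values the script ends up with (a = 4, b = 4, c = 2), the grouping is visible because the two readings differ:

a, b, c = 4, 4, 2
print(a ** (b ** c))  # 4294967296
print((a ** b) ** c)  # 65536
print(a ** b ** c)    # 4294967296 -- matches the right-associative grouping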
# Code adapted from Fei Xia

import glob
import os

import cv2
import meshcut
import numpy as np
from tqdm import tqdm
from PIL import Image


def load_obj_np(filename_obj, normalization=False, texture_size=4, load_texture=False,
                texture_wrapping='REPEAT', use_bilinear=True):
    """Load Wavefront .obj file into numpy array
    This function only supports vertices (v x x x) and faces (f x x x).
    """
    # load vertices
    vertices = []
    with open(filename_obj) as f:
        lines = f.readlines()

    for line in lines:
        if len(line.split()) == 0:
            continue
        if line.split()[0] == 'v':
            vertices.append([float(v) for v in line.split()[1:4]])
    vertices = np.vstack(vertices).astype(np.float32)

    # load faces
    faces = []
    for line in lines:
        if len(line.split()) == 0:
            continue
        if line.split()[0] == 'f':
            vs = line.split()[1:]
            nv = len(vs)
            v0 = int(vs[0].split('/')[0])
            for i in range(nv - 2):
                v1 = int(vs[i + 1].split('/')[0])
                v2 = int(vs[i + 2].split('/')[0])
                faces.append((v0, v1, v2))
    faces = np.vstack(faces).astype(np.int32) - 1

    # load textures
    textures = None

    assert load_texture is False  # Since I commented out the block below
    # if load_texture:
    #     for line in lines:
    #         if line.startswith('mtllib'):
    #             filename_mtl = os.path.join(os.path.dirname(filename_obj), line.split()[1])
    #             textures = load_textures(filename_obj, filename_mtl, texture_size,
    #                                      texture_wrapping=texture_wrapping,
    #                                      use_bilinear=use_bilinear)
    #     if textures is None:
    #         raise Exception('Failed to load textures.')
    #     textures = textures.cpu().numpy()

    assert normalization is False  # Since I commented out the block below
    # # normalize into a unit cube centered zero
    # if normalization:
    #     vertices -= vertices.min(0)[0][None, :]
    #     vertices /= torch.abs(vertices).max()
    #     vertices *= 2
    #     vertices -= vertices.max(0)[0][None, :] / 2

    if load_texture:
        return vertices, faces, textures
    else:
        return vertices, faces


def get_hist_num_faces(obj_filepath):
    vertices, faces = load_obj_np(obj_filepath)
    z_faces = []
    weights = []
    z = np.array([0, 0, 1])
    for face in tqdm(faces):
        normal = np.cross(vertices[face[2]] - vertices[face[1]], vertices[face[1]] - vertices[face[0]])
        dist = np.dot(normal, z) / np.linalg.norm(normal)
        if dist < -0.99:
            z_faces.append(vertices[face[0]][-1])
            a = np.linalg.norm(vertices[face[2]] - vertices[face[1]])
            b = np.linalg.norm(vertices[face[2]] - vertices[face[0]])
            c = np.linalg.norm(vertices[face[0]] - vertices[face[1]])
            s = (a + b + c) / 2
            area = (s * (s - a) * (s - b) * (s - c)) ** 0.5
            weights.append(area)

    hist = np.histogram(np.array(z_faces), bins=100, weights=np.array(weights))
    return hist


def get_floor_height(hist, n_floors=1):
    heights = []
    for i in range(n_floors):
        pos = np.where(hist[0] == np.max(hist[0]))[0][0]
        height = (hist[1][pos] + hist[1][pos + 1]) / 2.0
        hist[0][np.abs(hist[1][1:] - height) < 0.5] = 0
        heights.append(height)
    return heights


def gen_map(obj_filepath, mesh_dir, img_filename_format='floor_{}.png'):
    vertices, faces = load_obj_np(obj_filepath)
    xmin, ymin, _ = vertices.min(axis=0)
    xmax, ymax, _ = vertices.max(axis=0)
    max_length = np.max([np.abs(xmin), np.abs(ymin), np.abs(xmax), np.abs(ymax)])
    max_length = int(np.ceil(max_length))  # np.int alias removed in recent numpy

    with open(os.path.join(mesh_dir, 'floors.txt')) as f:
        floors = map(float, f.readlines())
        floors = sorted(floors)
        print(floors)
        for i_floor, floor in enumerate(floors):
            z = float(floor) + 0.5
            cross_section = meshcut.cross_section(vertices, faces, plane_orig=(0, 0, z), plane_normal=(0, 0, 1))

            floor_map = np.ones((2 * max_length * 100, 2 * max_length * 100))

            for item in cross_section:
                for i in range(len(item) - 1):
                    x1, x2 = (item[i:i + 2, 0] + max_length) * 100
                    y1, y2 = (item[i:i + 2, 1] + max_length) * 100
                    # cv2.line expects integer pixel coordinates
                    cv2.line(floor_map, (int(x1), int(y1)), (int(x2), int(y2)), color=(0, 0, 0), thickness=2)

            cur_img = Image.fromarray((floor_map * 255).astype(np.uint8))
            # cur_img = Image.fromarray(np.flipud(cur_img))
            img_filename = img_filename_format.format(i_floor)
            cur_img.save(os.path.join(mesh_dir, img_filename))

            write_yaml(mesh_dir, np.array(cur_img), img_filename, 'floor_{}.yaml'.format(i_floor), resolution=0.01)


def get_obj_filepath(mesh_dir):
    return mesh_dir + '/mesh_z_up.obj'


def get_n_floors(mesh_dir):
    return 1


# def get_n_floors(mesh_dir):
#     house_seg_filepaths = glob.glob(os.path.join(mesh_dir, 'house_segmentations', '*.house'))
#     assert len(house_seg_filepaths) == 1
#     with open(house_seg_filepaths[0]) as f:
#         content = f.readlines()
#         content = [x.strip() for x in content]
#
#     n_levels = 0
#     for line in content:
#         if line.startswith('L '):
#             n_levels += 1
#     return n_levels


def fill_template(map_filepath, resolution, origin):
    # NOTE: Copied from generate_map_yaml.py
    """Return a string that contains the contents for the yaml file, filling out the blanks where
    appropriate.

    Args:
        map_filepath: Absolute path to map file (e.g. PNG).
        resolution: Resolution of each pixel in the map in meters.
        origin: (x, y, yaw) pose of the lower-left map pixel.
    """
    template = """image: MAP_FILEPATH
resolution: RESOLUTION
origin: [ORIGIN_X, ORIGIN_Y, YAW]
negate: 0
occupied_thresh: 0.65
free_thresh: 0.196
"""
    template = template.replace('MAP_FILEPATH', map_filepath)
    template = template.replace('RESOLUTION', str(resolution))
    template = template.replace('ORIGIN_X', str(origin[0]))
    template = template.replace('ORIGIN_Y', str(origin[1]))
    template = template.replace('YAW', str(origin[2]))
    return template


def write_yaml(mesh_dir, map_img, map_img_filepath, yaml_filename, resolution=0.01):
    # NOTE: Copied from generate_map_yaml.py
    origin_px_coord = (map_img.shape[0] / 2, map_img.shape[1] / 2)  # (row, col)
    cur_origin_map_coord = (-float(origin_px_coord[1]) * resolution,
                            float(origin_px_coord[0] - map_img.shape[0]) * resolution, 0.0)  # (x, y, yaw)
    yaml_content = fill_template(map_img_filepath, resolution=resolution, origin=cur_origin_map_coord)

    cur_yaml_filepath = os.path.join(mesh_dir, yaml_filename)
    print('Writing to:', cur_yaml_filepath)
    with open(cur_yaml_filepath, 'w') as f:
        f.write(yaml_content)


def generate_floorplan(mesh_dir):
    obj_filepath = get_obj_filepath(mesh_dir)

    # Generate floors.txt files
    print(mesh_dir)
    n_floors = get_n_floors(mesh_dir)  # Get number of floors
    hist = get_hist_num_faces(obj_filepath)
    hist = list(hist)
    hist[0] = np.nan_to_num(hist[0])
    hist = tuple(hist)
    heights = get_floor_height(hist, n_floors=n_floors)
    with open(os.path.join(mesh_dir, 'floors.txt'), 'w') as f:
        for height in heights:
            f.write("{}\n".format(height))

    gen_map(obj_filepath, mesh_dir)  # Generate floor maps


import sys

if __name__ == '__main__':
    generate_floorplan(sys.argv[1])
python
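get_floor_height above picks floor heights by taking the heaviest bin of an area-weighted height histogram and suppressing nearby bins. A toy version of that peak-picking, with synthetic z-values standing in for mesh faces (all values below are invented), looks like this:

# Toy illustration of histogram peak-picking for a floor height
import numpy as np

rng = np.random.default_rng(0)
z_values = np.concatenate([rng.normal(0.0, 0.02, 500),   # a dense cluster of downward-facing faces near z = 0
                           rng.normal(3.0, 0.02, 200)])  # a smaller cluster near z = 3
weights = np.ones_like(z_values)                         # stand-in for triangle areas

counts, edges = np.histogram(z_values, bins=100, weights=weights)
peak = np.argmax(counts)
floor_height = (edges[peak] + edges[peak + 1]) / 2.0
print(round(floor_height, 2))  # close to 0.0, the dominant cluster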
import os.path

import ranger.api
import ranger.core.fm
import ranger.ext.signals

from subprocess import Popen, PIPE

hook_init_prev = ranger.api.hook_init


def hook_init(fm):
    def zoxide_add(signal):
        path = signal.new.path
        process = Popen(["zoxide", "add", path])
        process.wait()

    fm.signal_bind("cd", zoxide_add)
    return hook_init_prev(fm)


ranger.api.hook_init = hook_init


class z(ranger.api.commands.Command):
    """
    :z

    Jump around with zoxide (z)
    """

    def execute(self):
        results = self.query(self.args[1:])
        # query() returns None when zoxide fails, so guard before indexing
        if results and os.path.isdir(results[0]):
            self.fm.cd(results[0])

    def query(self, args):
        try:
            p = Popen(
                ["zoxide", "query"] + self.args[1:],
                stdout=PIPE,
                stderr=PIPE
            )
            stdout, stderr = p.communicate()
            if p.returncode == 0:
                output = stdout.decode("utf-8").strip()
                if output:
                    return output.splitlines()
                else:
                    self.fm.notify("zoxide exited with status {}".format(p.returncode), bad=True)
            else:
                output = stderr.decode("utf-8").strip() or "zoxide: unexpected error"
                self.fm.notify(output, bad=True)
        except Exception as e:
            self.fm.notify(e, bad=True)

    def tab(self, tabnum):
        results = self.query(self.args[1:])
        if not results:
            return []
        return ["z {}".format(x) for x in results]
python
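Outside of ranger, the zoxide calls the plugin shells out to can be exercised directly. The helper below is a sketch with a hypothetical function name; it only reuses the "zoxide query" invocation already present in the plugin and requires the zoxide binary on PATH.

# Sketch: query zoxide for the best-matching directory from plain Python
from subprocess import Popen, PIPE

def zoxide_query(*keywords):
    p = Popen(["zoxide", "query", *keywords], stdout=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate()
    if p.returncode != 0:
        raise RuntimeError(stderr.decode("utf-8").strip() or "zoxide: unexpected error")
    return stdout.decode("utf-8").strip()

# print(zoxide_query("proj"))  # would print the highest-ranked matching directory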
from __future__ import division

# These functions have their own module in order to be compiled with the right
# __future__ flag (and be tested alongside the 2.x legacy division operator).

def truediv_usecase(x, y):
    return x / y

def itruediv_usecase(x, y):
    x /= y
    return x
python
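With the __future__ import in effect (and in all of Python 3), / always performs true division while // floors, regardless of operand types; the augmented form /= used by itruediv_usecase behaves the same way:

print(3 / 2)   # 1.5 -- true division, even for two ints
print(3 // 2)  # 1   -- floor division

x = 7
x /= 2         # augmented true division, as in itruediv_usecase
print(x)       # 3.5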
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import numpy as np from tensorflow.python import pywrap_tensorflow from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import function from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.layers.pooling import max_pooling3d from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import custom_gradient from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import gradients from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_grad # pylint: disable=unused-import from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.training import training class BackpropTest(test.TestCase): @test_util.run_in_graph_and_eager_modes def testAggregateGradients(self): def fn(x): ind1 = constant_op.constant(np.array([0, 1])) ind2 = constant_op.constant(np.array([2, 3])) ind3 = constant_op.constant(np.array([1, 3])) # A mixture of IndexedSlices and dense tensor to aggregate. 
g1 = embedding_ops.embedding_lookup(x, ind1) g2 = embedding_ops.embedding_lookup(x, ind2) g3 = embedding_ops.embedding_lookup(x, ind3) g4 = math_ops.reduce_sum(x * constant_op.constant(2.0)) return g1 * g2 * g3 * g4 var_np = np.random.rand(4, 2).astype(np.float32) var = constant_op.constant(var_np) grad = backprop.gradients_function(fn, [0])(var)[0] grad = self.evaluate(ops.convert_to_tensor(grad)) if not context.executing_eagerly(): tf_var = array_ops.constant(var_np, dtypes.float32) tf_ind1 = array_ops.constant([0, 1]) tf_ind2 = array_ops.constant([2, 3]) tf_ind3 = array_ops.constant([1, 3]) tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1) tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2) tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3) tf_g4 = math_ops.reduce_sum(tf_var * 2.0, reduction_indices=(0, 1)) tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4 tf_grad = gradients.gradients(tf_y, [tf_var])[0] tf_dense_grad = math_ops.unsorted_segment_sum( tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0]) self.assertAllClose(grad, self.evaluate(tf_dense_grad)) def testImplicitGradWithResourceVariable(self): x = resource_variable_ops.ResourceVariable( initial_value=constant_op.constant(1.0), name='x') def fn(): b = constant_op.constant(2.0) c = math_ops.add(x.value(), b) return math_ops.add(c, constant_op.constant(3.0)) grads_and_vars = backprop.implicit_grad(fn)() self.assertAllEqual(grads_and_vars[0][0], 1.0) self.assertAllEqual(id(grads_and_vars[0][1]), id(x)) def testGradientInsideLoop(self): with ops.Graph().as_default(): v = resource_variable_ops.ResourceVariable(1.0) def body(_): _ = v + 1.0 # This reads the variable inside the loop context with backprop.GradientTape() as t: result = v * 2 self.assertTrue(t.gradient(result, v) is not None) return 1.0 control_flow_ops.while_loop(lambda i: False, body, [1.0]) def testWhereGradient(self): # Note: where is special because only some of its arguments are of # differentiable dtypes. 
def f(x): return array_ops.where(x < 10, x, x * x) g = backprop.gradients_function(f) self.assertAllEqual(g(5.)[0], 1.0) self.assertAllEqual(g(50.)[0], 100.0) def testTwoTargets(self): with backprop.GradientTape() as t: x = constant_op.constant(3.0) y = constant_op.constant(2.0) t.watch([x, y]) xx = 2 * x yy = 3 * y dx, dy = t.gradient([xx, yy], [x, y]) self.assertAllEqual(dx, 2.0) self.assertAllEqual(dy, 3.0) def testOutputGradUsedInComputation(self): with backprop.GradientTape() as t: x = constant_op.constant(3.0) y = constant_op.constant(2.0) t.watch([x, y]) loss = x * y dx, = t.gradient([loss, x], [x], output_gradients=[1.0, 2.0]) self.assertAllEqual(dx, 4.0) def testDy(self): def f(x): return x grad_fn = backprop.gradients_function(f) self.assertAllEqual(2., grad_fn(1., dy=2.)[0]) def testGradientInteger(self): def f(x): return x + x int_tensor = constant_op.constant(1) self.assertEqual(backprop.gradients_function(f)(int_tensor)[0], None) def testErrors(self): @custom_gradient.custom_gradient def f(x): def grad(_): raise RuntimeError('x') return x, grad # TODO(apassos) raise the right error here with self.assertRaises(RuntimeError): backprop.gradients_function(f)(constant_op.constant(1.0)) def testGradientsFunctionInCustomGradient(self): @custom_gradient.custom_gradient def f(x): (y,) = backprop.gradients_function(lambda x: x * x)(x) def grad(dy): return [2 * dy] return y, grad self.assertAllEqual(f(1.0), 2.0) def testImplicitGradOverEmbeddingLookup(self): batch_size = 8 embedding_size = 512 vocab_size = 1000 lrn_rate = 0.1 random_init = random_ops.random_uniform([vocab_size, embedding_size]) x = array_ops.ones((batch_size), dtypes.int64) embedding = resource_variable_ops.ResourceVariable( initial_value=random_init, dtype=dtypes.float32, name='embedding') def f(): embedded_x = embedding_ops.embedding_lookup(embedding, x) return constant_op.constant(1.0, dtypes.float32) - embedded_x grad = backprop.implicit_grad(f)()[0][0] opt = training.GradientDescentOptimizer(lrn_rate) with ops.Graph().as_default(), self.cached_session(): tf_x = array_ops.ones((batch_size), dtypes.int64) # TODO(ashankar,apassos): Change to ResourceVariable. 
tf_embedding = variables.Variable( random_init.numpy(), name='tf_embedding') tf_embedded_x = embedding_ops.embedding_lookup(tf_embedding, tf_x) tf_y = 1.0 - tf_embedded_x tf_grad = gradients.gradients(tf_y, [tf_embedding])[0] tf_opt = training.GradientDescentOptimizer(0.1) tf_embedding.initializer.run() self.assertAllClose(tf_grad.indices.eval(), grad.indices) self.assertAllClose(tf_grad.values.eval(), grad.values) tf_opt.apply_gradients([(tf_grad, tf_embedding)]).run() expected = self.evaluate(tf_embedding) opt.apply_gradients([(grad, embedding)]) self.assertAllClose(expected, embedding.read_value()) def testImplicitGradOrdering(self): v0 = resource_variable_ops.ResourceVariable(1.0) v1 = resource_variable_ops.ResourceVariable(2.0) def f(): x = v1 * v1 y = v0 * v0 return x + y grads = backprop.implicit_grad(f)() ordered_variables = [x[1] for x in grads] self.assertTrue(ordered_variables[0] is v0) self.assertTrue(ordered_variables[1] is v1) def testTapeNoOpGradient(self): x = constant_op.constant(3.0) with backprop.GradientTape() as t: t.watch(x) y = x self.assertEqual(t.gradient(y, x).numpy(), 1.0) def testTapeIdentityGradientIsIdentity(self): x = constant_op.constant(3.0) with backprop.GradientTape() as t: t.watch(x) y = array_ops.identity(x) self.assertEqual(t.gradient(y, x).numpy(), 1.0) def testTapeGradientMultiTargetOneIsSource(self): x = constant_op.constant(2.0) with backprop.GradientTape() as t: t.watch(x) y = x*x self.assertEqual(t.gradient([x, y], x).numpy(), 5.0) def testTapeNoOpGradientWithMultiTargetAllSource(self): x = constant_op.constant(3.0) with backprop.GradientTape() as t: t.watch(x) y = x self.assertEqual(t.gradient([y, y], x).numpy(), 2.0) def testTapeNoOpGradientWithMultiTargetMultiSource(self): x = constant_op.constant(3.0) y = constant_op.constant(5.0) with backprop.GradientTape() as t: t.watch(x) t.watch(y) z = y * y self.assertAllEqual(t.gradient([x, y, z], [x, y]), [1.0, 11.0]) def testTapeNoOpOnVariableIsIdentity(self): v0 = resource_variable_ops.ResourceVariable(1.0) with backprop.GradientTape() as t: y = v0.read_value() self.assertEqual(t.gradient(y, v0).numpy(), 1.0) @test_util.assert_no_new_tensors @test_util.assert_no_garbage_created def testTapeNoOpGradient2By2(self): a_2_by_2 = constant_op.constant(2.0, shape=[2, 2]) with backprop.GradientTape(persistent=True) as tape: tape.watch(a_2_by_2) dy_dy = tape.gradient(a_2_by_2, [a_2_by_2])[0] self.assertAllEqual(dy_dy.numpy(), constant_op.constant(1.0, shape=[2, 2]).numpy()) @test_util.assert_no_new_pyobjects_executing_eagerly def testTapeNoOpGradientMultiTarget2By2(self): a_2_by_2 = constant_op.constant(2.0, shape=[2, 2]) with backprop.GradientTape(persistent=True) as tape: tape.watch(a_2_by_2) dy_dy = tape.gradient([a_2_by_2, a_2_by_2], [a_2_by_2])[0] self.assertAllEqual(dy_dy.numpy(), constant_op.constant(2.0, shape=[2, 2]).numpy()) def testTapeStopRecording(self): with backprop.GradientTape() as t: x = resource_variable_ops.ResourceVariable(1.0) with t.stop_recording(): y = x * x self.assertEqual(t.gradient(y, x), None) def testTapeStopStartRecording(self): with backprop.GradientTape(persistent=True) as t: x = resource_variable_ops.ResourceVariable(1.0) x2 = x * 2 # This should be differentiated through. 
with t.stop_recording(): y = x2 * x2 z = x2 * x2 self.assertEqual(t.gradient(y, x2), None) # If the x*2 was not differentiated through, this would be 2.0, not 4.0 self.assertEqual(t.gradient(z, x2).numpy(), 4.0) def testTapeReset(self): with backprop.GradientTape() as t: v = resource_variable_ops.ResourceVariable(1.0) loss = v * v t.reset() loss += v * v self.assertAllEqual(t.gradient(loss, v), 2.0) def testAutomaticWatchedVariables(self): with backprop.GradientTape() as t: self.assertEqual(0, len(t.watched_variables())) v = resource_variable_ops.ResourceVariable(1.0) loss = v * v self.assertAllEqual([v], t.watched_variables()) t.reset() self.assertEqual(0, len(t.watched_variables())) loss += v * v self.assertAllEqual([v], t.watched_variables()) def testExplicitWatchedVariables(self): with backprop.GradientTape() as t: self.assertEqual(0, len(t.watched_variables())) v = resource_variable_ops.ResourceVariable(1.0) t.watch(v) self.assertAllEqual([v], t.watched_variables()) t.reset() self.assertEqual(0, len(t.watched_variables())) t.watch(v) self.assertAllEqual([v], t.watched_variables()) @test_util.assert_no_new_tensors def testGradientNone(self): def loss(x, l): return math_ops.reduce_mean( nn_ops.softmax_cross_entropy_with_logits(logits=x, labels=l), constant_op.constant([0])) logits = constant_op.constant([[0.0, 0.0]]) labels = constant_op.constant([[1.0, 0.0]]) # softmax_cross_entropy_with_logits returns two outputs and in this case the # gradient wrt the second is None. g, = backprop.gradients_function(loss, [0])(logits, labels) self.assertAllEqual(g.numpy(), [[-0.5, 0.5]]) @test_util.run_in_graph_and_eager_modes def testGradientWithinTapeBlock(self): v1 = resource_variable_ops.ResourceVariable(1.) self.evaluate(v1.initializer) with backprop.GradientTape() as t: loss = 2 * v1 grad = t.gradient(loss, v1) self.assertAllEqual(self.evaluate(grad), 2.0) with backprop.GradientTape(persistent=True) as t: loss = 2 * v1 grad = t.gradient(loss, v1) self.assertAllEqual(self.evaluate(grad), 2.0) @test_util.run_in_graph_and_eager_modes def testNestedSelfContexts(self): v1 = resource_variable_ops.ResourceVariable(1.) 
self.evaluate(v1.initializer) with backprop.GradientTape() as t: with self.assertRaises(ValueError): with t: pass @test_util.assert_no_new_tensors def testSecondGrad(self): def first(x): l = constant_op.constant([[0.0]]) x = nn_ops.softmax_cross_entropy_with_logits(labels=l, logits=x) x = math_ops.reduce_sum(x, constant_op.constant([0])) return x def second(x): grad = backprop.gradients_function(first, [0])(x)[0] return math_ops.reduce_sum(grad, constant_op.constant([0])) f = constant_op.constant([[0.1]]) grad = backprop.gradients_function(second, [0])(f)[0] self.assertAllEqual([[0.0]], grad) @test_util.run_in_graph_and_eager_modes def testWatchingIsTapeLocal(self): x1 = resource_variable_ops.ResourceVariable(2.0, trainable=False) x2 = resource_variable_ops.ResourceVariable(2.0, trainable=False) with backprop.GradientTape() as tape1: with backprop.GradientTape() as tape2: tape1.watch(x1) tape2.watch([x1, x2]) y = x1 ** 3 z = x2 ** 2 dy, dz = tape2.gradient([y, z], [x1, x2]) d2y, d2z = tape1.gradient([dy, dz], [x1, x2]) self.evaluate([x1.initializer, x2.initializer]) self.assertEqual(self.evaluate(d2y), 12.0) self.assertIsNone(d2z) @test_util.assert_no_new_tensors def testMakeVJP(self): def f(x): return x * x wrapped_fn = backprop.make_vjp(f, persistent=False) result, vjp = wrapped_fn(constant_op.constant(3.0)) self.assertAllEqual(result, 9.0) self.assertAllEqual(vjp(2.0)[0], 12.0) def testPersistentMakeVJP(self): def f(x): return x * x wrapped_fn = backprop.make_vjp(f, persistent=True) _, vjp = wrapped_fn(constant_op.constant(3.0)) vjp_result1 = vjp(2.0)[0] vjp_result2 = vjp(2.0)[0] self.assertAllEqual(vjp_result1, vjp_result2, 12.0) @test_util.assert_no_new_tensors def testGradGrad(self): def sq(x): return x * x def grad(x): value = backprop.gradients_function(sq, [0])(x)[0] return value gradgrad = backprop.gradients_function(grad, [0]) self.assertAllEqual(gradgrad(constant_op.constant(3.0))[0], 2.0) @test_util.assert_no_new_tensors def testGradGradExp(self): def grad(x): value = backprop.gradients_function(math_ops.exp, [0])(x)[0] return value gradgrad = backprop.gradients_function(grad, [0]) self.assertAllEqual(gradgrad(constant_op.constant(0.0))[0], 1.0) @test_util.assert_no_new_tensors def testStopGradient(self): grad = backprop.gradients_function( lambda x: array_ops.stop_gradient(math_ops.argmax(x))) self.assertAllEqual(grad([0.0])[0], None) @test_util.assert_no_new_tensors def testArgmax(self): def argmax(x): i = math_ops.argmax(x) return array_ops.stop_gradient(i) grad = backprop.gradients_function(argmax) self.assertAllEqual(grad([0.0])[0], None) @test_util.assert_no_new_tensors def testGPU(self): if not context.context().num_gpus(): self.skipTest('No GPUs found') def fn(x): with context.device('/gpu:0'): b = constant_op.constant(2.0) c = math_ops.add(x.gpu(), b) # TODO(apassos): remove cpu below by making TensorVSPace aware # of devices. 
return math_ops.add(c, constant_op.constant(3.0)).cpu() grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0] self.assertAllEqual(grad, 1.0) @test_util.assert_no_new_tensors def testGPUImplicitGrad(self): if not context.context().num_gpus(): self.skipTest('No GPU found') with context.device('gpu:0'): v = resource_variable_ops.ResourceVariable( constant_op.constant(1.0), name='v') def f(): with context.device('gpu:0'): return v.read_value() self.assertEqual( backprop.implicit_grad(f)()[0][0].cpu().numpy(), 1.0) @test_util.assert_no_new_tensors def testCPU(self): def fn(x): b = constant_op.constant(2.0) c = math_ops.add(x, b) return math_ops.add(c, constant_op.constant(3.0)) grad = backprop.gradients_function(fn, [0])(constant_op.constant(1.0))[0] self.assertAllEqual(grad, 1.0) @test_util.assert_no_new_tensors def testTensorCopyGPU2CPU2GPU(self): if not context.context().num_gpus(): self.skipTest('No GPUs found') def f(a, b): return a.cpu() + b.cpu() with context.device('/gpu:0'): a = constant_op.constant(1.0) b = constant_op.constant(2.0) grad = backprop.gradients_function(f, [0])(a, b)[0] self.assertAllEqual(grad, 1.0) @test_util.assert_no_new_tensors def testEmptyParams(self): def fn(a, b): return a * b x = constant_op.constant(1.0) y = constant_op.constant(2.0) dx, dy = backprop.gradients_function(fn)(x, y) self.assertAllEqual(dx, y.numpy()) self.assertAllEqual(dy, x.numpy()) @test_util.assert_no_new_tensors def testUnconnectedNone(self): v = resource_variable_ops.ResourceVariable( 1.0, name='testUnconnectedNone') def f(): v.read_value() return constant_op.constant(1.0) self.assertEqual(backprop.implicit_grad(f)()[0][0], None) @test_util.assert_no_new_tensors def testGradientTapeReEnterContext(self): g = backprop.GradientTape() with g: x = constant_op.constant(3.0) g.watch(x) y = 2*x with g: z = 2*y grad = g.gradient(target=z, sources=[x]) self.assertEqual(self.evaluate(grad), [4.0]) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testGradientTapeRepeatedSource(self): with backprop.GradientTape(persistent=False) as g: x = constant_op.constant(3.0) g.watch(x) y = 2 * x grad = g.gradient(target=y, sources=[x, x]) self.assertEqual(self.evaluate(grad), [2.0, 2.0]) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testPersistentGradientTapeRepeatedSource(self): with backprop.GradientTape(persistent=True) as g: x = constant_op.constant(3.0) y = constant_op.constant(5.0) g.watch(x) g.watch(y) z = x * x + x * y grad = g.gradient(target=z, sources=[x, x]) self.assertEqual(self.evaluate(grad), [11.0, 11.0]) grad = g.gradient(target=z, sources=[y, x]) self.assertEqual(self.evaluate(grad), [3.0, 11.0]) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testGradientTapeStructure(self): with backprop.GradientTape(persistent=True) as g: # Using different constant values because constant tensors are # cached, leading to a different gradient then what one might expect. 
x1 = constant_op.constant(3.0) x2 = constant_op.constant(3.1) x3 = constant_op.constant(3.2) g.watch(x1) g.watch(x2) g.watch(x3) y = x1 + 2 * x2 + 3 * x3 self.assertEqual(self.evaluate(g.gradient(y, x1)), [1.0]) self.assertEqual(self.evaluate(g.gradient(y, (x1,))), (1.0,)) self.assertEqual(self.evaluate(g.gradient(y, (x1, x2))), (1.0, 2.0)) self.assertEqual(self.evaluate(g.gradient(y, [(x1, x2), (x2, x3)])), [(1.0, 2.0), (2.0, 3.0)]) self.assertEqual(self.evaluate(g.gradient(y, (x1, x2, [x1, x3]))), (1.0, 2.0, [1.0, 3.0])) self.assertEqual(self.evaluate(g.gradient(y, [x1, {'x2': x2, 'x3': x3}])), [1.0, {'x2': 2.0, 'x3': 3.0}]) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testGradientTape(self): with backprop.GradientTape() as g: x = constant_op.constant(3.0) g.watch(x) y = x * x with backprop.GradientTape() as gg: gg.watch(y) z = 2 * y inner_grad = gg.gradient(z, [y])[0] self.assertEqual(self.evaluate(inner_grad), 2.0) y += inner_grad grad = g.gradient(y, [x])[0] self.assertEqual(self.evaluate(grad), 6.0) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testGadientTapeCalledOnConstantTarget(self): with backprop.GradientTape() as g: x = variables.Variable([3.0]) y = variables.Variable([2.0]) with self.assertRaisesRegexp( ValueError, 'GradientTape.gradient is not supported for variable targets.'): g.gradient(x, y) @test_util.run_in_graph_and_eager_modes def testGradientTapeWithCond(self): x = constant_op.constant(3.0) def true_fn(): return x def false_fn(): return x * x with backprop.GradientTape() as g: g.watch(x) y = control_flow_ops.cond(x < x, true_fn, false_fn) if not context.executing_eagerly(): with self.assertRaisesRegexp(NotImplementedError, 'tf.gradients'): dy = g.gradient(y, [x])[0] else: dy = g.gradient(y, [x])[0] self.assertEqual(self.evaluate(dy), 6.0) @test_util.run_in_graph_and_eager_modes def testGradientTapeWithWhileLoop(self): i = constant_op.constant(1) x = constant_op.constant(2.) 
def cond(i, _): return i < 3 def body(i, x): return i + 1, x * 2 with backprop.GradientTape() as g: g.watch([x]) _, y = control_flow_ops.while_loop(cond, body, [i, x]) if not context.executing_eagerly(): with self.assertRaisesRegexp(NotImplementedError, 'tf.gradients'): dy = g.gradient(y, [x])[0] else: dy = g.gradient(y, [x])[0] self.assertEqual(self.evaluate(dy), 4.0) @test_util.assert_no_new_tensors def testGradientTapeGradientCalledMultipleTimes(self): with backprop.GradientTape() as g: x = constant_op.constant(3.0) g.watch(x) y = x * x z = y * y g.gradient(z, [x]) with self.assertRaisesRegexp( RuntimeError, 'GradientTape.gradient can only be called once'): g.gradient(y, [x]) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testPersistentTape(self): with backprop.GradientTape(persistent=True) as g: x = constant_op.constant(3.0) g.watch(x) y = x * x z = y * y dz_dx = g.gradient(z, [x])[0] self.assertEqual(self.evaluate(dz_dx), 4 * 3 * 3 * 3) dy_dx = g.gradient(y, [x])[0] self.assertEqual(self.evaluate(dy_dx), 2 * 3) del g @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testHigherOrderGradient(self): with backprop.GradientTape(persistent=True) as g: x = constant_op.constant(3.0) g.watch(x) y = x ** 3 # y := x^3 dy_dx = g.gradient(y, x) # dy/dx := 3x^2 d2y_dx2 = g.gradient(dy_dx, x) # d2y/dx2 := 6x d3y_dx3 = g.gradient(d2y_dx2, x) # d3y/dx3 := 6 x = 3 self.assertEqual(self.evaluate(y), x ** 3) self.assertEqual(self.evaluate(dy_dx), 3 * x ** 2) self.assertEqual(self.evaluate(d2y_dx2), 6 * x) self.assertEqual(self.evaluate(d3y_dx3), 6) del g @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testPersistentNestedTape(self): with backprop.GradientTape(persistent=True) as g: x = constant_op.constant(3.0) g.watch(x) y = x * x with backprop.GradientTape(persistent=True) as gg: gg.watch(y) z = 2 * y for _ in range(2): inner_grad = gg.gradient(z, [y])[0] self.assertEqual(self.evaluate(inner_grad), 2.0) y += inner_grad del gg grad = g.gradient(y, [x])[0] self.assertEqual(self.evaluate(grad), 6.0) grad = g.gradient(z, [x])[0] self.assertEqual(self.evaluate(grad), 12.0) del g @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testGradientTapeVariable(self): v = resource_variable_ops.ResourceVariable(1.0, name='v') self.evaluate(v.initializer) with backprop.GradientTape() as g: y = v * v grad = g.gradient(y, [v])[0] self.assertAllEqual(self.evaluate(grad), 2.0) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testNestedGradients(self): x = constant_op.constant(3.0) with backprop.GradientTape() as g: g.watch(x) y = x * x z = y * y dz_dx, dz_dy = g.gradient(z, [x, y]) self.assertEqual(self.evaluate(dz_dx), 108.0) self.assertEqual(self.evaluate(dz_dy), 18.0) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testUnconnectedGradientsDefault(self): x = constant_op.constant(1.0) y = constant_op.constant(3.0) with backprop.GradientTape() as g: g.watch([x, y]) z = y * 2 dz_dx = g.gradient(z, x) self.assertEqual(dz_dx, None) @test_util.assert_no_new_tensors @test_util.run_in_graph_and_eager_modes def testUnconnectedGradientsZeros(self): x = constant_op.constant(1.0, shape=[2, 2]) y = constant_op.constant(3.0) with backprop.GradientTape() as g: g.watch([x, y]) z = y * 2 dz_dx = g.gradient(z, x, unconnected_gradients='zero') self.assertAllEqual([[0.0, 0.0], [0.0, 0.0]], self.evaluate(dz_dx)) @test_util.assert_no_new_tensors 
@test_util.run_in_graph_and_eager_modes def testUnknownUnconnectedGradientsValueGiven(self): x = constant_op.constant(1.0) y = constant_op.constant(1.0) with backprop.GradientTape() as g: g.watch([x, y]) z = y * 2 with self.assertRaisesRegexp( ValueError, "Unknown value for unconnected_gradients: 'nonsense'"): g.gradient(z, x, unconnected_gradients='nonsense') @test_util.run_in_graph_and_eager_modes def testUnconnectedGradientsNestedDefunZeros(self): @function.defun def f(x): return x * x @function.defun def h(y): z = f(y) return array_ops.stop_gradient(z) x = constant_op.constant(1.0) with backprop.GradientTape() as g: g.watch(x) y = h(x) dy_dx = g.gradient(y, x, unconnected_gradients='zero') self.assertEqual(0.0, self.evaluate(dy_dx)) @test_util.assert_no_new_tensors def testEmptyParamsForValueAndGradFunction(self): def fn(a, b): return a * b val_and_grads_fn = backprop.val_and_grad_function(fn) x = 2.0 y = 3.0 val, (dx, dy) = val_and_grads_fn(x, y) self.assertAllClose(val, x * y) self.assertAllEqual(dx, y) self.assertAllEqual(dy, x) @test_util.assert_no_new_tensors def testNonEmptyParamsForValueAndGradFunction(self): def fn(a, b): return a * b val_and_grad_fn = backprop.val_and_grad_function(fn, params=[1]) x = 2.0 y = 3.0 val, grads = val_and_grad_fn(x, y) self.assertAllClose(val, x * y) self.assertEqual(1, len(grads)) self.assertAllEqual(grads[0], x) @test_util.assert_no_new_tensors def testTensorCopyCPU2GPU2CPU(self): if not context.context().num_gpus(): self.skipTest('No GPUs found') # forward: a (cpu->gpu) -> add (gpu) -> c (gpu->cpu) -> add (cpu) -> e (cpu) # back: e (cpu) -> add (cpu) -> c (cpu->gpu) -> add (gpu) -> grad (gpu->cpu) def f(a, b): with context.device('/gpu:0'): c = math_ops.add(a.gpu(0), b.gpu(0)) return math_ops.add(c.cpu(), constant_op.constant(3.0)) with context.device('/cpu:0'): a = constant_op.constant(1.0) b = constant_op.constant(2.0) grad = backprop.gradients_function(f, [0])(a, b)[0] self.assertAllEqual(grad, 1.0) def testGetAttrType(self): typ = backprop.op_attr_type('Add', 'T') self.assertEqual(typ, pywrap_tensorflow.TF_ATTR_TYPE) def testGetAttrList(self): typ = backprop.op_attr_type('MaxPool', 'ksize') self.assertEqual(typ, [pywrap_tensorflow.TF_ATTR_INT]) def testMakeAttrType(self): self.assertEqual(dtypes.float32, backprop.make_attr(pywrap_tensorflow.TF_ATTR_TYPE, 1)) def testMakeAttrTypeList(self): self.assertEqual([dtypes.float32], backprop.make_attr([pywrap_tensorflow.TF_ATTR_TYPE], [1])) def testMulType(self): def mul(x): return math_ops._mul_dispatch(x, x) # pylint: disable=protected-access self.assertAllEqual( backprop.gradients_function(mul)(3.0)[0].numpy(), 6.0) def testMakeAttrShape(self): for s in ([], None, [1, 2, 3], [None, None], [1, None, 3]): expected = tensor_shape.TensorShape(s).as_proto() actual = backprop.make_attr(pywrap_tensorflow.TF_ATTR_SHAPE, s) self.assertEqual( expected, actual, msg=('For shape %r, expected %r != %r actual' % (s, expected, actual))) def testMakeAttrShapeList(self): shape_list = [[], None, [1, 2, 3], [None, None], [1, None, 3]] self.assertEqual( [tensor_shape.TensorShape(s).as_proto() for s in shape_list], backprop.make_attr([pywrap_tensorflow.TF_ATTR_SHAPE], shape_list)) def testArgsGradientFunction(self): def f(*args): return args[0] * args[0] grad = backprop.gradients_function(f) self.assertAllEqual(grad(1.0)[0], 2.0) def testPartial(self): def f(x, y): return x * y part = functools.partial(f, constant_op.constant(2.0)) self.assertAllEqual( backprop.gradients_function(part)(constant_op.constant(1.0))[0], 
2.0) def testReturnSameThing(self): def f(x): return x, 2 * x self.assertAllEqual(backprop.gradients_function(f)(1.0)[0], 3.0) @test_util.assert_no_new_tensors def testExceptionSafety(self): def f(unused_x): raise ValueError() try: backprop.gradients_function(f)(1.0) except ValueError: pass def real_f(x): return x * x self.assertAllEqual(backprop.gradients_function(real_f)(1.0)[0], 2.0) @test_util.assert_no_new_tensors def testMultiValueConvertToTensor(self): x = resource_variable_ops.ResourceVariable( initial_value=array_ops.constant([1.0]), name='x') def fn(): a = math_ops.add(x.value(), 1.0) # Make sure convert_to_tensor works correctly with list of TensorNodes. b = array_ops.stack([a, a], axis=0) return math_ops.reduce_mean(b) grad = backprop.implicit_grad(fn)()[0][0] self.assertAllEqual([1.0], grad) def testOutput(self): def multiout(x): return x + 2, x * x x = constant_op.constant([0.0, 1.0, 2.0]) grad = backprop.gradients_function(multiout)(x)[0] self.assertAllEqual([1.0, 3.0, 5.0], grad) def testMultiValuePreservesIfNotDiffedAgainst(self): def tfe_conv2d(timage, tkernel, conv2dstrides): return nn_ops.conv2d(timage, tkernel, conv2dstrides, 'SAME') i = constant_op.constant([[[[1.0]]]]) k = constant_op.constant([[[[2.0]]]]) s = [1, 1, 1, 1] grad = backprop.gradients_function(tfe_conv2d, params=(0,))(i, k, s)[0] self.assertAllEqual([[[[2.0]]]], grad) def testSameObjectForMultipleArguments(self): def f(x, y): return math_ops.multiply(x, y) g = backprop.gradients_function(f) def np_g(x, y): dx, dy = g(x, y) return [dx.numpy(), dy.numpy()] x = constant_op.constant(1.) self.assertAllEqual([1., 1.], np_g(x, x)) x = 1. self.assertAllEqual([1., 1.], np_g(x, x)) x = constant_op.constant([[1.]]) self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x)) x = [[1.]] self.assertAllEqual([[[1.]], [[1.]]], np_g(x, x)) v = resource_variable_ops.ResourceVariable( initial_value=1., name='testSameObjectForMultipleArguments.Variable') self.assertAllEqual([1., 1.], np_g(v, v)) @test_util.assert_no_new_tensors def testImplicitGradientsCustomGradientAndCachedVariableValue(self): @custom_gradient.custom_gradient def my_square(x): result = math_ops.square(x) def grad(dr): return 2 * dr * x + 1 return result, grad x = resource_variable_ops.ResourceVariable( initial_value=3., name='X.' 
+ self.id()) def f(): return my_square(x) g = backprop.implicit_grad(f) grads_and_vars = g() self.assertEqual(1, len(grads_and_vars)) grad, var = grads_and_vars[0] self.assertAllEqual(7, grad) self.assertAllEqual(x, var) @test_util.assert_no_new_tensors def testCustomGradient(self): @custom_gradient.custom_gradient def my_mul(x, y): result = x*y def grad(dr): return [dr*y, dr*x] return result, grad lr = 0.25 x = resource_variable_ops.ResourceVariable(2., name='x') def loss(x): return my_mul(2., x.read_value()) loss_grads_fn = backprop.implicit_val_and_grad(loss) losses = [] for _ in range(5): loss, grads_and_vars = loss_grads_fn(x) losses.append(loss.numpy()) for (grad, var) in grads_and_vars: var.assign_sub(lr*grad) self.assertAllEqual(losses, [4.0, 3., 2., 1., 0.]) @test_util.assert_no_new_tensors def testCustomGradientIdentity(self): @custom_gradient.custom_gradient def my_identity(x): def grad(dresult): return [2 * dresult] return x, grad self.assertAllEqual(backprop.gradients_function(my_identity)(1.0)[0], 2.0) def testDifferentiatingFunctionThatReturnsNone(self): def fn(x, y): result = x*y # pylint: disable=unused-variable x = constant_op.constant(1) y = constant_op.constant(2) loss_grads_fn = backprop.implicit_val_and_grad(fn) with self.assertRaisesRegexp( ValueError, 'Cannot differentiate a function that returns None; ' 'did you forget to return a value from fn?'): loss_grads_fn(x, y) val_and_grads_fn = backprop.val_and_grad_function(fn) with self.assertRaisesRegexp( ValueError, 'Cannot differentiate a function that returns None; ' 'did you forget to return a value from fn?'): val_and_grads_fn(x, y) def testZerosCacheDoesntLeakAcrossGraphs(self): with ops.Graph().as_default(): def get_grad(): with ops.Graph().as_default(), self.cached_session(): t = constant_op.constant(1, dtype=dtypes.float32, shape=(10, 4)) x = constant_op.constant(2, dtype=dtypes.float32, shape=(10, 4)) with backprop.GradientTape() as tape: tape.watch(x) x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1) y1 = x1**2 y = array_ops.concat([y1, t], axis=1) return self.evaluate(tape.gradient(y, x)) grad1 = get_grad() grad2 = get_grad() self.assertAllEqual(grad1, grad2) @test_util.run_in_graph_and_eager_modes def testSelectivelyWatchVariables(self): x1 = resource_variable_ops.ResourceVariable(1.0) x2 = resource_variable_ops.ResourceVariable(1.0) with backprop.GradientTape(watch_accessed_variables=False) as tape: tape.watch(x2) y = x1**2 z = x2**3 self.assertTupleEqual(tape.watched_variables(), (x2,)) dy, dz = tape.gradient([y, z], [x1, x2]) self.evaluate([x1.initializer, x2.initializer]) self.assertIsNone(dy) self.assertEqual(self.evaluate(dz), 3.0) @test_util.run_in_graph_and_eager_modes def testDifferentiatingScalarCache(self): # In the following test, if x2 = x1 (i.e the objects are the exact same), # then y is essentially, 2*x1, and dy/dx1 = 2. # When we had a pure scalar cache in eager, this would be the case. This # test prevents us from going back to that case. with backprop.GradientTape(persistent=False) as g: x1 = constant_op.constant(3.0) x2 = constant_op.constant(3.0) g.watch(x1) g.watch(x2) y = x1 + x2 grad = g.gradient(target=y, sources=[x1]) self.assertEqual(self.evaluate(grad), [1.0]) def testVariablesAndConstantsProduceTheSameGradients(self): # In the following test, differentiating [y, z] against [a, b] gives: # (dy/da + dz/da, dy/db + dz/db). # If a and b are the same constant, dz/da will not be 0 (which it should # be). 
# This is solved by using variable since doing a read_value on a tensor will # produce a new tensor and corresponding TensorHandle, and not reuse the # same tensor (which would happen if we are using a cache and reusing # EagerTensor objects). def get_grads(a, b): with backprop.GradientTape() as tape: tape.watch([a, b]) y = a**3 z = b**2 return tape.gradient([y, z], [a, b]) gradients_constants = get_grads( constant_op.constant(2.0), constant_op.constant(2.0)) gradients_variables = get_grads( resource_variable_ops.ResourceVariable(2.0), resource_variable_ops.ResourceVariable(2.0)) self.assertAllEqual(gradients_constants, gradients_variables) def testUnknownShapes(self): with ops.Graph().as_default(): with backprop.GradientTape() as tape: a = array_ops.placeholder(dtype=dtypes.float32, shape=None) tape.watch(a) b = a**3 db_da = tape.gradient(b, a) with self.cached_session() as sess: self.assertEqual((8.0, 12.0), sess.run((b, db_da), feed_dict={a: 2.0})) @test_util.run_in_graph_and_eager_modes def testCustomGradientInEagerAndGraph(self): @custom_gradient.custom_gradient def f(x): y = x * x def grad(dy): return [4 * dy] return y, grad with backprop.GradientTape() as t: c = constant_op.constant(1.0) t.watch(c) g = f(c) self.assertAllEqual(self.evaluate(t.gradient(g, c)), 4.0) @test_util.run_in_graph_and_eager_modes def testMaxPooling3DGradient(self): def forward(a): r = max_pooling3d(a, pool_size=pool_size, strides=strides, padding='SAME') return r input_sizes = [1, 3, 2, 4, 1] pool_size = (2, 2, 1) strides = (1, 1, 1) total_size = np.prod(input_sizes) x = np.arange(1, total_size + 1, dtype=np.float32) aa = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32) da = backprop.gradients_function(forward)(aa) if not context.executing_eagerly(): tf_aa = constant_op.constant(x, shape=input_sizes, dtype=dtypes.float32) tf_max = max_pooling3d( tf_aa, pool_size=pool_size, strides=strides, padding='SAME') tf_da = gradients.gradients(tf_max, [tf_aa]) self.assertAllEqual(da[0], tf_da[0].eval()) if __name__ == '__main__': test.main()
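# Usage sketch (added for illustration; not part of the original test file):
# the tests above drive tensorflow.python.eager.backprop directly, and the
# same behaviour is available through the public tf.GradientTape API.
# Assumes a TensorFlow build with eager execution enabled.
#
# import tensorflow as tf
#
# x = tf.constant(3.0)
# with tf.GradientTape() as tape:
#     tape.watch(x)            # constants must be watched explicitly
#     y = x * x
# dy_dx = tape.gradient(y, x)  # 6.0, matching d(x^2)/dx at x = 3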
python
""" Manage sound and music """ from engine.const import CONST snd_manager = None class SndManager(): def __init__(self): self.sounds = {} self.permanent_sound = [] self.playlist = [] self.music_index = 0 self.music = None self.sounds_playing = [] def set_playlist(self,music_list): pass def add_music_to_playlist(self, name): pass def play_music(self,name): pass def update_music_status(self): pass def sanitize_sounds(self,delete_sounds=[]): del_snd_tmp = [] if delete_sounds == []: for snd_filename in self.sounds.keys(): if snd_filename not in self.permanent_sound: del_snd_tmp.append(snd_filename) else: del_snd_tmp = delete_sounds for snd_filename in del_snd_tmp: del self.sounds[snd_filename] snd_manager = SndManager() if CONST.render == 'sfml': from render_engine.sfml_engine.sfml_snd_manager import SFMLSndManager snd_manager = SFMLSndManager() ''' elif CONST.render == 'pookoo': def set_playlist(music_list): """ Set a new playlist and play the first element """ global playlist, music music = pookoo.audio.AudioStreamObject(playlist[0]) def add_music_to_playlist(self, name): """ Add a music at the end of the playlist """ global playlist playlist.append(name) def fadeout_music(t=0): """TODO: Fadeout and then stop it after time t (seconds)""" pass def play_music(name): """ Set the playlist as one element and play it """ global playlist set_playlist([name]) def update_music_status(): """ Switch to next music if it's over, must be called to have smooth transition """ global music, music_index, playlist, sounds_playing if CONST.render == 'sfml': pass def check_music_status(): """ Return True if a music is currently playing """ global music if CONST.render == 'sfml': return music.status == sfml.Music.STOPPED def load_sound(name, permanent=False): """Load a sound in the system and returns it""" global sounds, permanent_sound try: sounds[name] except KeyError: if CONST.render == 'sfml': sounds[name] = sfml.SoundBuffer.from_file(name) elif CONST.render == 'pookoo': sounds[name] = pookoo.audio.AudioSoundObject(name) if permanent: permanent_sound.append(name) return sounds[name] def play_sound(sound): """ Plays a given sound """ global sounds_playing if CONST.render == 'sfml': sound_playing = sfml.Sound(sound) sound_playing.play() sounds_playing.append(sound_playing) '''
python
#!/usr/bin/env python # -*- coding: utf-8 -*- import base import cw class Summary(base.CWBinaryBase): """見出しデータ(Summary.wsm)。 type:見出しデータには"-1"の値を付与する。 """ def __init__(self, parent, f, yadodata=False, nameonly=False, materialdir="Material", image_export=True, wpt120=False): base.CWBinaryBase.__init__(self, parent, f, yadodata, materialdir, image_export) self.type = -1 self.image = f.image() self.name = f.string() if nameonly: return self.description = f.string() self.author = f.string() self.required_coupons = f.string(True) self.required_coupons_num = f.dword() self.area_id = f.dword() if self.area_id <= 19999: self.version = 0 elif self.area_id <= 39999: self.version = 2 self.area_id = self.area_id - 20000 elif self.area_id <= 49999: self.version = 4 self.area_id = self.area_id - 40000 else: # version 5~6は存在しない self.version = 7 self.area_id = self.area_id - 70000 steps_num = f.dword() self.steps = [Step(self, f) for _cnt in xrange(steps_num)] flags_num = f.dword() self.flags = [Flag(self, f) for _cnt in xrange(flags_num)] if wpt120: return _w = f.dword() # 不明 if 0 < self.version: self.level_min = f.dword() self.level_max = f.dword() else: self.level_min = 0 self.level_max = 0 # タグとスキンタイプ。読み込みが終わった後から操作する self.skintype = "" self.tags = "" self.data = None def get_data(self): if self.data is None: if self.image: self.imgpath = self.export_image() else: self.imgpath = "" self.data = cw.data.make_element("Summary") prop = cw.data.make_element("Property") e = cw.data.make_element("Name", self.name) prop.append(e) e = cw.data.make_element("ImagePath", self.imgpath) prop.append(e) e = cw.data.make_element("Author", self.author) prop.append(e) e = cw.data.make_element("Description", self.description) prop.append(e) e = cw.data.make_element("Level") e.set("min", str(self.level_min)) e.set("max", str(self.level_max)) prop.append(e) e = cw.data.make_element("RequiredCoupons", self.required_coupons) e.set("number", str(self.required_coupons_num)) prop.append(e) e = cw.data.make_element("StartAreaId", str(self.area_id)) prop.append(e) e = cw.data.make_element("Tags", self.tags) prop.append(e) e = cw.data.make_element("Type", self.skintype) prop.append(e) self.data.append(prop) e = cw.data.make_element("Flags") for flag in self.flags: e.append(flag.get_data()) self.data.append(e) e = cw.data.make_element("Steps") for step in self.steps: e.append(step.get_data()) self.data.append(e) e = cw.data.make_element("Labels", "") self.data.append(e) return self.data @staticmethod def unconv(f, data): image = None name = "" description = "" author = "" required_coupons = "" required_coupons_num = 0 area_id = 0 steps = [] flags = [] variants = [] level_min = 0 level_max = 0 for e in data: if e.tag == "Property": for prop in e: if prop.tag == "Name": name = prop.text elif prop.tag in ("ImagePath", "ImagePaths"): image = base.CWBinaryBase.import_image(f, prop) elif prop.tag == "Author": author = prop.text elif prop.tag == "Description": description = prop.text elif prop.tag == "Level": level_min = int(prop.get("min")) level_max = int(prop.get("max")) elif prop.tag == "RequiredCoupons": required_coupons = prop.text required_coupons_num = int(prop.get("number")) elif prop.tag == "StartAreaId": level_max = int(prop.text) elif e.tag == "Flags": flags = e elif e.tag == "Steps": steps = e elif e.tag == "Variants": variants = e f.write_image(image) f.write_string(name) f.write_string(description) f.write_string(author) f.write_string(required_coupons, True) f.write_dword(required_coupons_num) f.write_dword(area_id + 
40000) f.write_dword(len(steps)) for step in steps: Step.unconv(f, step) f.write_dword(len(flags)) for flag in flags: Flag.unconv(f, flag) for variant in variants: f.check_wsnversion("4", u"コモン") f.write_dword(0) # 不明 f.write_dword(level_min) f.write_dword(level_max) class Step(base.CWBinaryBase): """ステップ定義。""" def __init__(self, parent, f, yadodata=False): base.CWBinaryBase.__init__(self, parent, f, yadodata) self.name = f.string() self.default = f.dword() self.variable_names = [f.string() for _cnt in xrange(10)] self.data = None def get_data(self): if self.data is None: self.data = cw.data.make_element("Step") self.data.set("default", str(self.default)) e = cw.data.make_element("Name", self.name) self.data.append(e) e = cw.data.make_element("Value0", self.variable_names[0]) self.data.append(e) e = cw.data.make_element("Value1", self.variable_names[1]) self.data.append(e) e = cw.data.make_element("Value2", self.variable_names[2]) self.data.append(e) e = cw.data.make_element("Value3", self.variable_names[3]) self.data.append(e) e = cw.data.make_element("Value4", self.variable_names[4]) self.data.append(e) e = cw.data.make_element("Value5", self.variable_names[5]) self.data.append(e) e = cw.data.make_element("Value6", self.variable_names[6]) self.data.append(e) e = cw.data.make_element("Value7", self.variable_names[7]) self.data.append(e) e = cw.data.make_element("Value8", self.variable_names[8]) self.data.append(e) e = cw.data.make_element("Value9", self.variable_names[9]) self.data.append(e) return self.data @staticmethod def unconv(f, data): name = "" default = int(data.get("default")) if data.getbool(".", "spchars", False): f.check_wsnversion("2", u"ステップ値中の特殊文字の展開") variable_names = [""] * 10 for e in data: if e.tag == "Name": name = e.text elif e.tag.startswith("Value"): variable_names[int(e.tag[5:])] = e.text f.write_string(name) f.write_dword(default) for variable_name in variable_names: f.write_string(variable_name) class Flag(base.CWBinaryBase): """フラグ定義。""" def __init__(self, parent, f, yadodata=False): base.CWBinaryBase.__init__(self, parent, f, yadodata) self.name = f.string() self.default = f.bool() self.variable_names = [f.string() for _cnt in xrange(2)] self.data = None def get_data(self): if self.data is None: self.data = cw.data.make_element("Flag") self.data.set("default", str(self.default)) e = cw.data.make_element("Name", self.name) self.data.append(e) e = cw.data.make_element("True", self.variable_names[0]) self.data.append(e) e = cw.data.make_element("False", self.variable_names[1]) self.data.append(e) return self.data @staticmethod def unconv(f, data): name = "" default = cw.util.str2bool(data.get("default")) if data.getbool(".", "spchars", False): f.check_wsnversion("2", u"フラグ値中の特殊文字の展開") variable_names = [""] * 2 for e in data: if e.tag == "Name": name = e.text elif e.tag == "True": variable_names[0] = e.text elif e.tag == "False": variable_names[1] = e.text f.write_string(name) f.write_bool(default) for variable_name in variable_names: f.write_string(variable_name) def main(): pass if __name__ == "__main__": main()
python
from django.urls import path

from moim.views import *

urlpatterns = [
    path('', MoimView.as_view()),
    path('<int:moim_id>/', MoimDetailView.as_view()),
    path('<int:moim_id>/apply/', MoimApplyView.as_view())
]
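# Wiring sketch (added; not part of this urls.py): how a project-level
# urlconf would typically include these routes. The "moim/" prefix and the
# project module path are assumptions for illustration only.
#
# # project/urls.py
# from django.urls import include, path
#
# urlpatterns = [
#     path('moim/', include('moim.urls')),
# ]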
python
# # PySNMP MIB module PPP-SEC-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/PPP-SEC-MIB # Produced by pysmi-0.3.4 at Wed May 1 14:41:52 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint") ppp, = mibBuilder.importSymbols("PPP-LCP-MIB", "ppp") NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance") MibIdentifier, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, iso, Unsigned32, Counter64, IpAddress, ModuleIdentity, Bits, TimeTicks, Integer32, NotificationType, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "iso", "Unsigned32", "Counter64", "IpAddress", "ModuleIdentity", "Bits", "TimeTicks", "Integer32", "NotificationType", "Gauge32") DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention") pppSecurity = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 23, 2)) pppSecurityProtocols = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 23, 2, 1)) pppSecurityPapProtocol = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 23, 2, 1, 1)) pppSecurityChapMD5Protocol = MibIdentifier((1, 3, 6, 1, 2, 1, 10, 23, 2, 1, 2)) pppSecurityConfigTable = MibTable((1, 3, 6, 1, 2, 1, 10, 23, 2, 2), ) if mibBuilder.loadTexts: pppSecurityConfigTable.setStatus('mandatory') if mibBuilder.loadTexts: pppSecurityConfigTable.setDescription('Table containing the configuration and preference parameters for PPP Security.') pppSecurityConfigEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 23, 2, 2, 1), ).setIndexNames((0, "PPP-SEC-MIB", "pppSecurityConfigLink"), (0, "PPP-SEC-MIB", "pppSecurityConfigPreference")) if mibBuilder.loadTexts: pppSecurityConfigEntry.setStatus('mandatory') if mibBuilder.loadTexts: pppSecurityConfigEntry.setDescription('Security configuration information for a particular PPP link.') pppSecurityConfigLink = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite") if mibBuilder.loadTexts: pppSecurityConfigLink.setStatus('mandatory') if mibBuilder.loadTexts: pppSecurityConfigLink.setDescription("The value of ifIndex that identifies the entry in the interface table that is associated with the local PPP entity's link for which this particular security algorithm shall be attempted. 
A value of 0 indicates the default algorithm - i.e., this entry applies to all links for which explicit entries in the table do not exist.") pppSecurityConfigPreference = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readwrite") if mibBuilder.loadTexts: pppSecurityConfigPreference.setStatus('mandatory') if mibBuilder.loadTexts: pppSecurityConfigPreference.setDescription('The relative preference of the security protocol identified by pppSecurityConfigProtocol. Security protocols with lower values of pppSecurityConfigPreference are tried before protocols with higher values of pppSecurityConfigPreference.') pppSecurityConfigProtocol = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 2, 1, 3), ObjectIdentifier()).setMaxAccess("readwrite") if mibBuilder.loadTexts: pppSecurityConfigProtocol.setStatus('mandatory') if mibBuilder.loadTexts: pppSecurityConfigProtocol.setDescription('Identifies the security protocol to be attempted on the link identified by pppSecurityConfigLink at the preference level identified by pppSecurityConfigPreference. ') pppSecurityConfigStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("invalid", 1), ("valid", 2))).clone('valid')).setMaxAccess("readwrite") if mibBuilder.loadTexts: pppSecurityConfigStatus.setStatus('mandatory') if mibBuilder.loadTexts: pppSecurityConfigStatus.setDescription('Setting this object to the value invalid(1) has the effect of invalidating the corresponding entry in the pppSecurityConfigTable. It is an implementation-specific matter as to whether the agent removes an invalidated entry from the table. Accordingly, management stations must be prepared to receive tabular information from agents that corresponds to entries not currently in use. Proper interpretation of such entries requires examination of the relevant pppSecurityConfigStatus object.') pppSecuritySecretsTable = MibTable((1, 3, 6, 1, 2, 1, 10, 23, 2, 3), ) if mibBuilder.loadTexts: pppSecuritySecretsTable.setStatus('mandatory') if mibBuilder.loadTexts: pppSecuritySecretsTable.setDescription('Table containing the identities and secrets used by the PPP authentication protocols. As this table contains secret information, it is expected that access to this table be limited to those SNMP Party-Pairs for which a privacy protocol is in use for all SNMP messages that the parties exchange. This table contains both the ID and secret pair(s) that the local PPP entity will advertise to the remote entity and the pair(s) that the local entity will expect from the remote entity. 
This table allows for multiple id/secret password pairs to be specified for a particular link by using the pppSecuritySecretsIdIndex object.') pppSecuritySecretsEntry = MibTableRow((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1), ).setIndexNames((0, "PPP-SEC-MIB", "pppSecuritySecretsLink"), (0, "PPP-SEC-MIB", "pppSecuritySecretsIdIndex")) if mibBuilder.loadTexts: pppSecuritySecretsEntry.setStatus('mandatory') if mibBuilder.loadTexts: pppSecuritySecretsEntry.setDescription('Secret information.') pppSecuritySecretsLink = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly") if mibBuilder.loadTexts: pppSecuritySecretsLink.setStatus('mandatory') if mibBuilder.loadTexts: pppSecuritySecretsLink.setDescription('The link to which this ID/Secret pair applies. By convention, if the value of this object is 0 then the ID/Secret pair applies to all links.') pppSecuritySecretsIdIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647))).setMaxAccess("readonly") if mibBuilder.loadTexts: pppSecuritySecretsIdIndex.setStatus('mandatory') if mibBuilder.loadTexts: pppSecuritySecretsIdIndex.setDescription('A unique value for each ID/Secret pair that has been defined for use on this link. This allows multiple ID/Secret pairs to be defined for each link. How the local entity selects which pair to use is a local implementation decision.') pppSecuritySecretsDirection = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("local-to-remote", 1), ("remote-to-local", 2)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: pppSecuritySecretsDirection.setStatus('mandatory') if mibBuilder.loadTexts: pppSecuritySecretsDirection.setDescription('This object defines the direction in which a particular ID/Secret pair is valid. If this object is local-to-remote then the local PPP entity will use the ID/Secret pair when attempting to authenticate the local PPP entity to the remote PPP entity. If this object is remote-to-local then the local PPP entity will expect the ID/Secret pair to be used by the remote PPP entity when the remote PPP entity attempts to authenticate itself to the local PPP entity.') pppSecuritySecretsProtocol = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1, 4), ObjectIdentifier()).setMaxAccess("readwrite") if mibBuilder.loadTexts: pppSecuritySecretsProtocol.setStatus('mandatory') if mibBuilder.loadTexts: pppSecuritySecretsProtocol.setDescription('The security protocol (e.g. CHAP or PAP) to which this ID/Secret pair applies.') pppSecuritySecretsIdentity = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite") if mibBuilder.loadTexts: pppSecuritySecretsIdentity.setStatus('mandatory') if mibBuilder.loadTexts: pppSecuritySecretsIdentity.setDescription('The Identity of the ID/Secret pair. The actual format, semantics, and use of pppSecuritySecretsIdentity depends on the actual security protocol used. For example, if pppSecuritySecretsProtocol is pppSecurityPapProtocol then this object will contain a PAP Peer-ID. 
If pppSecuritySecretsProtocol is pppSecurityChapMD5Protocol then this object would contain the CHAP NAME parameter.') pppSecuritySecretsSecret = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite") if mibBuilder.loadTexts: pppSecuritySecretsSecret.setStatus('mandatory') if mibBuilder.loadTexts: pppSecuritySecretsSecret.setDescription('The secret of the ID/Secret pair. The actual format, semantics, and use of pppSecuritySecretsSecret depends on the actual security protocol used. For example, if pppSecuritySecretsProtocol is pppSecurityPapProtocol then this object will contain a PAP Password. If pppSecuritySecretsProtocol is pppSecurityChapMD5Protocol then this object would contain the CHAP MD5 Secret.') pppSecuritySecretsStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 10, 23, 2, 3, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("invalid", 1), ("valid", 2))).clone('valid')).setMaxAccess("readwrite") if mibBuilder.loadTexts: pppSecuritySecretsStatus.setStatus('mandatory') if mibBuilder.loadTexts: pppSecuritySecretsStatus.setDescription('Setting this object to the value invalid(1) has the effect of invalidating the corresponding entry in the pppSecuritySecretsTable. It is an implementation-specific matter as to whether the agent removes an invalidated entry from the table. Accordingly, management stations must be prepared to receive tabular information from agents that corresponds to entries not currently in use. Proper interpretation of such entries requires examination of the relevant pppSecuritySecretsStatus object.') mibBuilder.exportSymbols("PPP-SEC-MIB", pppSecurityConfigPreference=pppSecurityConfigPreference, pppSecurity=pppSecurity, pppSecuritySecretsStatus=pppSecuritySecretsStatus, pppSecurityConfigLink=pppSecurityConfigLink, pppSecuritySecretsProtocol=pppSecuritySecretsProtocol, pppSecurityChapMD5Protocol=pppSecurityChapMD5Protocol, pppSecuritySecretsLink=pppSecuritySecretsLink, pppSecuritySecretsSecret=pppSecuritySecretsSecret, pppSecuritySecretsIdentity=pppSecuritySecretsIdentity, pppSecuritySecretsDirection=pppSecuritySecretsDirection, pppSecurityPapProtocol=pppSecurityPapProtocol, pppSecuritySecretsTable=pppSecuritySecretsTable, pppSecuritySecretsEntry=pppSecuritySecretsEntry, pppSecurityConfigProtocol=pppSecurityConfigProtocol, pppSecurityConfigStatus=pppSecurityConfigStatus, pppSecurityConfigEntry=pppSecurityConfigEntry, pppSecurityConfigTable=pppSecurityConfigTable, pppSecuritySecretsIdIndex=pppSecuritySecretsIdIndex, pppSecurityProtocols=pppSecurityProtocols)
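# Loading sketch (added; not part of the generated module): pysmi-generated
# MIB modules like this one are normally loaded through pysnmp's MibBuilder
# rather than imported directly, roughly as follows. The MIB source path is
# an assumption.
#
# from pysnmp.smi import builder
#
# mibBuilder = builder.MibBuilder()
# mibBuilder.addMibSources(builder.DirMibSource('/path/to/compiled/mibs'))
# mibBuilder.loadModules('PPP-SEC-MIB')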
python
import random
import math

Menor = int(input("Insira o menor limite : "))
Maior = int(input("Insira o maior limite : "))

## Returns a number between x and y (both inclusive)
Rand = random.randint(Menor, Maior)

# Minimum number of guesses = log2(upper limit - lower limit + 1)
print("\n\t\tVocê tem apenas ", round(math.log(Maior - Menor + 1, 2)), " chances para adivinhar o número!\n")

Tentativas = round(math.log(Maior - Menor + 1, 2))
Cont = 0

while Cont < Tentativas:
    Cont += 1
    Chute = int(input("Tente um número : "))
    if Rand == Chute:
        print("Parabéns, você acertou em ", Cont, " tentativa(s)!!")
        break
    elif Rand > Chute:
        print("Arriscou um valor muito baixo...")
    elif Rand < Chute:
        print("Arriscou um valor muito alto...")

if Cont >= Tentativas:
    print("\n\tO número era %d." % Rand)
    print("\tBoa sorte na próxima vez !!")
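# Note (added for clarity; not in the original script): each guess with
# high/low feedback roughly halves the candidate range, so about
# log2(Maior - Menor + 1) guesses are enough to home in on the number with a
# binary-search strategy; that is the attempt budget computed above with
# math.log(Maior - Menor + 1, 2).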
python
class News:
    def __init__(self, title, description, urlToImage, content, url):
        self.title = title
        self.description = description
        self.urlToImage = urlToImage
        self.content = content
        self.url = url


class Sources:
    def __init__(self, id, name, description, url, category):
        self.id = id
        self.name = name
        self.description = description
        self.url = url
        self.category = category


class Articles:
    def __init__(self, title, author, description, url, urlToImage, publishedAt):
        self.title = title
        self.author = author
        self.description = description
        self.url = url
        self.urlToImage = urlToImage
        self.publishedAt = publishedAt
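# Usage sketch (added; not part of the original module). The field values
# below are illustrative placeholders only.
#
# article = Articles(
#     title='Sample headline',
#     author='Jane Doe',
#     description='Short summary of the story',
#     url='https://example.com/story',
#     urlToImage='https://example.com/story.jpg',
#     publishedAt='2021-01-01T00:00:00Z',
# )
# print(article.title, article.publishedAt)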
python
"""Some utilities for caching pages.""" import zlib from beaker.util import func_namespace from mako.runtime import capture def cache_content(request, key, do_work): """Argh! Okay, so. Use this when you want to cache the BODY of a page but not the CHROME (i.e., wrapper or base or whatever). ``request`` The pyramid.request.Request object for the current request. ``key`` The key that uniquely identifies this particular rendering of this page content. ``do_work`` Some function that will stuff a bunch of expensive data in c. This will only be called if the page hasn't yet been cached. It'll be passed the key. The name and module of this function will be used as part of the cache key. Also, DO NOT FORGET TO wrap the cachable part of your template in a <%lib:cache_content> tag, or nothing will get cached! If a page body is pulled from cache, c.timer.from_cache will be set to True. If the page had to be generated, it will be set to False. (If this function wasn't involved at all, it will be set to None.) """ cache = request.environ.get('beaker.cache', None) c = request.tmpl_context # Content needs to be cached per-language # TODO(pyramid) #key = u"{0}/{1}".format(key, c.lang) key += u';' + c.game_language.identifier if request.session.get('cheat_obdurate', False): key += u';obdurate' # If the cache isn't configured for whatever reason (such as when we're # running in a test environment), just skip it. if cache is None: # call do_work immediately so that it isn't skipped during testing # (since tests don't call the renderer) do_work(request, key) def skip_cache(context, mako_def): mako_def.body() c._cache_me = skip_cache return namespace = func_namespace(do_work) # Cache for... ten hours? Sure, whatever # TODO: use get_cache_region instead content_cache = cache.get_cache('content_cache:' + namespace, expiretime=36000) # XXX This is dumb. Caches don't actually respect the 'enabled' # setting, so we gotta fake it. if not content_cache.nsargs.get('enabled', True): def skip_cache(context, mako_def): do_work(request, key) mako_def.body() c._cache_me = skip_cache return # These pages can be pretty big. In the case of e.g. memcached, that's # a lot of RAM spent on giant pages that consist half of whitespace. # Solution: gzip everything. Use level 1 for speed! def cache_me(context, mako_def): c.timer.from_cache = True def generate_page(): c.timer.from_cache = False do_work(request, key) data = capture(context, mako_def.body).encode('utf8') return zlib.compress(data, 1) data = content_cache.get_value(key=key, createfunc=generate_page) context.write( zlib.decompress(data).decode('utf8') ) c._cache_me = cache_me return
python
import os import pickle import numpy as np import soundfile as sf from scipy import signal from scipy.signal import get_window import librosa from librosa.filters import mel from numpy.random import RandomState from pathlib import Path import ipdb from tqdm import tqdm def butter_highpass(cutoff, fs, order=5): nyq = 0.5 * fs normal_cutoff = cutoff / nyq b, a = signal.butter(order, normal_cutoff, btype='high', analog=False) return b, a def pySTFT(x, fft_length=1024, hop_length=256): x = np.pad(x, int(fft_length//2), mode='reflect') noverlap = fft_length - hop_length shape = x.shape[:-1]+((x.shape[-1]-noverlap)//hop_length, fft_length) strides = x.strides[:-1]+(hop_length*x.strides[-1], x.strides[-1]) result = np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides) fft_window = get_window('hann', fft_length, fftbins=True) result = np.fft.rfft(fft_window * result, n=fft_length).T return np.abs(result) mel_basis = mel(16000, 1024, fmin=90, fmax=7600, n_mels=80).T min_level = np.exp(-100 / 20 * np.log(10)) b, a = butter_highpass(30, 16000, order=5) # audio file directory rootDir = './wavs' # rootDir = './kids_speech/wav/' # spectrogram directory rootDirs = [ '../data/LibriTTS/train-clean-100', '../data/kids_speech/wavs' ] # rootDir = '/home/shacharm/Projects/ug/data/LibriTTS/train-clean-100' # rootDir = '/home/shacharm/Projects/ug/data/kids_speech/wavs' targetDir = './spmel' for rootDir in rootDirs: assert Path(rootDir).exists(), "{} does not exist".format(rootDirs) dirName, subdirList, _ = next(os.walk(rootDir)) print('Found directory: %s' % dirName) SAMPLE_RATE = 16000 for subdir in tqdm(sorted(subdirList)): if False: files = (Path(rootDir) / subdir).glob('**/*.wav') if not os.path.exists(os.path.join(targetDir, subdir)): os.makedirs(os.path.join(targetDir, subdir)) _,_, fileList = next(os.walk(os.path.join(dirName,subdir))) try: prng = RandomState(int(subdir[1:])) except: prng = RandomState() for fileName in tqdm(list((Path(rootDir) / subdir).glob('**/*.wav'))): targetSubDir = targetDir / fileName.relative_to(rootDir).parent targetSubDir.mkdir(parents=True, exist_ok=True) targetFile = (targetSubDir / fileName.stem).with_suffix('.npy') if targetFile.exists(): continue # Read audio file #x, fs = sf.read(os.path.join(dirName,subdir,fileName)) x, fs = sf.read(str(fileName)) x = librosa.resample(x, fs, SAMPLE_RATE) fs = SAMPLE_RATE # Remove drifting noise y = signal.filtfilt(b, a, x) # Ddd a little random noise for model roubstness wav = y * 0.96 + (prng.rand(y.shape[0])-0.5)*1e-06 # Compute spect D = pySTFT(wav).T # Convert to mel and normalize D_mel = np.dot(D, mel_basis) D_db = 20 * np.log10(np.maximum(min_level, D_mel)) - 16 S = np.clip((D_db + 100) / 100, 0, 1) # save spect np.save(targetFile, S.astype(np.float32), allow_pickle=False)
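# Single-file sketch (added; not part of the original script): the core
# transform applied to one waveform with the helpers defined above (the
# random-noise step is omitted here). The file name is an assumption.
#
# x, fs = sf.read('example.wav')
# x = librosa.resample(x, fs, 16000)
# y = signal.filtfilt(b, a, x)                       # remove drifting noise
# D = pySTFT(y).T                                    # magnitude spectrogram
# D_mel = np.dot(D, mel_basis)                       # project onto mel bands
# S = np.clip((20 * np.log10(np.maximum(min_level, D_mel)) - 16 + 100) / 100, 0, 1)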
python
import tkinter as tk from tkinter import messagebox as mbox from tkinter import filedialog from Phase0 import phase0 from Phase0_1 import phase0_1 from Phase1 import phase1 from Phase2 import phase2 from Phase3 import phase3 from form_viewer import form_viewer #Tk class generating root = tk.Tk() # screen size root.geometry("700x500") # screen title root.title("N1MM to JARL log converter") # パラメータ folder_path = tk.StringVar() form_file = tk.StringVar() adif_file = tk.StringVar() log_file = tk.StringVar() HL_file = tk.StringVar() Ph0_data = [] Callsign = "" FD_coe = 1 Contest_name = "" Multi = "" def ask_form(): """ form.txtファイル選択ボタンの動作 """ global path global folder_path path = filedialog.askdirectory() # form_f = filedialog.askopenfilename(filetypes = [('テキストファイル','*.txt')] , initialdir = './' ) folder_path.set(path) # form_file.set(form_f) # print( "-------- ask_input() " ) # print( "path : ", path ) # print( "folder_path: ", folder_path ) # print( "form_f ; ", form_f ) # print( "form_file : ",form_file ) return def ask_adif(): """ adif.adiファイル選択ボタンの動作 """ # path = filedialog.askdirectory() adif_f = filedialog.askopenfilename(filetypes = [('テキストファイル','*.adi')] , initialdir = './' ) # folder_path.set(path) adif_file.set(adif_f) print( "-------- ask_adif() " ) # print( "path: ", path ) # print( "folder_path: ", folder_path ) print( "adif_f ; ", adif_f ) print( "adif_file: ",adif_file ) return def data_clear(): Remarks1.delete(0, tk.END) My_multi.delete(0, tk.END) Guest.set(False) FD_contest.set(False) Multi_Op.set(False) Contest_type.set(False) AA_contest.set(False) Power_code.set(False) JST_convert_flag.set(False) # Time_convert.set(False) QSLyesno.set(False) form_file.set('') adif_file.set('') def ok_click() : Multi = My_multi.get() mbox.showinfo('My Multi', Multi ) return Callsign def log_generate() : Guest_op =Guest.get() FD = FD_contest.get() Mop = Multi_Op.get() # form = form_file.get() # file_path = folder_path.get() Ph0_data = phase0(Guest_op, FD, Mop ) Callsign = Ph0_data[0] FD_coe = int(Ph0_data[1]) Contest_name = phase0_1( Callsign ) # mbox.showinfo('Log Remarks', 'Remark: ' + a ) # Phase1を起動 # ADIFファイルのログライン分割を1ラインに修正 phase1( Callsign ) # Phase2を起動 # スコアサマリーの生成、JARLサマリーシートへ得点を転記 phase2( Callsign , FD_coe , Contest_name ) # Phase3を起動 Multi = My_multi.get() QSL = QSLyesno.get() JST_conv = JST_convert_flag.get() Power = Power_code.get() AA = AA_contest.get() phase3( Callsign , Contest_name, QSL, JST_conv, Power, Multi, AA, Remarks1.get() ) def form_view() : form_viewer() def closing(): # exit() root.destroy() # チェックON・OFF変数 Guest = tk.BooleanVar() Guest.set(False) FD_contest = tk.BooleanVar() FD_contest.set(False) Multi_Op = tk.BooleanVar() Multi_Op.set(False) Contest_type = tk.BooleanVar() Contest_type.set(False) AA_contest = tk.BooleanVar() AA_contest.set(False) Power_code = tk.BooleanVar() Power_code.set(False) JST_convert_flag = tk.BooleanVar() JST_convert_flag.set(False) #Time_convert = tk.BooleanVar() #Time_convert.set(False) QSLyesno = tk.BooleanVar() QSLyesno.set(False) # check botton check_Guest = tk.Checkbutton(root, variable = Guest, text ="ゲストオペ運用ですか?") check_Guest.place(x=140, y=50) check_FD_contest = tk.Checkbutton(root, variable = FD_contest , text ="FDコンテストですか?") check_FD_contest.place(x=140, y=70) check_Multi_Op = tk.Checkbutton(root, variable = Multi_Op , text ="マルチオペ運用ですか?") check_Multi_Op.place(x=140, y=90) check_Contest_type = tk.Checkbutton(root, variable = Contest_type , text ="通常のContestですか?") check_Contest_type.place(x=140, y=110) check_AA_contest = 
tk.Checkbutton(root, variable = AA_contest , text ="ALL Asia DX contestですか?") check_AA_contest.place(x=140, y=130) check_Power_code = tk.Checkbutton(root, variable = Power_code , text ="1.2GHzバンド以上のパワーコードをMからLに変換します?") check_Power_code.place(x=140, y=150) check_JST_convert_flag = tk.Checkbutton(root, variable = JST_convert_flag , text ="ロギングはUTCでJSTに変換しますか?") check_JST_convert_flag.place(x=140, y=170) #check_Time_convert = tk.Checkbutton(root, variable = Time_convert , text ="UTCをJSTに変換しますか?") #check_Time_convert.place(x=140, y=190) check_QSLyesno = tk.Checkbutton(root, variable = QSLyesno , text ="QSLカードを発行しますか?") check_QSLyesno.place(x=140, y=190) # label label_contest_number = tk.Label( text="My Contest Multi: ") label_contest_number.place(x=30, y=230) Remarks1 = tk.Label( text="Hamlog Remarks1: ") Remarks1.place(x=30, y=250) label_top = tk.Label( text ="N1MM+ ADIFファイルからJARLコンテストログ生成ツール") label_top.pack() label_term1 = tk.Label( text ="1.パラメータ設定") label_term1.place(x=10,y=30) label_term2 = tk.Label( text ="2.") label_term2.place(x=10,y=350) label_term2 = tk.Label( text ="3.") label_term2.place(x=10,y=390) # ウィジット作成(form.txtファイル) #form_label = tk.Label(root, text="データフォルダ指定") #form_label.place(x=30, y=290) #form_box = tk.Entry(root, textvariable= form_file, width=80) #form_box = tk.Entry(root, textvariable= folder_path, width=80) #form_box.place(x=145, y=290) #form_btn = tk.Button(root, text="参照", command=ask_form) #form_btn.place(x=650, y=290) # ウィジット作成(ADIFファイル) #output_label = tk.Label(root, text="ADIFファイル:") #output_label.place(x=30, y=310) #output_box = tk.Entry(root, textvariable=adif_file, width=80) #output_box.place(x=145, y=310) #output_btn = tk.Button(root, text="参照", command=ask_adif) #output_btn.place(x=650, y=310) # text box My_multi = tk.Entry(width=10) My_multi.place(x=145,y=230) Remarks1 = tk.Entry(width=40) Remarks1.place(x=145,y=250) clear_Button = tk.Button(root,text='パラメータClear', command = data_clear ) #clear_Button.pack( fill = 'none', padx=20, side = 'bottom' ) clear_Button.place(x=40 , y=50) okButton =tk.Button( root, text='form.txtファイルの確認と修正', command = form_view ) #okButton.pack( fill = 'none', padx=20, side = 'bottom' ) okButton.place(x=40 , y=350) okButton =tk.Button( root, text='コンテストログ生成', command = log_generate ) #okButton.pack( fill = 'none', padx=20, side = 'bottom' ) okButton.place(x=40 , y=390) closeButton =tk.Button( root, text='Close', command = closing ) closeButton.place(x=370 , y=470) root.mainloop()
python
import numpy as np import os import tensorflow as tf from sandbox.rocky.tf.optimizers.conjugate_gradient_optimizer import FiniteDifferenceHvp, ConjugateGradientOptimizer from hgail.algos.gail import GAIL import auto_validator import hyperparams import utils # setup args = hyperparams.parse_args() exp_dir = utils.set_up_experiment(exp_name=args.exp_name, phase='imitate') saver_dir = os.path.join(exp_dir, 'imitate', 'log') saver_filepath = os.path.join(saver_dir, 'checkpoint') np.savez(os.path.join(saver_dir, 'args'), args=args) summary_writer = tf.summary.FileWriter(os.path.join(exp_dir, 'imitate', 'summaries')) # build components env, act_low, act_high = utils.build_ngsim_env(args, exp_dir, vectorize=args.vectorize) data = utils.load_data( args.expert_filepath, act_low=act_low, act_high=act_high, min_length=args.env_H + args.env_primesteps, clip_std_multiple=args.normalize_clip_std_multiple, ngsim_filename=args.ngsim_filename ) critic = utils.build_critic(args, data, env, summary_writer) policy = utils.build_policy(args, env) recognition_model = utils.build_recognition_model(args, env, summary_writer) baseline = utils.build_baseline(args, env) reward_handler = utils.build_reward_handler(args, summary_writer) validator = auto_validator.AutoValidator( summary_writer, data['obs_mean'], data['obs_std'], render=args.validator_render, render_every=args.render_every, flat_recurrent=args.policy_recurrent ) # build algo saver = tf.train.Saver(max_to_keep=100, keep_checkpoint_every_n_hours=.5) sampler_args = dict(n_envs=args.n_envs) if args.vectorize else None if args.policy_recurrent: optimizer = ConjugateGradientOptimizer( max_backtracks=50, hvp_approach=FiniteDifferenceHvp(base_eps=1e-5) ) else: optimizer = None algo = GAIL( critic=critic, recognition=recognition_model, reward_handler=reward_handler, env=env, policy=policy, baseline=baseline, validator=validator, batch_size=args.batch_size, max_path_length=args.max_path_length, n_itr=args.n_itr, discount=args.discount, step_size=args.trpo_step_size, saver=saver, saver_filepath=saver_filepath, force_batch_sampler=False if args.vectorize else True, sampler_args=sampler_args, snapshot_env=False, plot=False, optimizer=optimizer, optimizer_args=dict( max_backtracks=50, debug_nan=True ) ) # run it with tf.Session() as session: # running the initialization here to allow for later loading # NOTE: rllab batchpolopt runs this before training as well # this means that any loading subsequent to this is nullified # you have to comment of that initialization for any loading to work session.run(tf.global_variables_initializer()) # loading if args.params_filepath != '': algo.load(args.params_filepath) # run training algo.train(sess=session)
python
# Copyright (c) 2019, CMCC Technologies Co., Ltd. # Copyright (c) 2019, ZTE Corporation. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging from lcm.pub.database.models import NSLcmOpOccModel from lcm.pub.exceptions import NSLCMException from lcm.ns.const import NS_OCC_BASE_URI, NS_INSTANCE_BASE_URI logger = logging.getLogger(__name__) FILTERS = { 'id': 'id', 'operationState': 'operation_state', 'stateEnteredTime': 'state_entered_time', 'startTime': 'start_time', 'nsInstanceId': 'ns_instance_id', 'operation': 'operation' } class QueryNsLcmOpOcc: def __init__(self, data, lcm_op_occ_id=''): self.ns_lcm_op_occ_id = lcm_op_occ_id self.params = data def query_multi_ns_lcm_op_occ(self): query_data = {} logger.debug("QueryMultiNsLcmOpOccs--get--biz::> Check for filters in query params" % self.params) for query, value in list(self.params.items()): if query in FILTERS: query_data[FILTERS[query]] = value # Query the database with filters if the request has fields in request params, else fetch all records if query_data: lcm_ops = NSLcmOpOccModel.objects.filter(**query_data) else: lcm_ops = NSLcmOpOccModel.objects.all() if not lcm_ops.exists(): return [] # raise NSLCMException('LCM Operation Occurances do not exist') return [self.fill_resp_data(lcm_op) for lcm_op in lcm_ops] def fill_resp_data(self, lcm_op): NS_LCM_OP_OCC_URI = NS_OCC_BASE_URI % lcm_op.id resp_data = { 'id': lcm_op.id, 'operationState': lcm_op.operation_state, 'stateEnteredTime': lcm_op.state_entered_time, 'startTime': lcm_op.start_time, 'nsInstanceId': lcm_op.ns_instance_id, 'operation': lcm_op.operation, 'isAutomaticInvocation': lcm_op.is_automatic_invocation, 'operationParams': json.loads(lcm_op.operation_params), 'isCancelPending': lcm_op.is_cancel_pending, 'cancelMode': lcm_op.cancel_mode, 'error': None if not lcm_op.error else json.loads(lcm_op.error), 'resourceChanges': None if not lcm_op.resource_changes else json.loads(lcm_op.resource_changes), '_links': { 'self': {'href': NS_LCM_OP_OCC_URI}, 'nsInstance': {'href': NS_INSTANCE_BASE_URI % lcm_op.ns_instance_id}, 'retry': {'href': NS_LCM_OP_OCC_URI + '/retry'}, 'rollback': {'href': NS_LCM_OP_OCC_URI + '/rollback'}, 'continue': {'href': NS_LCM_OP_OCC_URI + '/continue'}, 'fail': {'href': NS_LCM_OP_OCC_URI + '/fail'}, 'cancel': {'href': NS_LCM_OP_OCC_URI + '/cancel'} } # json.loads(lcm_op.links) } return resp_data def query_single_ns_lcm_op_occ(self): lcm_op = NSLcmOpOccModel.objects.filter(id=self.ns_lcm_op_occ_id) if not lcm_op.exists(): raise NSLCMException('LCM Operation Occurance does not exist') resp_data = self.fill_resp_data(lcm_op[0]) return resp_data
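# Usage sketch (added; not part of the original module). The filter value and
# occurrence id are illustrative assumptions; a configured Django environment
# with the NSLcmOpOccModel table is required.
#
# occurrences = QueryNsLcmOpOcc({'operationState': 'PROCESSING'}).query_multi_ns_lcm_op_occ()
# one = QueryNsLcmOpOcc({}, lcm_op_occ_id='op-123').query_single_ns_lcm_op_occ()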
python
# Computes the transition temperature Tc from the temperature dependence of the leading # Bethe-Salpeter eigenvalue. # # Usage: python compute_tc.py T=* # # Author: Urs R. Haehner ([email protected]) import numpy as np from scipy import optimize from matplotlib import pyplot as plt import h5py import os import sys ################################################################################ # Computes the temperature at which an instability occurs, i.e. the temperature T where the leading # eigenvalue eigval crosses 1. # Uses a fit function of the form eigval(T) = p0/(T-p1)^p2. # The transition temperature Tc is then given by Tc = p1^(1/p0) + p1. def computeTransitionTemp(T, eigval): print('\nTemperature/eigenvalue pairs for fit:') for T_ind, T_val in enumerate(T): print(str(T_val) + '\t' + str(eigval[T_ind])) fitfunc = lambda p, x: p[0] / pow((x-p[1]), p[2]) # Target function errfunc = lambda p, x, y: fitfunc(p, x) - y # Distance to the target function p0 = [1., 0., 1.] # Initial guess for the parameters p, success = optimize.leastsq(errfunc, p0[:], args=(T, eigval)) Tc = pow(p[0], 1./p[2]) + p[1] print('\nTc = ' + '{0:.3g}'.format(Tc)) T_fine = np.linspace(T[0], T[-1], 100) l_fine = fitfunc(p, T_fine) return Tc, T_fine, l_fine ################################################################################ dirs = sys.argv[1:] # T=... directories T = [] eigval = [] # Read leading eigenvalue for each temperature. for d in dirs: filename = d + '/analysis.hdf5' if (os.path.isfile(filename)): T.append(float(d[2:])) print('Reading ' + filename) data = h5py.File(filename,'r') # Store real part of leading eigenvalue (imaginary part = 0). # Eigenvalues are sorted w.r.t. size in decreasing order. leading_eigenvalues = data['analysis-functions']['leading-eigenvalues']['data'][:] eigval.append(leading_eigenvalues[0][0]) data.close() # Compute the transition temperature Tc. Tc, T_fine, eigval_fine = computeTransitionTemp(T, eigval) # Plot temperature dependence of leading eigenvalue. filename = 'eigval_vs_temp.pdf' print('\nPlotting temperature dependence of leading eigenvalue: ' + filename) xmin = T_fine[0]-0.005 xmax = T_fine[-1]+0.005 plt.plot(T_fine, eigval_fine, '--', label=r'$T_c$ = '+'{0:.3g}'.format(Tc)) plt.plot(T, eigval, 'o') plt.hlines(1., xmin, xmax, 'k') plt.xlim(xmin, xmax) plt.xticks([0.07, 0.08, 0.09, 0.1]) plt.xlabel(r'$T/t$') plt.ylabel(r'$\lambda_d$') plt.legend(loc='best') plt.grid() plt.savefig(filename)
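# Derivation note (added for clarity; not in the original script): the fit is
# eigval(T) = p0 / (T - p1)^p2, and the instability is where it crosses 1:
#   1 = p0 / (Tc - p1)^p2  =>  (Tc - p1)^p2 = p0  =>  Tc = p0^(1/p2) + p1,
# which is what computeTransitionTemp returns via pow(p[0], 1./p[2]) + p[1].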
python
import numpy as np import openslide import sys import os from PIL import Image from color_norm.color_normalize import reinhard_normalizer def white_ratio(pat): white_count = 0.0 total_count = 0.001 for x in range(0, pat.shape[0]-200, 100): for y in range(0, pat.shape[1]-200, 100): p = pat[x:x+200, y:y+200, :] whiteness = (np.std(p[:,:,0]) + np.std(p[:,:,1]) + np.std(p[:,:,2])) / 3.0 if whiteness < 14: white_count += 1.0 total_count += 1.0 return white_count/total_count def stain_normalized_tiling(slide_name, patch_size, do_actually_read_image=True): margin = 5 try: oslide = openslide.OpenSlide(slide_name) if openslide.PROPERTY_NAME_MPP_X in oslide.properties: mpp = float(oslide.properties[openslide.PROPERTY_NAME_MPP_X]) elif "XResolution" in oslide.properties: mpp = float(oslide.properties["XResolution"]); elif "tiff.XResolution" in oslide.properties: mpp = float(oslide.properties["tiff.XResolution"]); if mpp > 2.0: mpp = 10000.0/mpp; else: mpp = 0.250 if mpp < 0.375: scale_factor = 1 else: scale_factor = 2 pw = patch_size width = oslide.dimensions[0] height = oslide.dimensions[1] except: print 'Error in {}: exception caught exiting'.format(slide_name) raise Exception('{}: exception caught exiting'.format(slide_name)) return n40X = reinhard_normalizer('color_norm/target_40X.png') for x in range(1, width, pw): for y in range(1, height, pw): if x + pw > width - margin: pw_x = width - x - margin else: pw_x = pw if y + pw > height - margin: pw_y = height - y - margin else: pw_y = pw if pw_x <= 3 or pw_y <= 3: continue if do_actually_read_image: try: patch = oslide.read_region((x, y), 0, (pw_x, pw_y)).convert('RGB') except: print '{}: exception caught'.format(slide_name) continue else: patch = Image.new('RGB', (pw_x, pw_y), (255, 255, 255)) ori_size0 = patch.size[0] ori_size1 = patch.size[1] patch = np.array(patch.resize( (patch.size[0]*scale_factor, patch.size[1]*scale_factor), Image.ANTIALIAS)) if white_ratio(patch) < 0.25: patch = n40X.normalize(patch) yield patch, (x, y, pw_x, pw_y, ori_size0, ori_size1, mpp, scale_factor), (width, height)
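# Usage sketch (added; not part of the original module): iterating the
# generator over one slide. The slide path and patch size are assumptions;
# the module above is Python 2, hence the print statement.
#
# for patch, (x, y, pw_x, pw_y, w0, h0, mpp, scale), (width, height) in \
#         stain_normalized_tiling('slide.svs', patch_size=4000):
#     print patch.shape, (x, y)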
python
import win32ui
import pyautogui
from win10toast import ToastNotifier

path = pyautogui.prompt('Please enter the path below:')
path = path + "/?"

pyautogui.keyDown("win")
pyautogui.press("r")
pyautogui.keyUp("win")
pyautogui.typewrite("cmd")
pyautogui.press("enter")
pyautogui.press("enter")
pyautogui.typewrite(f"{path}")
pyautogui.press("enter")

wnd = win32ui.GetForegroundWindow()
print(wnd.GetWindowText())

if "cmd.exe" in wnd.GetWindowText():
    pyautogui.typewrite("exit")
    pyautogui.press("enter")
    toaster = ToastNotifier()
    toaster.show_toast("Testing",
                       "File Does Not Have Any Silent Switches",
                       threaded=True,
                       icon_path=None,
                       duration=3)
else:
    toaster = ToastNotifier()
    toaster.show_toast("Testing",
                       "File Has Silent Switches",
                       threaded=True,
                       icon_path=None,
                       duration=3)
python
# Copyright 2021 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Writes metadata and label file to the Bert NL classifier models.""" from typing import List, Optional, Union from tensorflow_lite_support.metadata.python.metadata_writers import metadata_info from tensorflow_lite_support.metadata.python.metadata_writers import metadata_writer from tensorflow_lite_support.metadata.python.metadata_writers import writer_utils _MODEL_NAME = "BertNLClassifier" _MODEL_DESCRIPTION = ("Classify the input text into a set of known categories.") _OUTPUT_NAME = "probability" _OUTPUT_DESCRIPTION = "Probabilities of the labels respectively." # The input tensor names of models created by Model Maker. _DEFAULT_ID_NAME = "serving_default_input_word_ids:0" _DEFAULT_MASK_NAME = "serving_default_input_mask:0" _DEFAULT_SEGMENT_ID_NAME = "serving_default_input_type_ids:0" class MetadataWriter(metadata_writer.MetadataWriter): """Writes metadata into the Bert NL classifier.""" @classmethod def create_from_metadata_info( cls, model_buffer: bytearray, general_md: Optional[metadata_info.GeneralMd] = None, input_md: Optional[metadata_info.BertInputTensorsMd] = None, output_md: Optional[metadata_info.ClassificationTensorMd] = None): """Creates MetadataWriter based on general/input/output information. Args: model_buffer: valid buffer of the model file. general_md: general information about the model. If not specified, default general metadata will be generated. input_md: input tensor information. If not specified, default input metadata will be generated. output_md: output classification tensor informaton. If not specified, default output metadata will be generated. Returns: A MetadataWriter object. """ if general_md is None: general_md = metadata_info.GeneralMd( name=_MODEL_NAME, description=_MODEL_DESCRIPTION) if input_md is None: input_md = metadata_info.BertInputTensorsMd(model_buffer, _DEFAULT_ID_NAME, _DEFAULT_MASK_NAME, _DEFAULT_SEGMENT_ID_NAME) if output_md is None: output_md = metadata_info.ClassificationTensorMd( name=_OUTPUT_NAME, description=_OUTPUT_DESCRIPTION) if output_md.associated_files is None: output_md.associated_files = [] return cls.create_from_metadata( model_buffer, model_metadata=general_md.create_metadata(), input_metadata=input_md.create_input_tesnor_metadata(), output_metadata=[output_md.create_metadata()], associated_files=[ file.file_path for file in output_md.associated_files ] + input_md.get_tokenizer_associated_files(), input_process_units=input_md.create_input_process_unit_metadata()) @classmethod def create_for_inference( cls, model_buffer: bytearray, tokenizer_md: Union[metadata_info.BertTokenizerMd, metadata_info.SentencePieceTokenizerMd], label_file_paths: List[str], ids_name: str = _DEFAULT_ID_NAME, mask_name: str = _DEFAULT_MASK_NAME, segment_name: str = _DEFAULT_SEGMENT_ID_NAME, ): """Creates mandatory metadata for TFLite Support inference. 
The parameters required in this method are mandatory when using TFLite Support features, such as Task library and Codegen tool (Android Studio ML Binding). Other metadata fields will be set to default. If other fields need to be filled, use the method `create_from_metadata_info` to edit them. `ids_name`, `mask_name`, and `segment_name` correspond to the `Tensor.name` in the TFLite schema, which help to determine the tensor order when populating metadata. The default values come from Model Maker. Args: model_buffer: valid buffer of the model file. tokenizer_md: information of the tokenizer used to process the input string, if any. Supported tokenziers are: `BertTokenizer` [1] and `SentencePieceTokenizer` [2]. If the tokenizer is `RegexTokenizer` [3], refer to `nl_classifier.MetadataWriter`. [1]: https://github.com/tensorflow/tflite-support/blob/b80289c4cd1224d0e1836c7654e82f070f9eefaa/tensorflow_lite_support/metadata/metadata_schema.fbs#L436 [2]: https://github.com/tensorflow/tflite-support/blob/b80289c4cd1224d0e1836c7654e82f070f9eefaa/tensorflow_lite_support/metadata/metadata_schema.fbs#L473 [3]: https://github.com/tensorflow/tflite-support/blob/b80289c4cd1224d0e1836c7654e82f070f9eefaa/tensorflow_lite_support/metadata/metadata_schema.fbs#L475 label_file_paths: paths to the label files [4] in the classification tensor. Pass in an empty list if the model does not have any label file. [4]: https://github.com/tensorflow/tflite-support/blob/b80289c4cd1224d0e1836c7654e82f070f9eefaa/tensorflow_lite_support/metadata/metadata_schema.fbs#L95 ids_name: name of the ids tensor, which represents the tokenized ids of the input text. mask_name: name of the mask tensor, which represents the mask with 1 for real tokens and 0 for padding tokens. segment_name: name of the segment ids tensor, where `0` stands for the first sequence, and `1` stands for the second sequence if exists. Returns: A MetadataWriter object. """ input_md = metadata_info.BertInputTensorsMd( model_buffer, ids_name, mask_name, segment_name, tokenizer_md=tokenizer_md) output_md = metadata_info.ClassificationTensorMd( name=_OUTPUT_NAME, description=_OUTPUT_DESCRIPTION, label_files=[ metadata_info.LabelFileMd(file_path=file_path) for file_path in label_file_paths ], tensor_type=writer_utils.get_output_tensor_types(model_buffer)[0]) return cls.create_from_metadata_info( model_buffer, input_md=input_md, output_md=output_md)
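# Usage sketch (added; not part of the original module). File names are
# assumptions; the tokenizer and label arguments follow create_for_inference
# as defined above, and populate() is assumed to come from the
# metadata_writer.MetadataWriter base class.
#
# with open('bert_classifier.tflite', 'rb') as f:
#     model_buffer = bytearray(f.read())
# tokenizer_md = metadata_info.BertTokenizerMd(vocab_file_path='vocab.txt')
# writer = MetadataWriter.create_for_inference(
#     model_buffer, tokenizer_md, label_file_paths=['labels.txt'])
# with open('bert_classifier_with_metadata.tflite', 'wb') as f:
#     f.write(writer.populate())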
python
from lib.base import PowerDNSClient


class SuggestZone(PowerDNSClient):
    def _run(self, *args, **kwargs):
        return self.api.suggest_zone(*args, **kwargs)
python
""" The :mod:`websockets.server` module defines a simple WebSocket server API. """ import asyncio import collections.abc import email.message import logging from .compatibility import asyncio_ensure_future from .exceptions import InvalidHandshake, InvalidOrigin from .handshake import build_response, check_request from .http import USER_AGENT, read_request from .protocol import CONNECTING, OPEN, WebSocketCommonProtocol __all__ = ['serve', 'WebSocketServerProtocol'] logger = logging.getLogger(__name__) class WebSocketServerProtocol(WebSocketCommonProtocol): """ Complete WebSocket server implementation as an :class:`asyncio.Protocol`. This class inherits most of its methods from :class:`~websockets.protocol.WebSocketCommonProtocol`. For the sake of simplicity, it doesn't rely on a full HTTP implementation. Its support for HTTP responses is very limited. """ state = CONNECTING def __init__(self, ws_handler, ws_server, *, origins=None, subprotocols=None, extra_headers=None, **kwds): self.ws_handler = ws_handler self.ws_server = ws_server self.origins = origins self.subprotocols = subprotocols self.extra_headers = extra_headers super().__init__(**kwds) def connection_made(self, transport): super().connection_made(transport) # Register the connection with the server when creating the handler # task. (Registering at the beginning of the handler coroutine would # create a race condition between the creation of the task, which # schedules its execution, and the moment the handler starts running.) self.ws_server.register(self) self.handler_task = asyncio_ensure_future( self.handler(), loop=self.loop) @asyncio.coroutine def handler(self): # Since this method doesn't have a caller able to handle exceptions, # it attemps to log relevant ones and close the connection properly. try: try: path = yield from self.handshake( origins=self.origins, subprotocols=self.subprotocols, extra_headers=self.extra_headers) except ConnectionError as exc: logger.info('Connection error during opening handshake', exc_info=True) raise except Exception as exc: if self._is_server_shutting_down(exc): response = ('HTTP/1.1 503 Service Unavailable\r\n\r\n' 'Server is shutting down.') elif isinstance(exc, InvalidOrigin): response = 'HTTP/1.1 403 Forbidden\r\n\r\n' + str(exc) elif isinstance(exc, InvalidHandshake): response = 'HTTP/1.1 400 Bad Request\r\n\r\n' + str(exc) else: logger.warning("Error in opening handshake", exc_info=True) response = ('HTTP/1.1 500 Internal Server Error\r\n\r\n' 'See server log for more information.') self.writer.write(response.encode()) raise try: yield from self.ws_handler(self, path) except Exception as exc: if self._is_server_shutting_down(exc): yield from self.fail_connection(1001) else: logger.error("Error in connection handler", exc_info=True) yield from self.fail_connection(1011) raise try: yield from self.close() except ConnectionError as exc: if self._is_server_shutting_down(exc): pass logger.info('Connection error in closing handshake', exc_info=True) raise except Exception as exc: if self._is_server_shutting_down(exc): pass else: logger.warning("Error in closing handshake", exc_info=True) raise except Exception: # Last-ditch attempt to avoid leaking connections on errors. try: self.writer.close() except Exception: # pragma: no cover pass finally: # Unregister the connection with the server when the handler task # terminates. Registration is tied to the lifecycle of the handler # task because the server waits for tasks attached to registered # connections before terminating. 
self.ws_server.unregister(self) def _is_server_shutting_down(self, exc): """ Decide whether an exception means that the server is shutting down. """ return ( isinstance(exc, asyncio.CancelledError) and self.ws_server.closing ) @asyncio.coroutine def handshake(self, origins=None, subprotocols=None, extra_headers=None): """ Perform the server side of the opening handshake. If provided, ``origins`` is a list of acceptable HTTP Origin values. Include ``''`` if the lack of an origin is acceptable. If provided, ``subprotocols`` is a list of supported subprotocols in order of decreasing preference. If provided, ``extra_headers`` sets additional HTTP response headers. It can be a mapping or an iterable of (name, value) pairs. It can also be a callable taking the request path and headers in arguments. Return the URI of the request. """ # Read handshake request. try: path, headers = yield from read_request(self.reader) except ValueError as exc: raise InvalidHandshake("Malformed HTTP message") from exc self.request_headers = headers self.raw_request_headers = list(headers.raw_items()) get_header = lambda k: headers.get(k, '') key = check_request(get_header) if origins is not None: origin = get_header('Origin') if not set(origin.split() or ['']) <= set(origins): raise InvalidOrigin("Origin not allowed: {}".format(origin)) if subprotocols is not None: protocol = get_header('Sec-WebSocket-Protocol') if protocol: client_subprotocols = [p.strip() for p in protocol.split(',')] self.subprotocol = self.select_subprotocol( client_subprotocols, subprotocols) headers = [] set_header = lambda k, v: headers.append((k, v)) set_header('Server', USER_AGENT) if self.subprotocol: set_header('Sec-WebSocket-Protocol', self.subprotocol) if extra_headers is not None: if callable(extra_headers): extra_headers = extra_headers(path, self.raw_request_headers) if isinstance(extra_headers, collections.abc.Mapping): extra_headers = extra_headers.items() for name, value in extra_headers: set_header(name, value) build_response(set_header, key) self.response_headers = email.message.Message() for name, value in headers: self.response_headers[name] = value self.raw_response_headers = headers # Send handshake response. Since the status line and headers only # contain ASCII characters, we can keep this simple. response = ['HTTP/1.1 101 Switching Protocols'] response.extend('{}: {}'.format(k, v) for k, v in headers) response.append('\r\n') response = '\r\n'.join(response).encode() self.writer.write(response) assert self.state == CONNECTING self.state = OPEN self.opening_handshake.set_result(True) return path @staticmethod def select_subprotocol(client_protos, server_protos): """ Pick a subprotocol among those offered by the client. """ common_protos = set(client_protos) & set(server_protos) if not common_protos: return None priority = lambda p: client_protos.index(p) + server_protos.index(p) return sorted(common_protos, key=priority)[0] class WebSocketServer(asyncio.AbstractServer): """ Wrapper for :class:`~asyncio.Server` that triggers the closing handshake. """ def __init__(self, loop): # Store a reference to loop to avoid relying on self.server._loop. self.loop = loop self.closing = False self.websockets = set() def wrap(self, server): """ Attach to a given :class:`~asyncio.Server`. 
Since :meth:`~asyncio.BaseEventLoop.create_server` doesn't support injecting a custom ``Server`` class, a simple solution that doesn't rely on private APIs is to: - instantiate a :class:`WebSocketServer` - give the protocol factory a reference to that instance - call :meth:`~asyncio.BaseEventLoop.create_server` with the factory - attach the resulting :class:`~asyncio.Server` with this method """ self.server = server def register(self, protocol): self.websockets.add(protocol) def unregister(self, protocol): self.websockets.remove(protocol) def close(self): """ Stop accepting new connections and close open connections. """ # Make a note that the server is shutting down. Websocket connections # check this attribute to decide to send a "going away" close code. self.closing = True # Stop accepting new connections. self.server.close() # Close open connections. For each connection, two tasks are running: # 1. self.worker_task shuffles messages between the network and queues # 2. self.handler_task runs the opening handshake, the handler provided # by the user and the closing handshake # In the general case, cancelling the handler task will cause the # handler provided by the user to exit with a CancelledError, which # will then cause the worker task to terminate. for websocket in self.websockets: websocket.handler_task.cancel() @asyncio.coroutine def wait_closed(self): """ Wait until all connections are closed. This method must be called after :meth:`close()`. """ # asyncio.wait doesn't accept an empty first argument. if self.websockets: # The handler or the worker task can terminate first, depending # on how the client behaves and the server is implemented. yield from asyncio.wait( [websocket.handler_task for websocket in self.websockets] + [websocket.worker_task for websocket in self.websockets], loop=self.loop) yield from self.server.wait_closed() @asyncio.coroutine def serve(ws_handler, host=None, port=None, *, klass=WebSocketServerProtocol, timeout=10, max_size=2 ** 20, max_queue=2 ** 5, loop=None, legacy_recv=False, origins=None, subprotocols=None, extra_headers=None, **kwds): """ This coroutine creates a WebSocket server. It yields a :class:`~asyncio.Server` which provides: * a :meth:`~asyncio.Server.close` method that closes open connections with status code 1001 and stops accepting new connections * a :meth:`~asyncio.Server.wait_closed` coroutine that waits until closing handshakes complete and connections are closed. ``ws_handler`` is the WebSocket handler. It must be a coroutine accepting two arguments: a :class:`WebSocketServerProtocol` and the request URI. :func:`serve` is a wrapper around the event loop's :meth:`~asyncio.BaseEventLoop.create_server` method. ``host``, ``port`` as well as extra keyword arguments are passed to :meth:`~asyncio.BaseEventLoop.create_server`. For example, you can set the ``ssl`` keyword argument to a :class:`~ssl.SSLContext` to enable TLS. The behavior of the ``timeout``, ``max_size``, and ``max_queue`` optional arguments is described the documentation of :class:`~websockets.protocol.WebSocketCommonProtocol`. :func:`serve` also accepts the following optional arguments: * ``origins`` defines acceptable Origin HTTP headers — include ``''`` if the lack of an origin is acceptable * ``subprotocols`` is a list of supported subprotocols in order of decreasing preference * ``extra_headers`` sets additional HTTP response headers — it can be a mapping, an iterable of (name, value) pairs, or a callable taking the request path and headers in arguments. 
Whenever a client connects, the server accepts the connection, creates a :class:`WebSocketServerProtocol`, performs the opening handshake, and delegates to the WebSocket handler. Once the handler completes, the server performs the closing handshake and closes the connection. Since there's no useful way to propagate exceptions triggered in handlers, they're sent to the ``'websockets.server'`` logger instead. Debugging is much easier if you configure logging to print them:: import logging logger = logging.getLogger('websockets.server') logger.setLevel(logging.ERROR) logger.addHandler(logging.StreamHandler()) """ if loop is None: loop = asyncio.get_event_loop() ws_server = WebSocketServer(loop) secure = kwds.get('ssl') is not None factory = lambda: klass( ws_handler, ws_server, host=host, port=port, secure=secure, timeout=timeout, max_size=max_size, max_queue=max_queue, loop=loop, legacy_recv=legacy_recv, origins=origins, subprotocols=subprotocols, extra_headers=extra_headers, ) server = yield from loop.create_server(factory, host, port, **kwds) ws_server.wrap(server) return ws_server
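A minimal usage sketch for the serve() coroutine documented above, written in the same yield-from style as the module; the echo handler, host and port are illustrative, and websockets.serve is assumed to be the package-level re-export of this module's serve().

import asyncio
import websockets


@asyncio.coroutine
def echo(websocket, path):
    # The handler receives a WebSocketServerProtocol and the request URI.
    message = yield from websocket.recv()
    yield from websocket.send(message)


loop = asyncio.get_event_loop()
# ws_server is the WebSocketServer wrapper returned by serve().
ws_server = loop.run_until_complete(websockets.serve(echo, 'localhost', 8765))
try:
    loop.run_forever()
finally:
    ws_server.close()                                  # stop accepting, close connections with 1001
    loop.run_until_complete(ws_server.wait_closed())   # wait for closing handshakes to finish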
python
import os from pylearn2.utils import serial from theano import tensor as T from theano import function from pylearn2ext.chbmit import CHBMIT from tests.plot_eeg import plot_eeg_predict_seizure_period def predict_plot(model_path, dataset): """ Script to perform seizure detection and plot the results. Parameters ---------- model_path : string Path to the directory to load the trained model. data_path : dataset object Dataset object. """ try: model = serial.load(model_path) except Exception, e: print model_path + "Doesn't seem to be a valid model path, got this error when trying to load it:" print e print "Setting up symbolic expressions..." X = model.get_input_space().make_theano_batch() Y = model.fprop(X) Y = T.argmax(Y, axis=1) f = function([X], Y) # Use smallish batches to avoid running out of memory batch_size = dataset.batch_size model.set_batch_size(batch_size) # Dataset must be multiple of batch size of some batches will have different sizes. # Theano convolution requires a hard-coded batch size. m = dataset.X.shape[0] extra = (batch_size - m) % batch_size assert (m + extra) % batch_size == 0 import numpy as np if extra > 0: dataset.X = np.concatenate((dataset.X, np.zeros((extra, dataset.X.shape[1]), dtype=dataset.X.dtype)), axis=0) assert dataset.X.shape[0] % batch_size == 0 print "Performing predictions..." y = [] for i in xrange(dataset.X.shape[0] / batch_size): x_arg = dataset.X[i*batch_size:(i+1)*batch_size,:] if X.ndim > 2: x_arg = dataset.get_topological_view(x_arg) y.append(f(x_arg.astype(X.dtype))) y = np.concatenate(y) assert y.ndim == 1 assert y.shape[0] == dataset.X.shape[0] # Discard any zero-padding that was used to give the batches uniform size y = y[:m] extra = (dataset.n_channels - y.size) % dataset.n_channels assert (extra + y.size) % dataset.n_channels == 0 if extra > 0: y = np.append(y, np.zeros(extra)) # Reshape y = y.reshape(-1, y.shape[0] / dataset.n_channels) sum_y = np.sum(y, 0) plot_eeg_predict_seizure_period(X=dataset.raw_X, y=np.repeat(sum_y, dataset.sampling_rate), channel_labels=dataset.channel_labels, seizure_seconds=dataset.seizure_seconds, sampling_rate=dataset.sampling_rate, start_second=3600, end_second=3900, is_scale=True, n_X_ticks=6, channel_th_y_lim=[-1, 6], figure_width=800, figure_height=600) if __name__ == '__main__': patient_id = 10 leave_one_out_file = 4 model_path = '../models' data_path = '/Users/akara/Workspace/data/chbmit' save_model_path = os.path.join(model_path, 'sdae_chbmit_p{0}_leave_{1}'.format(patient_id, leave_one_out_file)) dataset = CHBMIT(patient_id=patient_id, which_set='test', preprocessor_path=os.path.join(save_model_path, 'sdae_scaler.pkl'), data_dir=data_path, transform='single_channel', leave_one_out_file=leave_one_out_file, window_size=256, batch_size=20) predict_plot(model_path=os.path.join(save_model_path, 'sdae_all.pkl'), dataset=dataset)
python
#!/usr/bin/env python # -*- coding: utf-8 -*- import logging import os import random import time class IdWorker(object): def __init__(self, worker_id, host_id): self.worker_id = worker_id self.host_id = host_id self.logger = logging.getLogger("idworker") # stats self.ids_generated = 0 # Since epicteller start. self.twepoch = 1577808000000 self.sequence = 0 self.worker_id_bits = 8 self.data_center_id_bits = 2 self.max_worker_id = -1 ^ (-1 << self.worker_id_bits) self.max_data_center_id = -1 ^ (-1 << self.data_center_id_bits) self.sequence_bits = 12 self.worker_id_shift = self.sequence_bits self.data_center_id_shift = self.sequence_bits + self.worker_id_bits self.timestamp_left_shift = self.sequence_bits + self.worker_id_bits + self.data_center_id_bits self.sequence_mask = -1 ^ (-1 << self.sequence_bits) self.last_timestamp = -1 # Sanity check for worker_id if self.worker_id > self.max_worker_id or self.worker_id < 0: raise Exception("worker_id", "worker id can't be greater than %i or less than 0" % self.max_worker_id) if self.host_id > self.max_data_center_id or self.host_id < 0: raise Exception("host_id", "data center id can't be greater than %i or less than 0" % self.max_data_center_id) self.logger.info("worker starting. timestamp left shift %d, data center id bits %d, worker id bits %d, sequence bits %d, worker id %d" % (self.timestamp_left_shift, self.data_center_id_bits, self.worker_id_bits, self.sequence_bits, self.worker_id)) def _time_gen(self): return int(time.time() * 1000) def _till_next_millis(self, last_timestamp): timestamp = self._time_gen() while timestamp <= last_timestamp: timestamp = self._time_gen() return timestamp def _next_id(self, timestamp): if self.last_timestamp > timestamp: self.logger.warning("clock is moving backwards. Rejecting request until %i" % self.last_timestamp) raise Exception("Clock moved backwards. Refusing to generate id for %i milliseocnds" % self.last_timestamp) if self.last_timestamp == timestamp: self.sequence = (self.sequence + 1) & self.sequence_mask if self.sequence == 0: timestamp = self._till_next_millis(self.last_timestamp) else: self.sequence = 0 self.last_timestamp = timestamp new_id = ((timestamp - self.twepoch) << self.timestamp_left_shift) | (self.host_id << self.data_center_id_shift) | (self.worker_id << self.worker_id_shift) | self.sequence self.ids_generated += 1 return new_id def get_worker_id(self): return self.worker_id def get_timestamp(self): return self._time_gen() def get_id(self): timestamp = self._time_gen() new_id = self._next_id(timestamp) self.logger.debug("id: %i worker_id: %i host_id: %i" % (new_id, self.worker_id, self.host_id)) return new_id def get_host_id(self): return self.host_id _host_id = os.getenv('HOST_ID', random.randint(0, 3)) _worker_id = os.getenv('WORKER_ID', random.randint(0, 255)) _worker = IdWorker(_worker_id, _host_id) def get_id() -> int: return _worker.get_id()
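A small, hedged sketch of how the generated IDs decompose, using the bit widths defined in IdWorker above (12 sequence bits, 8 worker-id bits, 2 data-center bits); the worker and host values are illustrative.

worker = IdWorker(worker_id=7, host_id=1)   # illustrative values within the allowed ranges
new_id = worker.get_id()

# Unpack the fields back out of the ID using the same shifts and masks as _next_id().
sequence = new_id & worker.sequence_mask
worker_id = (new_id >> worker.worker_id_shift) & worker.max_worker_id
host_id = (new_id >> worker.data_center_id_shift) & worker.max_data_center_id
timestamp_ms = (new_id >> worker.timestamp_left_shift) + worker.twepoch

print(sequence, worker_id, host_id, timestamp_ms)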
python
""" CPG locomotion controller. """ import itertools import os from argparse import ArgumentParser from pathlib import Path import farms_pylog as pylog import matplotlib.pyplot as plt import networkx as nx import numpy as np import yaml from farms_container import Container from farms_network.networkx_model import NetworkXModel from farms_network.neural_system import NeuralSystem pylog.set_level("error") def add_mutual_connection(network, node_1, node_2, weight, phi): """ Add mutual connection between two nodes """ network.add_edge(node_1, node_2, weight=weight, phi=phi) network.add_edge(node_2, node_1, weight=weight, phi=-1*phi) def add_connection_antagonist(network, node_1, node_2, **kwargs): """ Add mutual connection between two nodes """ weight = kwargs.pop('weight', 1.0) phi = kwargs.pop('phi', 0.0) add_mutual_connection( network, f"{node_1}_flexion", f"{node_2}_flexion", weight=weight, phi=phi ) add_mutual_connection( network, f"{node_1}_extension", f"{node_2}_extension", weight=weight, phi=phi ) def create_oscillator_network(export_path, **kwargs): """Create the drosophila reduced network. """ # Network properties default_weight = kwargs.pop("default_weight", 100.0) default_phi = kwargs.pop("default_phi", 0.0) # Initialize di graph network network = nx.DiGraph() # Generate list of controlled joints in the model sides = ('L', 'R') positions = ('F', 'M', 'H') segments = ('Coxa', 'Femur', 'Tibia') nodes = [ f"joint_{side}{position}{segment}_roll" if (position in ["M", "H"]) and (segment == "Coxa") else f"joint_{side}{position}{segment}" for side in sides for position in positions for segment in segments ] # Create flexion-extension oscillator for each node for node in nodes: network.add_node(f"{node}_flexion", model="oscillator", f=3.0, R=1.0, a=1.0) network.add_node(f"{node}_extension", model="oscillator", f=3.0, R=1.0, a=1.0) # Connect flexion-extension nodes for node in nodes: if node.split("_")[-1][2:] not in ['Femur', 'Tibia']: add_mutual_connection( network, f"{node}_flexion", f"{node}_extension", weight=default_weight, phi=np.pi ) # Connect leg oscillators for side in sides: for position in positions: for j in range(len(segments[:-1])): node_1 = segments[j] node_2 = segments[j+1] if (position in ["M", "H"]) and (segments[j] == "Coxa"): node_1 = "Coxa_roll" add_mutual_connection( network, f"joint_{side}{position}{node_1}_flexion", f"joint_{side}{position}{node_2}_flexion", weight=default_weight, phi=np.pi/2 ) add_mutual_connection( network, f"joint_{side}{position}{node_1}_extension", f"joint_{side}{position}{node_2}_extension", weight=default_weight, phi=np.pi/2 ) #: Connect base nodes base_connections = [ ['LFCoxa', 'RFCoxa', {'weight': default_weight, 'phi': np.pi}], ['LFCoxa', 'RMCoxa_roll', {'weight': default_weight, 'phi': np.pi}], ['RMCoxa_roll', 'LHCoxa_roll', {'weight': default_weight, 'phi': 0.0}], ['RFCoxa', 'LMCoxa_roll', {'weight': default_weight, 'phi': np.pi}], ['LMCoxa_roll', 'RHCoxa_roll', {'weight': default_weight, 'phi': 0.0}], ] for n1, n2, data in base_connections: add_connection_antagonist(network, f"joint_{n1}", f"joint_{n2}", **data) # Update node positions for visualization with open('locomotion_network_node_positions.yaml', 'r') as file: node_positions = yaml.load(file, yaml.SafeLoader) for node, data in node_positions.items(): network.nodes[node]['x'] = data[0] network.nodes[node]['y'] = data[1] network.nodes[node]['z'] = data[2] # Export graph print(export_path) nx.write_graphml(network, export_path) def run_network(network_path): """ Run the network. 
Parameters ---------- network_path : <Path> Path to the network config file """ # Initialize network dt = 1e-3 #: Time step (1ms) duration = 2 time_vec = np.arange(0, duration, dt) #: Time container = Container(duration/dt) net = NeuralSystem(network_path, container) # initialize network parameters container.initialize() net.setup_integrator() #: Integrate the network pylog.debug('Begin Integration!') for t in time_vec: net.step(dt=dt) container.update_log() #: Results container.dump(overwrite=True) # Plot results neural_data = container.neural neural_outputs = neural_data.outputs.log neural_outputs_names = neural_data.outputs.names neural_outputs_name_id = neural_data.outputs.name_index # Plot Intra-limb activations for leg in ("RF", "RM", "RH", "LH", "LM", "LH"): leg_data = np.asarray( [ neural_outputs[:, neural_outputs_name_id[name]] for name in neural_outputs_names if leg in name ] ).T leg_names = [ name for name in neural_outputs_names if leg in name ] fig, axs = plt.subplots(nrows=3, ncols=1) axs[0].plot(time_vec, 1 + np.sin(leg_data[:, :2])) axs[1].plot(time_vec, 1 + np.sin(leg_data[:, 2:4])) axs[2].plot(time_vec, 1 + np.sin(leg_data[:, 4:])) axs[0].axes.xaxis.set_visible(False) axs[1].axes.xaxis.set_visible(False) axs[0].set_title(leg_names[0].split('_')[2]) axs[1].set_title(leg_names[2].split('_')[2]) axs[2].set_title(leg_names[4].split('_')[2]) axs[2].set_xlabel("Time[s]") # Plot Inter-limb activations leg_data = np.asarray( [ neural_outputs[:, neural_outputs_name_id[name]] for name in neural_outputs_names if "Coxa" in name and "flexion" in name ] ).T leg_names = [ name for name in neural_outputs_names if "Coxa" in name ] fig, ax = plt.subplots(nrows=1, ncols=1) ax.plot(time_vec, 1 + np.sin(leg_data[:, :])) ax.set_title("Coxa") ax.set_xlabel("Time[s]") #: Show network net.visualize_network(edge_labels=False) plt.show() def parse_args(): """Parse command line arguments to generate and simulate the network. """ parser = ArgumentParser("Network parser") parser.add_argument( "--export-path", required=False, type=str, default=( Path(__file__).parent.absolute() ).joinpath("../config/network/locomotion_network.graphml"), dest="export_path" ) parser.add_argument( "--run-network", required=False, type=bool, default=True, dest="run_network" ) return parser.parse_args() if __name__ == '__main__': # main() clargs = parse_args() create_oscillator_network(clargs.export_path) if clargs.run_network: run_network(clargs.export_path)
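A small sketch of what add_mutual_connection() produces on a toy graph, assuming only networkx and numpy; the node names are illustrative and not part of the real leg naming scheme.

import networkx as nx
import numpy as np

toy = nx.DiGraph()
toy.add_node("a_flexion", model="oscillator", f=3.0, R=1.0, a=1.0)
toy.add_node("a_extension", model="oscillator", f=3.0, R=1.0, a=1.0)

# One call creates both directed edges, with opposite phase offsets (phi and -phi).
add_mutual_connection(toy, "a_flexion", "a_extension", weight=100.0, phi=np.pi)

print(list(toy.edges(data=True)))
# [('a_flexion', 'a_extension', {'weight': 100.0, 'phi': 3.14...}),
#  ('a_extension', 'a_flexion', {'weight': 100.0, 'phi': -3.14...})]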
python
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import sys import socket import threading import datetime #from threading import Lock from Utils import DebugLock as Lock from Utils import Utils try: from Event import Event from NaptSocket import NaptSocket, NaptSocketStatus from NaptConnectionEventArgs import NaptConnectionEventArgs except Exception as ex: Utils.print_exception(ex) class NaptConnection(object): def __init__(self, client, server): Utils.expects_type(socket.socket, client, 'client') Utils.expects_type(socket.socket, server, 'server', True) self.lock = Lock() self.id = 0 self.client = NaptSocket(self, client, True) self.server = NaptSocket(self, server, False) self.is_initial = True; self.is_connecting = False self.is_connected = False self.is_closed = False self.tag = None self.tls = False self.debug = True self.lastrecvtime = datetime.datetime.now() self.connected = Event() self.closed = Event() self.client_closing = Event() self.server_closing = Event() self.client_closed = Event() self.server_closed = Event() self.client_recieved= Event() self.server_recieved= Event() def __str__(self): return 'NaptConnection{ %s }' % ', '.join([ 'id=%d' % self.id, 'client=%s' % str(self.client), 'server=%s' % str(self.server), 'is_connecting=%s' % str(self.is_connecting), 'is_connected=%s' % str(self.is_connected)]) # public def connect(self, endpoint): Utils.assertion(self.lock.locked(), 'need lock') if self.is_connecting: raise Exception() # InvalidOperationException self.is_connecting = True self.server.status = NaptSocketStatus.Connecting threading.Thread(target = self.do_connect, args = (endpoint,), name = self.__class__.__name__).start() # private def do_connect(self, endpoint): try: self.server.connect(endpoint) # blocking with self.lock: if self.is_closed: # todo close return self.is_connected = True print('INVOKE: on_connected') self.on_connected(None) except Exception as ex: print(' endpoint: %s' % str(endpoint)) Utils.print_exception(ex) # private def update_lastrecvtime(self): self.lastrecvtime = datetime.datetime.now() # public def close(self): if self.debug: print('NaptConnection.close: %s' % str(self)) with self.lock: if self.is_closed: return self.close_client() self.close_server() self.is_closed = True self.on_closed(None) # public def close2(self): Utils.assertion(self.lock.locked(), 'need lock') if self.debug: print('NaptConnection.close: %s' % str(self)) if self.is_closed: return self.close_client() self.close_server() self.is_closed = True #self.on_closed(None) # todo lock for log # protected virtual def on_connected(self, e): self.connected(self, e) # protected virtual def on_closed(self, e): self.closed(self, e) # protected virtual def on_client_closing(self, e): self.client_closing(self, e) # protected virtual def on_server_closing(self, e): self.server_closing(self, e) # protected virtual def on_client_closed(self, e): self.client_closed(self, e) # protected virtual def on_server_closed(self, e): self.server_closed(self, e) # protected virtual def on_client_recieved(self, e): # NaptConnectionEventArgs self.client_recieved(self, e) # protected virtual def on_server_recieved(self, e): # NaptConnectionEventArgs self.server_recieved(self, e) # internal def recv(self, so): Utils.expects_type(NaptSocket, so, 'so') self.update_lastrecvtime(); if so.is_client: self.recv_client() else: self.recv_server() # internal def error(self, so): Utils.expects_type(NaptSocket, so, 'so') # todo error # private def recv_client(self): try: #data= self.client.socket.recv(4096) 
data= Utils.recv(self.client.socket, 4096) e = NaptConnectionEventArgs(self, data, 0, len(data)) if len(data) == 0: # closed #self.close_client(); self.close() return print(' DATA: %s' % str(data)) self.on_client_recieved(e) self.server.push(data, 0, len(data)) except Exception as ex: # SocketException Utils.print_exception(ex) self.close() # private def recv_server(self): try: #data= self.server.socket.recv(4096) data= Utils.recv(self.server.socket, 4096) e = NaptConnectionEventArgs(self, data, 0, len(data)) if len(data) == 0: # closed #self.close_server() self.close() return print(' DATA: %s' % str(data)) self.on_server_recieved(e) self.client.push(data, 0, len(data)) except Exception as ex: # SocketException Utils.print_exception(ex) self.close() # private def close_client(self): if self.debug: print(' NaptConnection.close_client: %s' % str(self.client)) try: self.on_client_closing(None) if self.client.close(): self.on_client_closed(None) except Exception as ex: Utils.print_exception(ex) # private void def close_server(self): if self.debug: print(' NaptConnection.close_server: %s' % str(self.server)) try: self.on_server_closing(None) if self.server.close(): self.on_server_closed(None); except Exception as ex: Utils.print_exception(ex)
python
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if not ret:
        break

    fgmask = fgbg.apply(frame)
    edges = cv2.Canny(fgmask, 100, 200)

    cv2.imshow('Original', frame)
    cv2.imshow('MOG2', fgmask)
    cv2.imshow('Output', edges)

    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
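An optional, hedged variant of the per-frame processing above: MOG2 masks are often speckled, so a small morphological opening before the Canny pass can reduce noise. The kernel size is a guess to be tuned per camera.

import cv2

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

def clean_edges(fgmask):
    # Remove small speckles from the foreground mask, then extract edges.
    opened = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
    return cv2.Canny(opened, 100, 200)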
python
import argparse import logging import string import jsonlines from Levenshtein import distance from tqdm.auto import tqdm from src.models.bart_seq2seq_kilt import BartSeq2Seq from src.models.bert_binary_kilt import BertBinary from src.utils import batch_it, chunk_it def normalize(sent): return ( sent.lower() .replace(" ", "") .translate(str.maketrans("", "", string.punctuation)) ) def predictions_and_alternatives(model, sentences, binary): if binary: return [ ( p[0], ["SUPPORTS" if p[0] == "REFUTES" else "REFUTES"], p[1], ) for p in model.sample(sentences) ] else: return [ ( p[0], list( set( [ a.replace(".", "") for a in p[1:] if (len(a) < 5 and normalize(p[0]) != normalize(a)) or distance(normalize(p[0]), normalize(a)) > 4 ] ).difference({p[0]}) ), None, ) for p in batch_it( model.sample( sentences, min_length=0, num_beams=5, num_return_sequences=5, ), 5, ) ] def filtered_rephrases(model, input_, rephrases, binary): pred = model.sample( [input_] + rephrases, min_length=0, num_beams=5, num_return_sequences=1, ) if binary: return [r for p, r in zip(pred[1:], rephrases) if p[0] == pred[0][0]] else: return [ r for p, r in zip(pred[1:], rephrases) if normalize(p) == normalize(pred[0]) ] if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--input_filename", type=str, help="Filename of the KILT dataset", default="../datasets/structured_zeroshot-dev-new.jsonl", ) parser.add_argument( "--output_filename", type=str, help="Filename of the KILT dataset", default="../datasets/structured_zeroshot-dev-new_annotated.jsonl", ) parser.add_argument( "--model", type=str, help="Filename of the model", default="models/bart_seq2seq_structured_zeroshot/version_0/checkpoints/model-epoch=17-valid_acc=0.2207.ckpt", ) parser.add_argument( "--device", type=str, default="cuda:0", ) parser.add_argument( "--batch_size", type=int, default=12, ) parser.add_argument( "--binary", action="store_true", ) parser.add_argument( "-d", "--debug", help="Print lots of debugging statements", action="store_const", dest="loglevel", const=logging.DEBUG, default=logging.WARNING, ) parser.add_argument( "-v", "--verbose", help="Be verbose", action="store_const", dest="loglevel", const=logging.INFO, ) args, _ = parser.parse_known_args() logging.basicConfig( level=args.loglevel, format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", ) logging.info("Loading model") if args.binary: model = ( BertBinary.load_from_checkpoint(args.model, strict=False) .eval() .to(args.device) ) else: model = ( BartSeq2Seq.load_from_checkpoint(args.model, strict=False) .eval() .to(args.device) ) model.freeze() filename = args.input_filename logging.info("Loading {}".format(filename)) with jsonlines.open(filename) as f: dataset = list(f) if not args.binary: dataset = [ {**d, "input": q} for d in dataset for q in d["meta"]["template_questions"] ] for docs in batch_it(tqdm(dataset, desc="Predicting"), args.batch_size): for d, (p, a, l) in zip( docs, predictions_and_alternatives( model, [d["input"] for d in docs], args.binary, ), ): d["prediction"] = p d["alternatives"] = a d["filtered_rephrases"] = filtered_rephrases( model, d["input"], d["rephrases"], args.binary, ) if l: d["logit"] = l.item() filename = args.output_filename logging.info("Saving {}".format(filename)) with jsonlines.open(filename, "w") as f: f.write_all(dataset)
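A tiny, hedged illustration of the core keep/drop predicate used in predictions_and_alternatives() above (simplified: it omits the final set-difference and the "." stripping); it reuses the module's normalize() and the python-Levenshtein distance, and the prediction and candidates are made up.

from Levenshtein import distance

prediction = "Paris"
candidates = ["paris.", "PARIS ", "Lyon", "the capital of France"]

kept = [
    a for a in candidates
    if (len(a) < 5 and normalize(prediction) != normalize(a))
    or distance(normalize(prediction), normalize(a)) > 4
]
print(kept)
# -> ['Lyon', 'the capital of France']; near-duplicates of the prediction are dropped.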
python
from ._base import *
from ..tinygrail.bigc import BigC
from ..tinygrail.model import TBid


@click.command()
@click.argument("player_name", type=TG_PLAYER)
@click.argument("character_ids", type=int, nargs=-1)
def force_view(player_name, character_ids):
    for cid in character_ids:
        big_c = BigC(player_name, cid)
        big_c.create_bid(TBid(Price=2, Amount=2))
python
from algosdk.v2client.indexer import IndexerClient from algosdk.v2client.algod import AlgodClient from tinyman.v1.client import TinymanMainnetClient from tinyman.v1.pools import get_pool_info_from_account_info import datetime import statistics class AlgoTools: def __init__(self, address = None): ### Setup Stuff ### self.indexer_address = 'https://algoexplorerapi.io/idx2' self.indexer_token = '' self.algod_address = 'https://algoexplorerapi.io' self.algod_token = '' self.address = address # Set up API instances self.indexer_client = IndexerClient(self.indexer_token, self.indexer_address, headers={'User-Agent': 'algosdk'}) self.algod_client = AlgodClient(self.algod_token, self.algod_address, headers={'User-Agent': 'algosdk'}) self.tiny = TinymanMainnetClient(algod_client=self.algod_client, user_address=self.address) ### End Setup ### ### Start Functions ### def GetPools(self, address): # Creates a dict of all tinyman pools associated with address. # Contents of each pool will have: # 'pair_name' # 'pool_id' # 'asset1' # 'asset2' all_pools = {} tp = 0 algod = self.algod_client.account_info(address) for asset in algod['assets']: # Look for tinyman assets and pull pools. try: asset_info = self.algod_client.asset_info(asset['asset-id']) except: continue asset_name = asset_info['params']['name'] if 'Tinyman Pool' in asset_name: tinypool = {} pool_info = self.algod_client.account_info(asset_info['params']['creator']) pool = get_pool_info_from_account_info(pool_info) asset1 = self.tiny.fetch_asset(pool['asset1_id']) asset2 = self.tiny.fetch_asset(pool['asset2_id']) tinypool['pair_name'] = asset_name tinypool['pool_id'] = pool['liquidity_asset_id'] tinypool['asset1'] = asset1 tinypool['asset2'] = asset2 all_pools[tp] = tinypool tp = tp+1 del tinypool return all_pools ##### def ConvertDate(self, date): if isinstance(date, str): newdate = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S') elif isinstance(date, datetime.datetime): newdate = date newstrdate = str(newdate.day) + '-' + str(newdate.month) + '-' + str(newdate.year) return newstrdate ##### def CalculateAPY(self, value_start, value_now, day1, today = datetime.datetime.now()): # Not quite ready for prime time if isinstance(day1, str): day1_dt = datetime.datetime.strptime(day1, '%d-%m-%Y') deltadate = today - day1_dt APY = ((value_now / value_start) - 1) * (deltadate.days) / 365 return APY ##### def GetPriceFromPool(self, ASSET, block_id = 0, num_blocks = 133): # 133 ~ +/-10 minutes from transaction ALGO = self.tiny.fetch_asset(0) pool = self.tiny.fetch_pool(ALGO, ASSET) if block_id == 0: # Current price quote = pool.fetch_fixed_input_swap_quote(ALGO(1_000_000), slippage=0.01) asset_price = 1/(quote.amount_out.amount * 10**(-ASSET.decimals)) else: tx_past = self.indexer_client.search_transactions_by_address(pool.address, min_round = block_id-num_blocks, max_round = block_id+num_blocks) groupID_last = None algo_per_asset = [] asset_amt = 0 algo_amt = 0 for tx in tx_past['transactions']: if 'group' not in tx: # Skip if tx is not part of a group continue elif asset_amt != 0 and algo_amt != 0: # After getting an asset value and algo value, calculate the price algo_per_asset.append(algo_amt / asset_amt) continue elif tx['group'] != groupID_last: # Start a new group transaction to calculate price groupID_last = tx['group'] asset_amt = 0 algo_amt = 0 else: if tx['tx-type'] == 'axfer': if tx['asset-transfer-transaction']['asset-id'] == ASSET.id: asset_amt = tx['asset-transfer-transaction']['amount'] * 10**(-ASSET.decimals) elif tx['tx-type'] == 
'pay': # Check if the value is >A0.01 as this would most likely be a fee if tx['payment-transaction']['amount'] >= 1e4: algo_amt = tx['payment-transaction']['amount'] * 10**(-ALGO.decimals) if len(algo_per_asset) < 10: # Use minimum 10 txns to get an average if num_blocks >= 3192: # Stops trying after timespan = 8 hours (+/-4 hours) print('Could not find enough transactions to estimate price.') asset_price = -1 else: # Keep adding +/-10 minutes until we get enough data print('Time band: +/-' + str(num_blocks/13.3 + 10) + ' minutes') asset_price = self.GetPriceFromPool(ASSET, block_id, num_blocks+133) else: # Use the median to calculate the price to ensure lopsided trades are not included asset_price = statistics.median(algo_per_asset) return asset_price ##### def ALGOtoUSD(self, price_in_algo, usdc_price_algo, usdt_price_algo): usd_price_algo = (usdc_price_algo + usdt_price_algo) / 2 # Average of usdc and usdt in case one of them is a bit off from the dollar asset_price_usd = price_in_algo / usd_price_algo return asset_price_usd ### End Functions ###
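A hedged usage sketch of the AlgoTools helper above; the wallet address is a placeholder and the asset ID is assumed to be USDC's mainnet ASA ID, so verify both before relying on it.

tools = AlgoTools(address="YOUR_WALLET_ADDRESS")   # placeholder address

# List the Tinyman pools the wallet participates in.
pools = tools.GetPools(tools.address)
for pool in pools.values():
    print(pool['pair_name'], pool['pool_id'])

# Current price of an asset in ALGO (GetPriceFromPool returns ALGO per unit of the asset).
usdc = tools.tiny.fetch_asset(31566704)            # assumed USDC ASA ID
usdc_price_in_algo = tools.GetPriceFromPool(usdc)
print("1 USDC ~", usdc_price_in_algo, "ALGO")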
python
#!/usr/bin/env python
import unittest


class test_sample_hook(unittest.TestCase):

    def test_nothing(self):
        # do nothing
        return
python
# coding: utf-8 import responses import os import json import io import watson_developer_cloud from watson_developer_cloud.discovery_v1 import TrainingDataSet, TrainingQuery, TrainingExample try: from urllib.parse import urlparse, urljoin except ImportError: from urlparse import urlparse, urljoin base_discovery_url = 'https://gateway.watsonplatform.net/discovery/api/v1/' platform_url = 'https://gateway.watsonplatform.net' service_path = '/discovery/api' base_url = '{0}{1}'.format(platform_url, service_path) version = '2016-12-01' environment_id = 'envid' collection_id = 'collid' @responses.activate def test_environments(): discovery_url = urljoin(base_discovery_url, 'environments') discovery_response_body = """{ "environments": [ { "environment_id": "string", "name": "envname", "description": "", "created": "2016-11-20T01:03:17.645Z", "updated": "2016-11-20T01:03:17.645Z", "status": "status", "index_capacity": { "disk_usage": { "used_bytes": 0, "total_bytes": 0, "used": "string", "total": "string", "percent_used": 0 }, "memory_usage": { "used_bytes": 0, "total_bytes": 0, "used": "string", "total": "string", "percent_used": 0 } } } ] }""" responses.add(responses.GET, discovery_url, body=discovery_response_body, status=200, content_type='application/json') discovery = watson_developer_cloud.DiscoveryV1('2016-11-07', username='username', password='password') discovery.list_environments() url_str = "{0}?version=2016-11-07".format(discovery_url) assert responses.calls[0].request.url == url_str assert responses.calls[0].response.text == discovery_response_body assert len(responses.calls) == 1 @responses.activate def test_get_environment(): discovery_url = urljoin(base_discovery_url, 'environments/envid') responses.add(responses.GET, discovery_url, body="{\"resulting_key\": true}", status=200, content_type='application/json') discovery = watson_developer_cloud.DiscoveryV1('2016-11-07', username='username', password='password') discovery.get_environment(environment_id='envid') url_str = "{0}?version=2016-11-07".format(discovery_url) assert responses.calls[0].request.url == url_str assert len(responses.calls) == 1 @responses.activate def test_create_environment(): discovery_url = urljoin(base_discovery_url, 'environments') responses.add(responses.POST, discovery_url, body="{\"resulting_key\": true}", status=200, content_type='application/json') discovery = watson_developer_cloud.DiscoveryV1('2016-11-07', username='username', password='password') discovery.create_environment(name="my name", description="my description") assert len(responses.calls) == 1 @responses.activate def test_update_environment(): discovery_url = urljoin(base_discovery_url, 'environments/envid') responses.add(responses.PUT, discovery_url, body="{\"resulting_key\": true}", status=200, content_type='application/json') discovery = watson_developer_cloud.DiscoveryV1('2016-11-07', username='username', password='password') discovery.update_environment('envid', name="hello", description="new") assert len(responses.calls) == 1 @responses.activate def test_delete_environment(): discovery_url = urljoin(base_discovery_url, 'environments/envid') responses.add(responses.DELETE, discovery_url, body="{\"resulting_key\": true}", status=200, content_type='application/json') discovery = watson_developer_cloud.DiscoveryV1('2016-11-07', username='username', password='password') discovery.delete_environment('envid') assert len(responses.calls) == 1 @responses.activate def test_collections(): discovery_url = urljoin(base_discovery_url, 
'environments/envid/collections') responses.add(responses.GET, discovery_url, body="{\"body\": \"hello\"}", status=200, content_type='application/json') discovery = watson_developer_cloud.DiscoveryV1('2016-11-07', username='username', password='password') discovery.list_collections('envid') called_url = urlparse(responses.calls[0].request.url) test_url = urlparse(discovery_url) assert called_url.netloc == test_url.netloc assert called_url.path == test_url.path assert len(responses.calls) == 1 @responses.activate def test_collection(): discovery_url = urljoin(base_discovery_url, 'environments/envid/collections/collid') discovery_fields = urljoin(base_discovery_url, 'environments/envid/collections/collid/fields') config_url = urljoin(base_discovery_url, 'environments/envid/configurations') responses.add(responses.GET, config_url, body="{\"body\": \"hello\"}", status=200, content_type='application/json') responses.add(responses.GET, discovery_fields, body="{\"body\": \"hello\"}", status=200, content_type='application/json') responses.add(responses.GET, discovery_url, body="{\"body\": \"hello\"}", status=200, content_type='application/json') responses.add(responses.DELETE, discovery_url, body="{\"body\": \"hello\"}", status=200, content_type='application/json') responses.add(responses.POST, urljoin(base_discovery_url, 'environments/envid/collections'), body="{\"body\": \"create\"}", status=200, content_type='application/json') discovery = watson_developer_cloud.DiscoveryV1('2016-11-07', username='username', password='password') discovery.create_collection(environment_id='envid', name="name", description="", language="", configuration_id='confid') discovery.create_collection(environment_id='envid', name="name", language="es", description="") discovery.get_collection('envid', 'collid') called_url = urlparse(responses.calls[2].request.url) test_url = urlparse(discovery_url) assert called_url.netloc == test_url.netloc assert called_url.path == test_url.path discovery.delete_collection(environment_id='envid', collection_id='collid') discovery.list_collection_fields(environment_id='envid', collection_id='collid') assert len(responses.calls) == 5 @responses.activate def test_query(): discovery_url = urljoin(base_discovery_url, 'environments/envid/collections/collid/query') responses.add(responses.GET, discovery_url, body="{\"body\": \"hello\"}", status=200, content_type='application/json') discovery = watson_developer_cloud.DiscoveryV1('2016-11-07', username='username', password='password') discovery.query('envid', 'collid', {'count': 10}) called_url = urlparse(responses.calls[0].request.url) test_url = urlparse(discovery_url) assert called_url.netloc == test_url.netloc assert called_url.path == test_url.path assert len(responses.calls) == 1 @responses.activate def test_query_relations(): discovery_url = urljoin( base_discovery_url, 'environments/envid/collections/collid/query_relations') responses.add( responses.POST, discovery_url, body="{\"body\": \"hello\"}", status=200, content_type='application/json') discovery = watson_developer_cloud.DiscoveryV1( '2016-11-07', username='username', password='password') discovery.query_relations('envid', 'collid', count=10) called_url = urlparse(responses.calls[0].request.url) test_url = urlparse(discovery_url) assert called_url.netloc == test_url.netloc assert called_url.path == test_url.path assert len(responses.calls) == 1 @responses.activate def test_query_entities(): discovery_url = urljoin( base_discovery_url, 
'environments/envid/collections/collid/query_entities') responses.add( responses.POST, discovery_url, body="{\"body\": \"hello\"}", status=200, content_type='application/json') discovery = watson_developer_cloud.DiscoveryV1( '2016-11-07', username='username', password='password') discovery.query_entities('envid', 'collid', {'count': 10}) called_url = urlparse(responses.calls[0].request.url) test_url = urlparse(discovery_url) assert called_url.netloc == test_url.netloc assert called_url.path == test_url.path assert len(responses.calls) == 1 @responses.activate def test_configs(): discovery_url = urljoin(base_discovery_url, 'environments/envid/configurations') discovery_config_id = urljoin(base_discovery_url, 'environments/envid/configurations/confid') results = {"configurations": [{"name": "Default Configuration", "configuration_id": "confid"}]} responses.add(responses.GET, discovery_url, body=json.dumps(results), status=200, content_type='application/json') responses.add(responses.GET, discovery_config_id, body=json.dumps(results['configurations'][0]), status=200, content_type='application/json') responses.add(responses.POST, discovery_url, body=json.dumps(results['configurations'][0]), status=200, content_type='application/json') responses.add(responses.PUT, discovery_config_id, body=json.dumps(results['configurations'][0]), status=200, content_type='application/json') responses.add(responses.DELETE, discovery_config_id, body=json.dumps({'deleted': 'bogus -- ok'}), status=200, content_type='application/json') discovery = watson_developer_cloud.DiscoveryV1('2016-11-07', username='username', password='password') discovery.list_configurations(environment_id='envid') discovery.get_configuration(environment_id='envid', configuration_id='confid') assert len(responses.calls) == 2 discovery.create_configuration(environment_id='envid', name='my name') discovery.update_configuration(environment_id='envid', configuration_id='confid', name='my new name') discovery.delete_configuration(environment_id='envid', configuration_id='confid') assert len(responses.calls) == 5 @responses.activate def test_document(): discovery_url = urljoin(base_discovery_url, 'environments/envid/preview') config_url = urljoin(base_discovery_url, 'environments/envid/configurations') responses.add(responses.POST, discovery_url, body="{\"configurations\": []}", status=200, content_type='application/json') responses.add(responses.GET, config_url, body=json.dumps({"configurations": [{"name": "Default Configuration", "configuration_id": "confid"}]}), status=200, content_type='application/json') discovery = watson_developer_cloud.DiscoveryV1('2016-11-07', username='username', password='password') html_path = os.path.join(os.getcwd(), 'resources', 'simple.html') with open(html_path) as fileinfo: conf_id = discovery.test_configuration_in_environment(environment_id='envid', configuration_id='bogus', file=fileinfo) assert conf_id is not None conf_id = discovery.test_configuration_in_environment(environment_id='envid', file=fileinfo) assert conf_id is not None assert len(responses.calls) == 2 add_doc_url = urljoin(base_discovery_url, 'environments/envid/collections/collid/documents') doc_id_path = 'environments/envid/collections/collid/documents/docid' update_doc_url = urljoin(base_discovery_url, doc_id_path) del_doc_url = urljoin(base_discovery_url, doc_id_path) responses.add(responses.POST, add_doc_url, body="{\"body\": []}", status=200, content_type='application/json') doc_status = { "document_id": 
"45556e23-f2b1-449d-8f27-489b514000ff", "configuration_id": "2e079259-7dd2-40a9-998f-3e716f5a7b88", "created" : "2016-06-16T10:56:54.957Z", "updated" : "2017-05-16T13:56:54.957Z", "status": "available", "status_description": "Document is successfully ingested and indexed with no warnings", "notices": [] } responses.add(responses.GET, del_doc_url, body=json.dumps(doc_status), status=200, content_type='application/json') responses.add(responses.POST, update_doc_url, body="{\"body\": []}", status=200, content_type='application/json') responses.add(responses.DELETE, del_doc_url, body="{\"body\": []}", status=200, content_type='application/json') html_path = os.path.join(os.getcwd(), 'resources', 'simple.html') with open(html_path) as fileinfo: conf_id = discovery.add_document(environment_id='envid', collection_id='collid', file=fileinfo) assert conf_id is not None assert len(responses.calls) == 3 discovery.get_document_status(environment_id='envid', collection_id='collid', document_id='docid') assert len(responses.calls) == 4 discovery.update_document(environment_id='envid', collection_id='collid', document_id='docid') assert len(responses.calls) == 5 discovery.update_document(environment_id='envid', collection_id='collid', document_id='docid') assert len(responses.calls) == 6 discovery.delete_document(environment_id='envid', collection_id='collid', document_id='docid') assert len(responses.calls) == 7 conf_id = discovery.add_document(environment_id='envid', collection_id='collid', file=io.StringIO(u'my string of file'), filename='file.txt') assert len(responses.calls) == 8 conf_id = discovery.add_document(environment_id='envid', collection_id='collid', file=io.StringIO(u'<h1>my string of file</h1>'), filename='file.html', file_content_type='application/html') assert len(responses.calls) == 9 conf_id = discovery.add_document(environment_id='envid', collection_id='collid', file=io.StringIO(u'<h1>my string of file</h1>'), filename='file.html', file_content_type='application/html', metadata=io.StringIO(u'{"stuff": "woot!"}')) assert len(responses.calls) == 10 @responses.activate def test_delete_all_training_data(): training_endpoint = '/v1/environments/{0}/collections/{1}/training_data' endpoint = training_endpoint.format(environment_id, collection_id) url = '{0}{1}'.format(base_url, endpoint) responses.add(responses.DELETE, url, status=204) service = watson_developer_cloud.DiscoveryV1(version, username='username', password='password') response = service.delete_all_training_data(environment_id=environment_id, collection_id=collection_id) assert response is None @responses.activate def test_list_training_data(): training_endpoint = '/v1/environments/{0}/collections/{1}/training_data' endpoint = training_endpoint.format(environment_id, collection_id) url = '{0}{1}'.format(base_url, endpoint) mock_response = { "environment_id": "string", "collection_id": "string", "queries": [ { "query_id": "string", "natural_language_query": "string", "filter": "string", "examples": [ { "document_id": "string", "cross_reference": "string", "relevance": 0 } ] } ] } responses.add(responses.GET, url, body=json.dumps(mock_response), status=200, content_type='application/json') service = watson_developer_cloud.DiscoveryV1(version, username='username', password='password') response = service.list_training_data(environment_id=environment_id, collection_id=collection_id) assert response == mock_response # Verify that response can be converted to a TrainingDataSet TrainingDataSet._from_dict(response) @responses.activate def 
test_add_training_data(): training_endpoint = '/v1/environments/{0}/collections/{1}/training_data' endpoint = training_endpoint.format(environment_id, collection_id) url = '{0}{1}'.format(base_url, endpoint) natural_language_query = "why is the sky blue" filter = "text:meteorology" examples = [ { "document_id": "54f95ac0-3e4f-4756-bea6-7a67b2713c81", "relevance": 1 }, { "document_id": "01bcca32-7300-4c9f-8d32-33ed7ea643da", "cross_reference": "my_id_field:1463", "relevance": 5 } ] mock_response = { "query_id": "string", "natural_language_query": "string", "filter": "string", "examples": [ { "document_id": "string", "cross_reference": "string", "relevance": 0 } ] } responses.add(responses.POST, url, body=json.dumps(mock_response), status=200, content_type='application/json') service = watson_developer_cloud.DiscoveryV1(version, username='username', password='password') response = service.add_training_data( environment_id=environment_id, collection_id=collection_id, natural_language_query=natural_language_query, filter=filter, examples=examples) assert response == mock_response # Verify that response can be converted to a TrainingQuery TrainingQuery._from_dict(response) @responses.activate def test_delete_training_data(): training_endpoint = '/v1/environments/{0}/collections/{1}/training_data/{2}' query_id = 'queryid' endpoint = training_endpoint.format( environment_id, collection_id, query_id) url = '{0}{1}'.format(base_url, endpoint) responses.add(responses.DELETE, url, status=204) service = watson_developer_cloud.DiscoveryV1(version, username='username', password='password') response = service.delete_training_data(environment_id=environment_id, collection_id=collection_id, query_id=query_id) assert response is None @responses.activate def test_get_training_data(): training_endpoint = '/v1/environments/{0}/collections/{1}/training_data/{2}' query_id = 'queryid' endpoint = training_endpoint.format( environment_id, collection_id, query_id) url = '{0}{1}'.format(base_url, endpoint) mock_response = { "query_id": "string", "natural_language_query": "string", "filter": "string", "examples": [ { "document_id": "string", "cross_reference": "string", "relevance": 0 } ] } responses.add(responses.GET, url, body=json.dumps(mock_response), status=200, content_type='application/json') service = watson_developer_cloud.DiscoveryV1(version, username='username', password='password') response = service.get_training_data(environment_id=environment_id, collection_id=collection_id, query_id=query_id) assert response == mock_response # Verify that response can be converted to a TrainingQuery TrainingQuery._from_dict(response) @responses.activate def test_create_training_example(): examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \ '/{2}/examples' query_id = 'queryid' endpoint = examples_endpoint.format( environment_id, collection_id, query_id) url = '{0}{1}'.format(base_url, endpoint) document_id = "string" relevance = 0 cross_reference = "string" mock_response = { "document_id": "string", "cross_reference": "string", "relevance": 0 } responses.add(responses.POST, url, body=json.dumps(mock_response), status=201, content_type='application/json') service = watson_developer_cloud.DiscoveryV1(version, username='username', password='password') response = service.create_training_example( environment_id=environment_id, collection_id=collection_id, query_id=query_id, document_id=document_id, relevance=relevance, cross_reference=cross_reference) assert response == mock_response # Verify that 
response can be converted to a TrainingExample TrainingExample._from_dict(response) @responses.activate def test_delete_training_example(): examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \ '/{2}/examples/{3}' query_id = 'queryid' example_id = 'exampleid' endpoint = examples_endpoint.format(environment_id, collection_id, query_id, example_id) url = '{0}{1}'.format(base_url, endpoint) responses.add(responses.DELETE, url, status=204) service = watson_developer_cloud.DiscoveryV1(version, username='username', password='password') response = service.delete_training_example( environment_id=environment_id, collection_id=collection_id, query_id=query_id, example_id=example_id) assert response is None @responses.activate def test_get_training_example(): examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \ '/{2}/examples/{3}' query_id = 'queryid' example_id = 'exampleid' endpoint = examples_endpoint.format(environment_id, collection_id, query_id, example_id) url = '{0}{1}'.format(base_url, endpoint) mock_response = { "document_id": "string", "cross_reference": "string", "relevance": 0 } responses.add(responses.GET, url, body=json.dumps(mock_response), status=200, content_type='application/json') service = watson_developer_cloud.DiscoveryV1(version, username='username', password='password') response = service.get_training_example( environment_id=environment_id, collection_id=collection_id, query_id=query_id, example_id=example_id) assert response == mock_response # Verify that response can be converted to a TrainingExample TrainingExample._from_dict(response) @responses.activate def test_update_training_example(): examples_endpoint = '/v1/environments/{0}/collections/{1}/training_data' + \ '/{2}/examples/{3}' query_id = 'queryid' example_id = 'exampleid' endpoint = examples_endpoint.format(environment_id, collection_id, query_id, example_id) url = '{0}{1}'.format(base_url, endpoint) relevance = 0 cross_reference = "string" mock_response = { "document_id": "string", "cross_reference": "string", "relevance": 0 } responses.add(responses.PUT, url, body=json.dumps(mock_response), status=200, content_type='application/json') service = watson_developer_cloud.DiscoveryV1(version, username='username', password='password') response = service.update_training_example( environment_id=environment_id, collection_id=collection_id, query_id=query_id, example_id=example_id, relevance=relevance, cross_reference=cross_reference) assert response == mock_response # Verify that response can be converted to a TrainingExample TrainingExample._from_dict(response) @responses.activate def test_expansions(): url = 'https://gateway.watsonplatform.net/discovery/api/v1/environments/envid/collections/colid/expansions' responses.add( responses.GET, url, body='{"expansions": "results"}', status=200, content_type='application_json') responses.add( responses.DELETE, url, body='{"description": "success" }', status=200, content_type='application_json') responses.add( responses.POST, url, body='{"expansions": "success" }', status=200, content_type='application_json') discovery = watson_developer_cloud.DiscoveryV1('2017-11-07', username="username", password="password") discovery.list_expansions('envid', 'colid') assert responses.calls[0].response.json() == {"expansions": "results"} discovery.create_expansions('envid', 'colid', [{"input_terms": "dumb", "expanded_terms": "dumb2"}]) assert responses.calls[1].response.json() == {"expansions": "success"} discovery.delete_expansions('envid', 
'colid') assert responses.calls[2].response.json() == {"description": "success"} assert len(responses.calls) == 3
python
import glob
import datetime
import string
import pandas as pd

current_year = datetime.datetime.today().year


def age_binner(age):
    if age < 5:
        return "04 and under"
    elif 5 <= age <= 9:
        return "05 to 09 years"
    elif 10 <= age <= 14:
        return "10 to 14 years"
    elif 15 <= age <= 19:
        return "15 to 19 years"
    elif 20 <= age <= 24:
        return "20 to 24 years"
    elif 25 <= age <= 29:
        return "25 to 29 years"
    elif 30 <= age <= 34:
        return "30 to 34 years"
    elif 35 <= age <= 39:
        return "35 to 39 years"
    elif 40 <= age <= 44:
        return "40 to 44 years"
    elif 45 <= age <= 49:
        return "45 to 49 years"
    elif 50 <= age <= 54:
        return "50 to 54 years"
    elif 55 <= age <= 59:
        return "55 to 59 years"
    elif 60 <= age <= 64:
        return "60 to 64 years"
    elif 65 <= age <= 69:
        return "65 to 69 years"
    elif 70 <= age <= 74:
        return "70 to 74 years"
    elif 75 <= age <= 79:
        return "75 to 79 years"
    elif 80 <= age <= 84:
        return "80 to 84 years"
    else:
        return "85 years and over"


def get_data():
    d = {}
    # Column names for the raw state files; the count column is 'occurrences'.
    columns = ['state', 'sex', 'year', 'name', 'occurrences']
    for file in glob.glob('namesbystate/*.TXT'):
        print file
        state = file.replace(".TXT", "").replace('namesbystate/', '')
        df = pd.read_csv(file, names=columns, header=None)
        df['current_age'] = current_year - df['year']
        df['age_bin'] = df['current_age'].apply(age_binner)
        df['name'] = df['name'].apply(string.lower)
        d[state] = df[['age_bin', 'name', 'sex', 'occurrences']].groupby(['age_bin', 'name', 'sex']).sum()
    by_state = pd.Panel.from_dict(d)
    total = by_state.minor_xs('occurrences').sum(1)
    return by_state, total


class DiscreteDistribution(object):

    def __init__(self, prior):
        self.posterior = prior
        self._prior = prior
        self.n = 0

    def update(self, p):
        # P(age_bin) = P(age_bin | 'Sara')*P('Sara' | Alive)*P(Alive) + ..
        # boils down to the below formula.
        self.posterior = p.fillna(0) + self.posterior
        return


def normalize_vector(v):
    return v / v.sum()


def name_distribution(name, data, prior):
    try:
        return normalize_vector(data.ix[name, :, :]['occurrences'].sum(level='age_bin').reindex(prior.index))
    except KeyError:
        return prior
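A hedged sketch of how DiscreteDistribution and normalize_vector compose, using made-up bins and numbers rather than the real SSA data; update() adds the evidence onto the running posterior after filling gaps with zero.

import pandas as pd

bins = ["20 to 24 years", "25 to 29 years", "30 to 34 years"]
prior = normalize_vector(pd.Series([1.0, 1.0, 1.0], index=bins))   # flat prior over a few bins

belief = DiscreteDistribution(prior)

# Evidence from one name: mass concentrated in the younger bins, with a gap (NaN)
# that update() fills with 0 before accumulating.
evidence = pd.Series([0.7, 0.3, None], index=bins)
belief.update(evidence)

print(normalize_vector(belief.posterior))
# roughly 0.52, 0.32, 0.17 after renormalising the accumulated mass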
python
class MethodsManager:
    """My Methods Manager
    """

    def __init__(self):
        self.heap = {}

    def insert(self, elems):
        """Insert for main

        Args:
            elems (list): Tokens from user input
        """
        if elems[1][0].isupper():
            name = elems[1]
            # Do I have a superclass?
            if ":" in elems[2:]:
                if self.have(elems[3]):
                    super_class = elems[3]
                    if len(elems) > 3:
                        methods = elems[4:]
                    else:
                        methods = []
                    self.insert_simple(name, super_class, *methods)
                    str_methods = ' '.join(map(str, methods))
                    print(f"Created {name} with methods {str_methods}\n")
                else:
                    print(f"Error: {elems[3]} is not a declared class\n")
            # No superclass
            else:
                super_class = None
                if len(elems) > 1:
                    methods = elems[2:]
                else:
                    methods = []
                self.insert_simple(name, super_class, *methods)
                str_methods = ' '.join(map(str, methods))
                print(f"Created {name} with methods {str_methods}\n")
        else:
            print("Error: Class names must start with an uppercase letter\n")

    def insert_simple(self, name, super_class, *kwargs):
        """Format my input

        Args:
            name (string): Name of my Class
            super_class (string): Name of my Super Class
        """
        elem = {"super": super_class, "methods": [*kwargs]}
        self.heap[name] = elem

    def have(self, name):
        """To know if I have a class with this name

        Args:
            name (string): Name of my Class

        Returns:
            bool: True if name is in my heap else False
        """
        try:
            self.heap[name]
            return True
        except KeyError:
            return False

    def search_methods(self, name):
        """Resolve every method visible on a class by walking up its superclass chain.

        Args:
            name (string): Name of my Class

        Returns:
            string: String of all of the methods for my Class
        """
        if self.have(name):
            base = self.heap[name]
            ancestors = [name]
            # Search for ancestors
            while base["super"] is not None:
                ancestors.append(base["super"])
                base = self.heap[base["super"]]
            # Oldest ancestor first
            ancestors = ancestors[::-1]
            methods_with_ancestor = {}
            # For each ancestor, record which class each method comes from
            for ancestor in ancestors:
                methods = self.heap[ancestor]["methods"]
                for method in methods:
                    methods_with_ancestor[method] = ancestor
            # Pretty print
            response = ""
            for method in methods_with_ancestor:
                response = response + f"{method} -> {methods_with_ancestor[method]} :: {method}\n"
            return response
        else:
            return None

    def __str__(self):
        return str(self.heap)
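A hedged usage sketch of MethodsManager; the token lists are shaped the way insert() expects (elems[1] is the class name, an optional ":" introduces the superclass, the rest are method names), and the class and method names are made up.

manager = MethodsManager()

manager.insert(["class", "Animal", "eat", "sleep"])
manager.insert(["class", "Dog", ":", "Animal", "bark"])

print(manager.search_methods("Dog"))
# eat -> Animal :: eat
# sleep -> Animal :: sleep
# bark -> Dog :: bark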
python
from dataclasses import asdict from dataclasses import dataclass from dataclasses import field from typing import List from unittest import mock from unittest.case import TestCase from lxml.etree import Element from lxml.etree import QName from tests.fixtures.books import BookForm from tests.fixtures.books import Books from xsdata.exceptions import ParserError from xsdata.formats.dataclass.parsers.config import ParserConfig from xsdata.formats.dataclass.parsers.nodes import PrimitiveNode from xsdata.formats.dataclass.parsers.nodes import RootNode from xsdata.formats.dataclass.parsers.nodes import SkipNode from xsdata.formats.dataclass.parsers.xml import XmlParser from xsdata.models.enums import EventType class XmlParserTests(TestCase): def setUp(self): super(XmlParserTests, self).setUp() self.parser = XmlParser() self.parser.index = 10 self.parser.objects = [(QName(x), x) for x in "abcde"] def test_parse_context_raises_exception(self): with self.assertRaises(ParserError) as cm: self.parser.parse_context([], Books) self.assertEqual("Failed to create target class `Books`", str(cm.exception)) def test_add_namespace(self): self.parser.add_namespace(("foo", "bar")) self.assertEqual({"foo": "bar"}, self.parser.namespaces.ns_map) @mock.patch.object(RootNode, "next_node") @mock.patch.object(XmlParser, "emit_event") def test_queue(self, mock_emit_event, mock_next_node): primitive_node = PrimitiveNode(position=1, types=[int]) mock_next_node.return_value = primitive_node element = Element("{urn:books}books") config = ParserConfig() root_queue_item = RootNode( position=0, meta=self.parser.context.build(Books), default=None, config=config, ) objects = list() queue = list() queue.append(root_queue_item) self.parser.queue(element, queue, objects) self.assertEqual(2, len(queue)) self.assertEqual(root_queue_item, queue[0]) self.assertEqual(primitive_node, queue[1]) mock_emit_event.assert_called_once_with( EventType.START, element.tag, item=root_queue_item, element=element ) @mock.patch.object(XmlParser, "emit_event") @mock.patch.object(PrimitiveNode, "parse_element", return_value=("q", "result")) def test_dequeue(self, mock_parse_element, mock_emit_event): element = Element("author", nsmap={"prefix": "uri"}) element.text = "foobar" objects = list() queue = list() queue.append(PrimitiveNode(position=0, types=[str], default=None)) result = self.parser.dequeue(element, queue, objects) self.assertEqual("result", result) self.assertEqual(0, len(queue)) self.assertEqual(("q", result), objects[-1]) mock_parse_element.assert_called_once_with(element, objects) mock_emit_event.assert_called_once_with( EventType.END, element.tag, obj=result, element=element ) @mock.patch.object(XmlParser, "emit_event") def test_dequeue_with_none_qname(self, mock_emit_event): element = Element("author", nsmap={"prefix": "uri"}) element.text = "foobar" objects = list() queue = list() queue.append(SkipNode(position=0)) result = self.parser.dequeue(element, queue, objects) self.assertIsNone(result) self.assertEqual(0, len(queue)) self.assertEqual(0, len(objects)) self.assertEqual(0, mock_emit_event.call_count) def test_emit_event(self): mock_func = mock.Mock() self.parser.foo_bar_element = mock_func self.parser.emit_event("foo", "{tns}barElement", a=1, b=2) mock_func.assert_called_once_with(a=1, b=2) self.assertEqual({"{tns}barElement": "bar_element"}, self.parser.event_names) class XmlParserIntegrationTest(TestCase): def setUp(self): super(XmlParserIntegrationTest, self).setUp() self.books = Books( book=[ BookForm( id="bk001", 
author="Hightower, Kim", title="The First Book", genre="Fiction", price=44.95, pub_date="2000-10-01", review="An amazing story of nothing.", ), BookForm( id="bk002", author="Nagata, Suanne", title="Becoming Somebody", genre="Biography", review="A masterpiece of the fine art of gossiping.", ), ] ) def test_parse(self): xml = ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<brk:books xmlns:brk="urn:books">\n' ' <book id="bk001">\n' " <author>Hightower, Kim</author>\n" " <title>The First Book</title>\n" " <genre>Fiction</genre>\n" " <price>44.95</price>\n" " <pub_date>2000-10-01</pub_date>\n" " <review>An amazing story of nothing.</review>\n" " </book>\n" ' <book id="bk002">\n' " <author>Nagata, Suanne</author>\n" " <title>Becoming Somebody</title>\n" " <genre>Biography</genre>\n" " <review>A masterpiece of the fine art of gossiping.</review>\n" " </book>\n" "</brk:books>\n" ) parser = XmlParser() actual = parser.from_string(xml, Books) self.assertEqual(self.books, actual) self.assertEqual({"brk": "urn:books"}, parser.namespaces.ns_map) def test_parse_with_fail_on_unknown_properties_false(self): xml = ( '<?xml version="1.0" encoding="UTF-8"?>\n' "<books>\n" ' <book id="bk001">\n' " <author>Hightower, Kim</author>\n" " <title>The First Book</title>\n" " </book>\n" ' <book id="bk002">\n' " <author>Nagata, Suanne</author>\n" " <title>Becoming Somebody</title>\n" " </book>\n" "</books>\n" ) @dataclass class Book: author: str = field(metadata=dict(type="Element")) @dataclass class MyBooks: class Meta: name = "books" book: List[Book] = field( default_factory=list, metadata=dict(type="Element") ) config = ParserConfig(fail_on_unknown_properties=False) parser = XmlParser(config=config) actual = parser.from_string(xml, MyBooks) expected = { "book": [{"author": "Hightower, Kim"}, {"author": "Nagata, Suanne"}] } self.assertEqual(expected, asdict(actual))
python
# Morando Nicolò
import pandas as pd

file_path = 'filepath.csv'
data = pd.read_csv(file_path)
data.describe()
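# --- Follow-up sketch (reuses the same placeholder 'filepath.csv' as above):
# describe() widened to all column types, with the summary kept for later use.
import pandas as pd

data = pd.read_csv('filepath.csv')
summary = data.describe(include='all')  # also summarize object/categorical columns
print(summary)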
python
# 0611.py
"""
ref: https://gist.github.com/jsheedy/3913ab49d344fac4d02bcc887ba4277d
ref: http://felix.abecassis.me/2011/09/opencv-morphological-skeleton/
"""
import cv2
import numpy as np

#1
src = cv2.imread('./data/T.jpg', cv2.IMREAD_GRAYSCALE)
##src = cv2.imread('alphabet.bmp', cv2.IMREAD_GRAYSCALE)
##src = cv2.bitwise_not(src)
ret, A = cv2.threshold(src, 128, 255, cv2.THRESH_BINARY)
skel_dst = np.zeros(src.shape, np.uint8)

#2
shape1 = cv2.MORPH_CROSS
shape2 = cv2.MORPH_RECT
B = cv2.getStructuringElement(shape=shape1, ksize=(3, 3))
done = True
while done:
    erode = cv2.erode(A, B)
##    opening = cv2.dilate(erode, B)
    opening = cv2.morphologyEx(erode, cv2.MORPH_OPEN, B)
    tmp = cv2.subtract(erode, opening)  # cv2.absdiff(erode, opening)
    skel_dst = cv2.bitwise_or(skel_dst, tmp)
    A = erode.copy()
    done = cv2.countNonZero(A) != 0
##    cv2.imshow('opening', opening)
##    cv2.imshow('tmp', tmp)
##    cv2.imshow('skel_dst', skel_dst)
##    cv2.waitKey()

cv2.imshow('src', src)
cv2.imshow('skel_dst', skel_dst)
cv2.waitKey()
cv2.destroyAllWindows()
python
import dicom
import argparse
import pylab
import os
import tqdm

parser = argparse.ArgumentParser(description="Generate PNG images from DICOM files")
parser.add_argument("origin", help="Source path (file or directory)")
parser.add_argument("--output", "-o", help="Output path", default="./")
argv = parser.parse_args()


def get_path_filelist(path):
    files = os.listdir(path)
    file_list = []
    for f in files:
        if os.path.isfile(path + '/' + f):
            if '.dcm' in f:
                file_list.append(path + '/' + f)
    return file_list


if os.path.isdir(argv.origin):
    filelist = get_path_filelist(argv.origin)
else:
    if '.dcm' not in argv.origin:
        exit("Incorrect origin file.")
    filelist = [argv.origin]

for file in tqdm.tqdm(filelist):
    dcm = dicom.read_file(file)
    filename = os.path.basename(file).replace(".dcm", "")
    pylab.imsave(argv.output + '/' + filename + '.png', dcm.pixel_array, cmap=pylab.cm.bone)
python
import platform from selenium.webdriver import Chrome, DesiredCapabilities from selenium.webdriver.chrome.options import Options from tests.util.web.platform.browser.generic import ManagedBrowser class ChromeManagedBrowser( ManagedBrowser ): """ ChromeManagedBrowser provides a Chrome edition of ManagedTestBrowser for use in Selenium based tests. """ def __init__(self, url: str): """ Initializes the ChromeManagedBrowser to anticipate sessions targeting the provided URL. :param url: The URL to target when establishing new sessions. """ super().__init__( url ) self.platform = "chrome" def __str__(self): return str(self.__repr__()) def __repr__(self): return str( { 'url': self.url, 'platform': self.platform, 'headless': self.headless, 'remote_browser': f"{self.remote_browser}:" f"{self.remote_browser_port}", 'session_active': self.session_active(), } ) def _get_chrome_capabilities(self) -> DesiredCapabilities: """ Provides a DesiredCapabilities object suitable for a Chrome webdriver session. Specifically: - Permit insecure SSL certs, such as what might be used in dev :return: A DesiredCapabilities object """ capabilities = DesiredCapabilities.CHROME.copy() capabilities['acceptSslCerts'] = True capabilities['acceptInsecureCerts'] = True return capabilities def _get_chrome_options(self) -> Options: """ Provides an Options object suitable for initializing a Chrome webdriver session. Specifically: - Disable notifications - Do not check for default browser status - Download permissions and preferences - Safe browsing OFF - Headless per ManagedTestBrowser setting :return: An Options object """ opts = Options() # Options for user interaction and session tracing opts.add_argument("--enable-logging=stderr --v=1") opts.add_argument("--disable-notifications") opts.add_argument("no-default-browser-check") # Options affecting memory and storage opts.add_argument("--no-sandbox") opts.add_argument("--allow-no-sandbox-job") opts.add_argument("--disable-dev-shm-usage") opts.add_argument("download.prompt_for_download=False") opts.add_argument('download.default_directory="/tmp/"') # Options permitting local files to be read opts.add_argument("safebrowsing.enabled=False") # Options to reduce system hardware requirements opts.add_argument("--disable-gpu") if self.remote_browser: if platform.system() in ["Windows"]: opts.add_experimental_option( "debuggerAddress", f"localhost:{self.remote_browser_port}" ) else: opts.add_argument( f"--remote-debugging-port={self.remote_browser_port}" ) if self.headless or self.remote_browser: opts.add_argument("--headless") if self.headless: opts.add_argument("--window-size=1600,1600") return opts def get_new_session(self): """ Overrides _get_browser_session to provide an initialized Chrome webdriver object ready for a new session. :return: A Chrome webdriver object """ return Chrome( options=self._get_chrome_options(), desired_capabilities=self._get_chrome_capabilities(), ) def get_new_browser(self, url, remote=False): """ Overrides get_new_session to provide a Chrome session. :return: A Chrome webdriver object """ browser = ChromeManagedBrowser(url) browser.remote_browser = remote return browser
python
from __future__ import absolute_import # import models into model package from .v1_persistent_volume import V1PersistentVolume from .v1_tcp_socket_action import V1TCPSocketAction from .v1_resource_quota_status import V1ResourceQuotaStatus from .v1_container_state_terminated import V1ContainerStateTerminated from .v1_replication_controller_list import V1ReplicationControllerList from .v1_capability import V1Capability from .v1_pod import V1Pod from .v1_event import V1Event from .v1_node_daemon_endpoints import V1NodeDaemonEndpoints from .v1_host_path_volume_source import V1HostPathVolumeSource from .v1_config_map_key_selector import V1ConfigMapKeySelector from .v1_volume import V1Volume from .v1_container_state_running import V1ContainerStateRunning from .v1_delete_options import V1DeleteOptions from .v1_pod_template_spec import V1PodTemplateSpec from .v1_secret_list import V1SecretList from .v1_nfs_volume_source import V1NFSVolumeSource from .v1_ceph_fs_volume_source import V1CephFSVolumeSource from .v1_capabilities import V1Capabilities from .v1_component_condition import V1ComponentCondition from .unversioned_status import UnversionedStatus from .v1_service_status import V1ServiceStatus from .unversioned_status_details import UnversionedStatusDetails from .v1_secret_volume_source import V1SecretVolumeSource from .v1_resource_requirements import V1ResourceRequirements from .v1_persistent_volume_claim import V1PersistentVolumeClaim from .unversioned_patch import UnversionedPatch from .v1_namespace_status import V1NamespaceStatus from .v1_persistent_volume_access_mode import V1PersistentVolumeAccessMode from .v1_resource_quota_spec import V1ResourceQuotaSpec from .v1_persistent_volume_spec import V1PersistentVolumeSpec from .v1_exec_action import V1ExecAction from .v1_persistent_volume_claim_volume_source import V1PersistentVolumeClaimVolumeSource from .v1_service_spec import V1ServiceSpec from .v1_service_list import V1ServiceList from .v1_persistent_volume_list import V1PersistentVolumeList from .v1_container_status import V1ContainerStatus from .v1_handler import V1Handler from .v1_node_address import V1NodeAddress from .v1_fc_volume_source import V1FCVolumeSource from .v1_endpoint_port import V1EndpointPort from .v1_downward_api_volume_file import V1DownwardAPIVolumeFile from .v1_endpoint_subset import V1EndpointSubset from .v1_limit_range_list import V1LimitRangeList from .v1_container import V1Container from .v1_pod_spec import V1PodSpec from .v1_flocker_volume_source import V1FlockerVolumeSource from .v1_persistent_volume_status import V1PersistentVolumeStatus from .v1_rbd_volume_source import V1RBDVolumeSource from .v1_load_balancer_ingress import V1LoadBalancerIngress from .v1_security_context import V1SecurityContext from .v1_service_port import V1ServicePort from .v1_namespace import V1Namespace from .v1_gce_persistent_disk_volume_source import V1GCEPersistentDiskVolumeSource from .v1_endpoints_list import V1EndpointsList from .v1_node_list import V1NodeList from .v1_event_source import V1EventSource from .v1_env_var_source import V1EnvVarSource from .unversioned_list_meta import UnversionedListMeta from .v1_limit_range_spec import V1LimitRangeSpec from .v1_persistent_volume_claim_spec import V1PersistentVolumeClaimSpec from .v1_replication_controller import V1ReplicationController from .v1_namespace_list import V1NamespaceList from .integer import Integer from .v1_volume_mount import V1VolumeMount from .v1_node_status import V1NodeStatus from .v1_replication_controller_status 
import V1ReplicationControllerStatus from .v1_pod_condition import V1PodCondition from .v1_node_condition import V1NodeCondition from .v1_pod_security_context import V1PodSecurityContext from .v1_service_account import V1ServiceAccount from .v1_pod_template import V1PodTemplate from .v1_pod_list import V1PodList from .v1_empty_dir_volume_source import V1EmptyDirVolumeSource from .v1_node_spec import V1NodeSpec from .v1_http_get_action import V1HTTPGetAction from .v1_resource_quota_list import V1ResourceQuotaList from .v1_daemon_endpoint import V1DaemonEndpoint from .v1_service_account_list import V1ServiceAccountList from .v1_probe import V1Probe from .v1_namespace_spec import V1NamespaceSpec from .v1_iscsi_volume_source import V1ISCSIVolumeSource from .v1_event_list import V1EventList from .v1_load_balancer_status import V1LoadBalancerStatus from .v1_persistent_volume_claim_list import V1PersistentVolumeClaimList from .v1_component_status import V1ComponentStatus from .v1_git_repo_volume_source import V1GitRepoVolumeSource from .v1_object_meta import V1ObjectMeta from .v1_secret_key_selector import V1SecretKeySelector from .v1_local_object_reference import V1LocalObjectReference from .v1_flex_volume_source import V1FlexVolumeSource from .v1_container_port import V1ContainerPort from .v1_secret import V1Secret from .v1_downward_api_volume_source import V1DownwardAPIVolumeSource from .v1_container_state import V1ContainerState from .v1_endpoints import V1Endpoints from .v1_cinder_volume_source import V1CinderVolumeSource from .v1_pod_status import V1PodStatus from .v1_se_linux_options import V1SELinuxOptions from .v1_service import V1Service from .v1_object_reference import V1ObjectReference from .v1_object_field_selector import V1ObjectFieldSelector from .v1_component_status_list import V1ComponentStatusList from .v1_lifecycle import V1Lifecycle from .v1_node_system_info import V1NodeSystemInfo from .json_watch_event import JsonWatchEvent from .v1_endpoint_address import V1EndpointAddress from .v1_aws_elastic_block_store_volume_source import V1AWSElasticBlockStoreVolumeSource from .v1_binding import V1Binding from .v1_node import V1Node from .v1_resource_quota import V1ResourceQuota from .v1_env_var import V1EnvVar from .unversioned_status_cause import UnversionedStatusCause from .v1_replication_controller_spec import V1ReplicationControllerSpec from .v1_container_state_waiting import V1ContainerStateWaiting from .v1_pod_template_list import V1PodTemplateList from .v1_limit_range_item import V1LimitRangeItem from .v1_finalizer_name import V1FinalizerName from .v1_limit_range import V1LimitRange from .v1_glusterfs_volume_source import V1GlusterfsVolumeSource from .v1_container_image import V1ContainerImage from .v1_persistent_volume_claim_status import V1PersistentVolumeClaimStatus
python
from .logger import get_logger
python
from django.test import TestCase
from django.core.management import call_command

from databuilder.tests import utils
from databuilder import models

# noinspection SpellCheckingInspection
sample_name = 'Bob Bobski'


class TestTask1(TestCase):
    def setUp(self):
        self.model_name = models.SampleTest.__name__.lower()
        models.SampleTest.objects.create(name=sample_name)

    def test_dump(self):
        total_records = models.SampleTest.objects.all().count()
        print(f'Your model has {total_records} dummy record.')
        # noinspection SpellCheckingInspection
        with utils.capture(call_command, 'toandroid') as output:
            self.assertIn(self.model_name, output)  # CREATE TABLE statement
            self.assertIn(sample_name, output)      # INSERT statement
python
import argparse import os import xml.etree.ElementTree as ET import sys import configparser import os from os import path import codecs import re parser = argparse.ArgumentParser() parser.add_argument("-raw_path", default='../raw_data/xml/schaeftlarn') parser.add_argument("-save_path", default='../raw_data/stories/la') parser.add_argument('-log_file', default='../logs/converting.log') parser.add_argument('-verbose', default=False, type=lambda x: (str(x).lower() == 'true')) args = parser.parse_args() # xml/html tag regex TAG_RE = re.compile(r'<[^>]+>') def parse(path_to_file): tree = ET.parse(path_to_file) root = tree.getroot() identifier = '' for div in root.iter("{http://www.tei-c.org/ns/1.0}div"): if 'n' in div.attrib and 'type' in div.attrib: if 'textpart' != div.get('type'): identifier = div.get('n') regest = '' for front in root.iter('{http://www.tei-c.org/ns/1.0}front'): if '{http://www.w3.org/XML/1998/namespace}lang' in front.attrib: # excluding non-german regests if 'deu' == front.get('{http://www.w3.org/XML/1998/namespace}lang'): for div in front.iter('{http://www.tei-c.org/ns/1.0}div'): if 'subtype' in div.attrib: if 'regest' == div.get('subtype'): for p in div.iter('{http://www.tei-c.org/ns/1.0}p'): try: regest = regest + p.text.replace(' ','').replace('\n','') except: regest = regest text = '' for body in root.iter('{http://www.tei-c.org/ns/1.0}body'): for div in body.iter('{http://www.tei-c.org/ns/1.0}div'): if 'type' in div.attrib: if 'textpart' == div.get('type'): for p in div.iter('{http://www.tei-c.org/ns/1.0}p'): # get the raw text because it includes the punctuation marks # punctuation marks are crucial for the translation quality raw_text = str(ET.tostring(p, encoding="unicode", method="xml")) # remove xml tags raw_text = TAG_RE.sub('', raw_text) raw_text = raw_text.replace(' ','').replace('\n','') text += raw_text + ' ' return identifier, regest, text def write_log_file(no_id_found, no_regest_found, no_text_found): log_path = os.path.abspath(args.log_file) print('writing the log file to: ',log_path) file = codecs.open(log_path, 'w', 'utf-8') file.write('no identifier:\n') for path in no_id_found: file.write('\n'+path) file.write('no regest:\n') for path in no_regest_found: file.write('\n'+path) file.write('no text:\n') for path in no_text_found: file.write('\n'+path) file.close() def get_files(args): path = os.path.abspath(args.raw_path) files = [] # r=root, d=directories, f = files print('start to load all formulae from: '+path) for r, d, f in os.walk(path): for file in f: if '.xml' in file: if '__cts__.xml' != file and '__capitains__.xml' != file and '.lat' in file: files.append(os.path.join(r, file)) print('found: '+str(len(files))+ ' files') return files if __name__ == '__main__': files = get_files(args) count = 0 no_regest_found = [] no_id_found = [] no_text_found = [] for f in files: identifier, regest, text = parse(f) if (''== identifier): no_id_found.append(f) elif (''== regest): no_regest_found.append(f) elif (''== text): no_text_found.append(f) else: save_path = os.path.abspath(args.save_path) identifier = identifier.replace(':','.') save_path = os.path.join(save_path, identifier+'.story') file = codecs.open(save_path, 'w', 'utf-8') file.write(text) file.write('\n\n@highlight\n\n') file.write(regest) file.close() count += 1 if args.verbose: sys.stdout.write('.') if(50==count): print('.') sys.stdout.write('\n') sys.stdout.flush() write_log_file(no_id_found, no_regest_found, no_text_found) print('successfully loaded:', count, 'files. 
for more info see the log file', )
python
#name: CurateChemStructures
#description: curating a molecules set for structural data homogenization
#top-menu: Chem | Curate...
#language: python
#sample: chem/chem_standards.csv
#tags: demo, chem, rdkit
#input: dataframe data [Input data table]
#input: column smiles {type:categorical; semType: Molecule} [Molecules, in SMILES format]
#input: bool kekulization = false
#input: bool normalization = false
#input: bool reionization = false
#input: bool neutralization = false
#input: bool tautomerization = false
#input: bool mainFragment = false
#output: dataframe curated {action:join(data); semType: Molecule} [Molecules, in SMILES format]

import numpy as np
import pandas as pd  # required for the output DataFrame below, if not pre-imported by the platform
from rdkit import Chem
from rdkit.Chem.MolStandardize import rdMolStandardize

smiles = data[smiles]
length = len(smiles)
standardized = np.full(length, None, dtype=object)


def neutralize_atoms(mol):
    pattern = Chem.MolFromSmarts("[+1!h0!$([*]~[-1,-2,-3,-4]),-1!$([*]~[+1,+2,+3,+4])]")
    at_matches = mol.GetSubstructMatches(pattern)
    at_matches_list = [y[0] for y in at_matches]
    if len(at_matches_list) > 0:
        for at_idx in at_matches_list:
            atom = mol.GetAtomWithIdx(at_idx)
            chg = atom.GetFormalCharge()
            hcount = atom.GetTotalNumHs()
            atom.SetFormalCharge(0)
            atom.SetNumExplicitHs(hcount - chg)
            atom.UpdatePropertyCache()
    return mol


if tautomerization:
    enumerator = rdMolStandardize.TautomerEnumerator()

for n in range(0, length):
    mol = Chem.MolFromSmiles(smiles[n], sanitize=True)
    if mol is None or mol.GetNumAtoms() == 0:
        continue
    if tautomerization:
        mol = enumerator.Canonicalize(mol)
    if normalization:
        mol = rdMolStandardize.Normalize(mol)
    if reionization:
        mol = rdMolStandardize.Reionize(mol)
    if neutralization:
        neutralize_atoms(mol)
    if mainFragment:
        mol = rdMolStandardize.FragmentParent(mol)
    if kekulization:
        Chem.Kekulize(mol)
    standardized[n] = Chem.MolToSmiles(mol, kekuleSmiles=kekulization)

curated = pd.DataFrame(standardized, columns=['curated_molecule'])
python
import json

import falcon


class HealthCheck:
    def on_get(self, req, resp):
        resp.body = json.dumps({'status': 'happy and healthy!'})
        resp.status = falcon.HTTP_200
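# --- Wiring sketch (assumption: the older falcon.API() WSGI application object,
# which matches the resp.body attribute used above; the '/health' path is illustrative).
import falcon

app = falcon.API()
app.add_route('/health', HealthCheck())
# Serve with any WSGI server, e.g.: gunicorn module_name:app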
python
import attr
from operator import itemgetter, methodcaller, attrgetter

from django.conf import settings
import spacy

from .service import Service
from .states import states
from .loaders import table_loader
from .language_model import nlp
from ..forms import QuestionForm


@attr.s
class NlpMiddleware:
    get_response = attr.ib()

    def __call__(self, request):
        data_service = Service(initial_state=states.OK)
        (
            data_service.of_(request.GET)
            .filter_(lambda qd: 'q' in qd, error_code=states.NO_URL_PARAM)
            .map_(QuestionForm)
            .assign(fieldname='form')
            .filter_(methodcaller('is_valid'), error_code=states.INVALID_FORM)
            .map_(attrgetter('cleaned_data'))
            .map_(itemgetter('q'))
            .assign(fieldname='question')
            .maybe(nlp, error_code=states.NO_MODEL)
            .assign(fieldname='document')
            .map_(lambda doc: doc._.qtype)
            .maybe(table_loader, error_code=states.UNRECOGNIZED)
            .bind(lambda doc: methodcaller('find_answer', doc._.kb_ident), data_from='document')
            .maybe(lambda result: f"${result.min_pay} to ${result.max_pay}",
                   error_code=states.NO_RECORDS_FOUND)
            .assign(fieldname='answer')
        )
        request.context = data_service
        return self.get_response(request)

    def process_template_response(self, request, response):
        data_service = request.context
        if data_service.in_state(states.INVALID_FORM):
            response.context_data.update(**data_service.to_dict())
        return response
python
# -*- coding: utf-8 -*- """ This module contains all functions for response of optical elements. Created on Wed May 22 12:15:23 2019 @author: Swarnav Banik [email protected] """ import numpy as np import numpy.fft as fourier import scipy as scp from PIL import Image # %% Common Functions ######################################################### # The following functions take inputs # Wave Vector k in units um # Minimum Waist w0 in units um # Position r,z in units um # Lens Action ################################################################### def SphLensAction(E,X,Y,k,f,**kwargs): # Evaluates the response of a spherical lens at its front focal plane # Inputs: E - 2D Field pattern # X,Y - 2D grid representing co-ordinates # k - Wave vector [um^-1] # f - focal length [mm] # FocussedAxis - Along what axis is the beam focused at the back # focal plane if (E.shape != X.shape or X.shape != Y.shape): raise Exception('OpticalElements::SphLensAction::E,X and Y should have same dimensions.') for key, value in kwargs.items(): if key == 'FocussedAxis': FocAxis = value f = f*10**3 Transform = fourier.fft2(E) if FocAxis == 'X': Transform = fourier.fftshift(Transform, axes = 0) elif FocAxis == 'Y': Transform = fourier.fftshift(Transform, axes = 1) elif FocAxis == 'NONE': Transform = fourier.fftshift(Transform) dx = X[0,1]-X[0,0] Xfrq = (2*np.pi*f/k)*fourier.fftshift(fourier.fftfreq(X.shape[1], d=dx)) dy = dx = Y[1,0]-Y[0,0] Yfrq = (2*np.pi*f/k)*fourier.fftshift(fourier.fftfreq(Y.shape[0], d=dy)) [X, Y] = np.meshgrid(Xfrq,Yfrq) return [Transform, X, Y] def CylLensAction(E,X,Y,k,f,**kwargs): # Evaluates the response of a cylindrical lens at its front focal plane # Inputs: E - 2D Field pattern # X,Y - 2D grid representing co-ordinates # k - Wave vector [um^-1] # f - focal length [mm] # FocussedAxis - Along what axis is the beam focused at the back # focal plane # FocusingAxis - Along what axis does the lens focus if (E.shape != X.shape or X.shape != Y.shape): raise Exception('OpticalElements::CylLensAction::E,X and Y should have same dimensions.') for key, value in kwargs.items(): if key == 'FocusingAxis': FocAxis = value f = f*10**3 if FocAxis == 'X': Transform = fourier.fft(E, axis = 1) Transform = fourier.fftshift(Transform, axes = 1) dx = X[0,1]-X[0,0] Xfrq = (2*np.pi*f/k)*fourier.fftshift(fourier.fftfreq(X.shape[1], d=dx)) Yfrq = Y[:,0] elif FocAxis == 'Y': Transform = fourier.fft(E, axis = 0) Transform = fourier.fftshift(Transform, axes = 0) dy = dx = Y[1,0]-Y[0,0] Yfrq = (2*np.pi*f/k)*fourier.fftshift(fourier.fftfreq(Y.shape[0], d=dy)) Xfrq = X[0,:] else: raise Exception('OpticalElements::CylLensAction::Focussing xxis needs to be specified.') [X, Y] = np.meshgrid(Xfrq,Yfrq) return [Transform, X, Y] def PiPlateAction(E,X,Y,y_offset,tilt): # Evaluates the response of an imaging system via the PSF # Inputs: # X,Y - 2D grid representing co-ordinates at the plane of pi plate # E: The light field at the plane of pi plate # y_offset, titlt: Offset and tilt of the pi plate # Outputs: # The light field after passing through the pi plate if (E.shape != X.shape or X.shape != Y.shape): raise Exception('OpticalElements::PiPlateAction::E, X and Y should have same dimensions.') Phase = np.angle(E) for ii in range(Y.shape[0]): for jj in range(Y.shape[1]): if Y[ii,jj]>(np.tan(tilt)*X[ii,jj]+y_offset): Phase[ii,jj] = Phase[ii,jj]+np.pi return np.abs(E)*np.exp(1j*Phase) def MatrixFreeProp(q_in,d): A = 1 B = d C = 0 D = 1 q_out = (A*q_in+B)/(C*q_in+D) return q_out def MatrixLens(q_in,f): A = 1 B = 0 C = -1/f D = 1 
q_out = (A*q_in+B)/(C*q_in+D) return q_out # Imaging ##################################################################### def ImageViaPSF(X_o, Y_o, E_o, ASF, **kwargs): # Evaluates the response of an imaging system via the PSF # Inputs: # X_o,Y_o - 2D grid representing co-ordinates in object plane # E_o: The light field at the object plane # ASF: Amplitude Spread Function = sqrt(PSF) # norm (optional): Normalize the ASF by some factor # Outputs: # I_i: The light field at the image plane for key, value in kwargs.items(): if key == 'norm': ASF = ASF*value E_ft = fourier.fftshift(fourier.fft2(E_o)) ASF_ft = fourier.fftshift(fourier.fft2(ASF)) E_i = fourier.ifftshift(fourier.ifft2(E_ft*ASF_ft)) I_i = np.abs(E_i)**2 return I_i def ASF(X_o,Y_o,R_airy,**kwargs): # Evaluates the Amplitude Spread Function of an imaging system # Inputs: # X_o,Y_o - 2D grid representing co-ordinates in object plane # R_airy: Radial extent of the PSF/ ASF # kind (optional): Kind of ASF, default is airy # Outputs: # ASF: The ASF = sqrt(PSF) kind = 'airy' for key, value in kwargs.items(): if key == 'kind': kind = value R = np.sqrt(X_o**2+Y_o**2) if kind == 'airy': ASF = scp.special.jv(1,3.8317*R/R_airy)/(3.8317*R/R_airy) ASF[R==0] = 0.5 if kind == 'gaussian': R_airy = R_airy*2.672/3.8317; ASF = np.exp(-(X_o**2+Y_o**2)/R_airy**2) ASF = ASF/np.sum(np.abs(ASF)**2) return ASF def PixelizeImage(I_org,X_org,Y_org,PixSize_cam): # Pixelize the image # Inputs: # X_org,Y_org - 2D grid representing co-ordinates in object plane # I_org: The image # PixSize_cam: The pixel size of the camera # Outputs: # X_cam,Y_cam - 2D grid representing co-ordinates in object plane on camera # I_cam: The pixelated image # PixSize_cam: The pixel size on the camera if (I_org.shape != X_org.shape or X_org.shape != Y_org.shape): raise Exception('OpticalElements::PixelizeImage::I_org,X_org and Y_org should have same dimensions.') if (X_org[0,0]-X_org[0,1] != Y_org[0,0]-Y_org[1,0]): raise Exception('OpticalElements::PixelizeImage::Pixel size in X and Y are not same') nptsx = int(round(X_org[0,-1]-X_org[0,0]/PixSize_cam)) nptsy = int(round(Y_org[-1,0]-Y_org[0,0]/PixSize_cam)) PixSize_cam = [(X_org[0,0]-X_org[0,-1])/nptsx, (Y_org[0,0]-Y_org[-1,0])/nptsy] x = np.linspace(X_org[0,0],X_org[0,-1],nptsx) y = np.linspace(Y_org[0,0],Y_org[-1,0],nptsy) [X_cam,Y_cam] = np.meshgrid(x,y) I_org_img = Image.fromarray(I_org) I_cam_img = I_org_img.resize((nptsy,nptsx),resample=Image.BILINEAR) I_cam = np.asarray(I_cam_img) return [X_cam,Y_cam,I_cam, PixSize_cam]
python
# -*- coding: utf-8 -*- from __future__ import print_function from __future__ import division """ Created on Fri May 4 13:43:46 2018 @author: xingshuli """ import os from keras.optimizers import SGD from keras.preprocessing.image import ImageDataGenerator from keras import backend as K #from NIN_16 import NIN16 #from model_vgg16 import VGG16 #from Rnet import New_net from Bridge_VGG19 import Bridge_VGG from learning_rate import choose #pre-parameters os.environ['CUDA_VISIBLE_DEVICES'] = '0' # '1' or '0' GPU img_height, img_width = 224, 224 if K.image_dim_ordering() == 'th': input_shape = (3, img_width, img_height) else: input_shape = (img_width, img_height, 3) batch_size = 16 epochs = 500 train_data_dir = os.path.join(os.getcwd(), 'image_Data/train') validation_data_dir = os.path.join(os.getcwd(), 'image_Data/validation') num_classes = 24 nb_train_samples = 10402 nb_validation_samples = 2159 #model = New_net(input_shape = input_shape, classes = num_classes) #model = VGG16(input_shape = input_shape, classes = num_classes) model = Bridge_VGG(input_shape = input_shape, classes = num_classes) optimizer = SGD(lr = 0.001, momentum = 0.9, nesterov = True) model.compile(loss = 'categorical_crossentropy', optimizer = optimizer, metrics = ['accuracy']) model.summary() train_datagen = ImageDataGenerator(rescale = 1. / 255, rotation_range = 15, width_shift_range = 0.2, height_shift_range = 0.2, horizontal_flip = True, zoom_range = 0.2, shear_range = 0.2) test_datagen = ImageDataGenerator(rescale = 1. / 255) train_generator = train_datagen.flow_from_directory( train_data_dir, target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical') validation_generator = test_datagen.flow_from_directory( validation_data_dir, target_size=(img_width, img_height), batch_size=batch_size, class_mode='categorical') #set learning rate schedule lr_monitorable = True lr_reduce = choose(lr_monitorable = lr_monitorable) #set callbacks for model fit callbacks = [lr_reduce] #model fit hist = model.fit_generator( train_generator, steps_per_epoch=nb_train_samples //batch_size, epochs=epochs, validation_data=validation_generator, validation_steps=nb_validation_samples //batch_size, callbacks=callbacks) #print acc and stored into acc.txt f = open('/home/xingshuli/Desktop/acc.txt','w') f.write(str(hist.history['acc'])) f.close() #print val_acc and stored into val_acc.txt f = open('/home/xingshuli/Desktop/val_acc.txt','w') f.write(str(hist.history['val_acc'])) f.close() #print val_loss and stored into val_loss.txt f = open('/home/xingshuli/Desktop/val_loss.txt', 'w') f.write(str(hist.history['val_loss'])) f.close() #the reasonable accuracy of model should be calculated based on #the value of patience in EarlyStopping: accur = accur[-patience + 1:]/patience Er_patience = 10 accur = [] with open('/home/xingshuli/Desktop/val_acc.txt','r') as f1: data1 = f1.readlines() for line in data1: odom = line.strip('[]\n').split(',') num_float = list(map(float, odom)) accur.append(num_float) f1.close() y = sum(accur, []) ave = sum(y[-Er_patience:]) / len(y[-Er_patience:]) print('Validation Accuracy = %.4f' % (ave)) #save model save_dir = os.path.join(os.getcwd(), 'Wide_ResNet_Model') model_name = 'keras_trained_model.h5' if not os.path.isdir(save_dir): os.makedirs(save_dir) save_path = os.path.join(save_dir, model_name) model.save(save_path) print('the model has been saved at %s' %save_path)
python
from flask_login import current_user from flask_wtf import FlaskForm from wtforms import StringField, PasswordField, SubmitField, BooleanField, FileField from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError from flask_wtf.file import FileField, FileAllowed, FileRequired from app.models import User from app.extensions import photos class RegisterForm(FlaskForm): username = StringField('用户名', validators=[DataRequired(message='请填写用户名'), Length(4, 20, message='长度在4到20个字符之间')]) email = StringField('邮箱(务必填写正确,否则无法激活登录)', validators=[DataRequired(message='请填写邮箱'), Email(message='请填写正确的邮箱格式')]) password = PasswordField('密码', validators=[DataRequired(message='请填写密码'), Length(8, 20, message='密码长度在8到20之间'), EqualTo('confirm', message='密码不一致')]) confirm = PasswordField('密码确认') submit = SubmitField('注册') # 检验username是否存在 def validate_username(self, field): user = User.query.filter_by(username=field.data).first() if user: raise ValidationError('用户名已存在') # 校验邮箱是否已存在 def validate_email(self, field): user = User.query.filter_by(email=field.data).first() if user: raise ValidationError('邮箱已存在') # 定义登录的form表单 class LoginForm(FlaskForm): username = StringField('用户名或邮箱', validators=[DataRequired(message='用户名不能为空')]) password = PasswordField('密码', validators=[DataRequired(message='密码不能为空')]) remember = BooleanField('记住我', default=True) submit = SubmitField('登录') # 定义修改密码的表单 class UserPasswordForm(FlaskForm): oldpwd = PasswordField('原密码', validators=[DataRequired(message='原密码不能为空')]) newpwd = PasswordField('新密码', validators=[DataRequired(message='请填写新密码'), Length(8, 20, message='密码长度在8到20之间'), EqualTo('confirm', message='密码不一致')]) confirm = PasswordField('密码确认') submit = SubmitField('注册') # 校验原密码是否正确 def validate_oldpwd(self, field): # 获取真实user对象 user = current_user._get_current_object() if not user.verify_password(field.data): raise ValidationError('原密码错误') # 校验新老密码不能一致 def validate_newpwd(self, field): # 获取真实user对象 user = current_user._get_current_object() if user.verify_password(field.data): raise ValidationError('新旧密码不能一样') # 添加头像表单 class IconForm(FlaskForm): icon = FileField('头像', render_kw={'class': 'btn btn-default'}, validators=[FileAllowed(photos, message='只能上传图片'), FileRequired(message='请先选择文件')]) submit = SubmitField('修改') # 填写新邮箱来修改邮箱 class EmailForm(FlaskForm): email = StringField('新邮箱(务必填写正确,否则无法收到修改邮件)', validators=[DataRequired(message='请填写新邮箱'), Email(message='请填写正确的邮箱格式')]) submit = SubmitField('提交') # 用来提交用户名或邮箱来重置密码 class EUForm(FlaskForm): username = StringField('用户名或有效的邮箱', validators=[DataRequired(message='用户名不能为空')]) submit = SubmitField('下一步', render_kw={'style': "float: right"}) # 用来提交验证码 class AuthCodeForm(FlaskForm): authcode = StringField('验证码', validators=[DataRequired(message='验证码不能为空')]) submit = SubmitField('提交', render_kw={'style': "float: right"}) # 重置密码 class ResetPwdForm(FlaskForm): password = PasswordField('新密码', validators=[DataRequired(message='请填写密码'), Length(8, 20, message='密码长度在8到20之间'), EqualTo('confirm', message='密码不一致')]) confirm = PasswordField('密码确认') submit = SubmitField('确定', render_kw={'style': "float: right"})
python
import ee
import geemap

# Create a map centered at (lat, lon).
Map = geemap.Map(center=[40, -100], zoom=4)

fromFT = ee.FeatureCollection("users/wqs/Pipestem/Pipestem_HUC10")


# This function computes the feature's geometry area and adds it as a property.
def addArea(feature):
    return feature.set({'areaHa': feature.geometry().area().divide(100 * 100)})


# Map the area getting function over the FeatureCollection.
areaAdded = fromFT.map(addArea)

# Print the first feature from the collection with the added property.
first = areaAdded.first()
print('First feature: ', first.getInfo())
print("areaHa: ", first.get("areaHa").getInfo())

# Display the map.
Map
python
import keras # initializer = keras.initializers.glorot_uniform(seed=0) initializer = keras.initializers.glorot_normal() """ Creates Residual Network with 50 layers """ def create_model(input_shape=(64, 64, 3), classes=1): # Define the input as a tensor with shape input_shape X_input = keras.layers.Input(input_shape) # Zero-Padding X = keras.layers.ZeroPadding2D((3, 3))(X_input) # Stage 1 X = keras.layers.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', kernel_initializer=initializer)(X) X = keras.layers.BatchNormalization(axis=3, name='bn_conv1')(X) X = keras.layers.Activation('relu')(X) X = keras.layers.MaxPooling2D((3, 3), strides=(2, 2))(X) # Stage 2 X = convolutional_block(X, f = 3, filters=[64, 64, 256], stage=2, block='a', s=1) X = identity_block(X, 3, [64, 64, 256], stage=2, block='b') X = identity_block(X, 3, [64, 64, 256], stage=2, block='c') # Stage 3 X = convolutional_block(X, f = 3, filters=[128, 128, 512], stage=3, block='a', s=2) X = identity_block(X, 3, [128, 128, 512], stage=3, block='b') X = identity_block(X, 3, [128, 128, 512], stage=3, block='c') X = identity_block(X, 3, [128, 128, 512], stage=3, block='d') # Stage 4 X = convolutional_block(X, f = 3, filters=[256, 256, 1024], stage=4, block='a', s=2) X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e') X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f') # Stage 5 X = convolutional_block(X, f = 3, filters=[512, 512, 2048], stage=5, block='a', s=2) X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b') X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c') # AVGPOOL X = keras.layers.AveragePooling2D(pool_size=(2, 2))(X) # output layer X = keras.layers.Flatten()(X) X = keras.layers.Dense(classes, activation='sigmoid', name='fc{}' .format(classes), kernel_initializer=initializer)(X) # Create model model = keras.models.Model(inputs=X_input, outputs=X, name='resnet50') return model """ Identity Block of ResNet """ def identity_block(X, f, filters, stage, block): # defining name basis conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' # Retrieve Filters F1, F2, F3 = filters # Save the input value. You'll need this later to add back to the main path. 
X_shortcut = X # First component of main path X = keras.layers.Conv2D(filters=F1, kernel_size=(1, 1), strides=(1,1), padding='valid', name=conv_name_base + '2a', kernel_initializer=initializer)(X) X = keras.layers.BatchNormalization(axis=3, name=bn_name_base + '2a')(X) X = keras.layers.Activation('relu')(X) X = keras.layers.Dropout(0.5)(X) # Second component of main path X = keras.layers.Conv2D(filters=F2, kernel_size=(f, f), strides=(1,1), padding='same', name=conv_name_base + '2b', kernel_initializer=initializer)(X) X = keras.layers.BatchNormalization(axis=3, name=bn_name_base + '2b')(X) X = keras.layers.Activation('relu')(X) X = keras.layers.Dropout(0.5)(X) # Third component of main path X = keras.layers.Conv2D(filters=F3, kernel_size=(1, 1), strides=(1,1), padding='valid', name=conv_name_base + '2c', kernel_initializer=initializer)(X) X = keras.layers.BatchNormalization(axis=3, name=bn_name_base + '2c')(X) # Add shortcut value to main path, and pass it through a RELU activation X = keras.layers.Add()([X, X_shortcut]) X = keras.layers.Activation('relu')(X) return X """ Convolutional Block of ResNet """ def convolutional_block(X, f, filters, stage, block, s=2): # defining name basis conv_name_base = 'res' + str(stage) + block + '_branch' bn_name_base = 'bn' + str(stage) + block + '_branch' # Retrieve Filters F1, F2, F3 = filters # Save the input value X_shortcut = X # First component of main path X = keras.layers.Conv2D(F1, (1, 1), strides=(s, s), name=conv_name_base + '2a', padding='valid', kernel_initializer=initializer)(X) X = keras.layers.BatchNormalization(axis=3, name=bn_name_base + '2a')(X) X = keras.layers.Activation('relu')(X) X = keras.layers.Dropout(0.5)(X) # Second component of main path X = keras.layers.Conv2D(F2, (f, f), strides=(1, 1), name=conv_name_base + '2b', padding='same', kernel_initializer=initializer)(X) X = keras.layers.BatchNormalization(axis=3, name=bn_name_base + '2b')(X) X = keras.layers.Activation('relu')(X) X = keras.layers.Dropout(0.5)(X) # Third component of main path X = keras.layers.Conv2D(F3, (1, 1), strides=(1, 1), name=conv_name_base + '2c', padding='valid', kernel_initializer=initializer)(X) X = keras.layers.BatchNormalization(axis=3, name=bn_name_base + '2c')(X) X_shortcut = keras.layers.Conv2D(F3, (1, 1), strides=(s,s), name=conv_name_base + '1', padding='valid', kernel_initializer=initializer)(X_shortcut) X_shortcut = keras.layers.BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut) # Add shortcut value to main path, and pass it through a RELU activation X = keras.layers.Add()([X, X_shortcut]) X = keras.layers.Activation('relu')(X) return X
python
from typing import Union, List, Any

from ..core.client import ClientBase
from ..core.connect import AsyncTCPConnection

Key = Union[int, float, str]


class MasterClient(ClientBase):
    def get_shard(self, key):
        return self._execute("get_shard", key)

    def get_map(self):
        return self._execute("get_map")

    def stat(self):
        return self._execute("stat")

    def create_index(self, index):
        return self._execute("create_index", index)


class AsyncMasterClient(MasterClient):
    def __init__(self, host, port, transport_class=AsyncTCPConnection, **kwargs):
        super(AsyncMasterClient, self).__init__(host, port, transport_class, **kwargs)
python
from fastapi import APIRouter, HTTPException
import pandas as pd
import plotly.express as px
import numpy as np
import plotly.graph_objects as go

router = APIRouter()


@router.get('/vizprices')
async def visual():
    # load in airbnb dataset
    DATA_PATH = 'https://raw.githubusercontent.com/Air-BnB-2-BW/data-science/master/airbnb_bw.csv'
    df = pd.read_csv(DATA_PATH, index_col=0)

    x = ['$0-25', '$25-50', '$50-75', '$75-100', '$100-125', '$125-150',
         '$150-175', '$175-200', '$200+']
    y = [27, 272, 325, 125, 164, 93, 45, 22, 13]

    fig = go.Figure(data=[go.Bar(x=x, y=y)])
    fig.update_traces(marker_color='rgb(158,202,225)', marker_line_color='rgb(8,48,107)',
                      marker_line_width=4.5, opacity=0.6)
    fig.update_layout(title_text='Cost Per Person')
    fig.update_layout(width=2000, height=1000, margin={"r": 1, "t": 1, "l": 1, "b": 1})
    fig.show()
    return fig.to_json()
python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function

from unittest import TestCase

from tests.utils import assert_equal_dict
from polyaxon_schemas.ml.hooks import StepLoggingTensorHookConfig
from polyaxon_schemas.ml.processing.pipelines import TFRecordSequencePipelineConfig
from polyaxon_schemas.ml.train import TrainConfig


class TestTrainConfigs(TestCase):
    def test_train_config(self):
        config_dict = {
            "data_pipeline": TFRecordSequencePipelineConfig(
                data_files=["~/data_file"],
                meta_data_file="~/meta_data_file",
                shuffle=True,
                num_epochs=10,
                batch_size=64,
            ).to_schema(),
            "steps": 300,
            "hooks": [
                StepLoggingTensorHookConfig(
                    ["Dense_1", "Conv2D_4"], every_n_iter=100
                ).to_schema()
            ],
        }
        config = TrainConfig.from_dict(config_dict)
        assert_equal_dict(config.to_dict(), config_dict)
python
from cereal import car from common.realtime import DT_CTRL from common.numpy_fast import clip from common.params import Params from selfdrive.car import apply_std_steer_torque_limits from selfdrive.car.hyundai.hyundaican import create_lkas11, create_clu11, create_lfahda_mfc, \ create_scc11, create_scc12, create_scc13, create_scc14, \ create_mdps12 from selfdrive.car.hyundai.values import Buttons, CarControllerParams, CAR from opendbc.can.packer import CANPacker from selfdrive.config import Conversions as CV #DIY cruise... from common.numpy_fast import interp import cereal.messaging as messaging sm = messaging.SubMaster(['radarState', 'controlsState']) VisualAlert = car.CarControl.HUDControl.VisualAlert min_set_speed = 30 * CV.KPH_TO_MS # Accel limits ACCEL_HYST_GAP = 0.02 # don't change accel command for small oscillations within this value ACCEL_MAX = 1.5 # 1.5 m/s2 ACCEL_MIN = -3.0 # 3 m/s2 ACCEL_SCALE = max(ACCEL_MAX, -ACCEL_MIN) def accel_hysteresis(accel, accel_steady): # for small accel oscillations within ACCEL_HYST_GAP, don't change the accel command if accel > accel_steady + ACCEL_HYST_GAP: accel_steady = accel - ACCEL_HYST_GAP elif accel < accel_steady - ACCEL_HYST_GAP: accel_steady = accel + ACCEL_HYST_GAP accel = accel_steady return accel, accel_steady def process_hud_alert(enabled, fingerprint, visual_alert, left_lane, right_lane, left_lane_depart, right_lane_depart): sys_warning = (visual_alert in [VisualAlert.steerRequired, VisualAlert.ldw]) # initialize to no line visible sys_state = 1 if left_lane and right_lane or sys_warning: # HUD alert only display when LKAS status is active sys_state = 3 if enabled or sys_warning else 4 elif left_lane: sys_state = 5 elif right_lane: sys_state = 6 # initialize to no warnings left_lane_warning = 0 right_lane_warning = 0 if left_lane_depart: left_lane_warning = 1 if fingerprint in [CAR.GENESIS_G90, CAR.GENESIS_G80] else 2 if right_lane_depart: right_lane_warning = 1 if fingerprint in [CAR.GENESIS_G90, CAR.GENESIS_G80] else 2 return sys_warning, sys_state, left_lane_warning, right_lane_warning class CarController(): def __init__(self, dbc_name, CP, VM): self.p = CarControllerParams(CP) self.packer = CANPacker(dbc_name) self.apply_steer_last = 0 self.car_fingerprint = CP.carFingerprint self.steer_rate_limited = False self.lkas11_cnt = 0 self.scc12_cnt = 0 self.last_resume_frame = 0 self.resume_cnt = 0 self.last_lead_distance = 0 self.turning_signal_timer = 0 self.longcontrol = CP.openpilotLongitudinalControl self.scc_live = not CP.radarOffCan self.accel_steady = 0 # params init self.lfamfc = Params().get("MfcSelect", encoding='utf8') == "2" #DIY cruise... self.released_clutch = False self.manual_gearbox = CP.manualGearbox self.btn_cnt = 0 self.btn_pressed = False self.prev_btn = 0 self.gap_size = 4 #set gap size. 
lower number == less gap self.btn_combo = [] def update(self, enabled, CS, frame, actuators, pcm_cancel_cmd, visual_alert, left_lane, right_lane, left_lane_depart, right_lane_depart, set_speed, lead_visible): # *** compute control surfaces *** # Steering Torque new_steer = int(round(actuators.steer * self.p.STEER_MAX)) apply_steer = apply_std_steer_torque_limits(new_steer, self.apply_steer_last, CS.out.steeringTorque, self.p) self.steer_rate_limited = new_steer != apply_steer # disable if steer angle reach 90 deg, otherwise mdps fault in some models lkas_active = enabled and abs(CS.out.steeringAngleDeg) < CS.CP.maxSteeringAngleDeg # Disable steering while turning blinker on and speed below 60 kph if CS.out.leftBlinker or CS.out.rightBlinker: self.turning_signal_timer = 0.5 / DT_CTRL # Disable for 0.5 Seconds after blinker turned off # if self.turning_indicator_alert: # set and clear by interface # lkas_active = 0 if self.turning_signal_timer > 0: self.turning_signal_timer -= 1 if not lkas_active: apply_steer = 0 self.apply_steer_last = apply_steer sys_warning, sys_state, left_lane_warning, right_lane_warning = \ process_hud_alert(enabled, self.car_fingerprint, visual_alert, left_lane, right_lane, left_lane_depart, right_lane_depart) clu11_speed = CS.clu11["CF_Clu_Vanz"] enabled_speed = 38 if CS.is_set_speed_in_mph else 60 if clu11_speed > enabled_speed or not lkas_active: enabled_speed = clu11_speed if not(min_set_speed < set_speed < 255 * CV.KPH_TO_MS): set_speed = min_set_speed set_speed *= CV.MS_TO_MPH if CS.is_set_speed_in_mph else CV.MS_TO_KPH can_sends = [] can_sends.append(create_lkas11(self.packer, frame, self.car_fingerprint, apply_steer, lkas_active, CS.lkas11, sys_warning, sys_state, enabled, left_lane, right_lane, left_lane_warning, right_lane_warning, 0)) if CS.mdps_bus or CS.scc_bus == 1: # send lkas11 bus 1 if mdps or scc is on bus 1 can_sends.append(create_lkas11(self.packer, frame, self.car_fingerprint, apply_steer, lkas_active, CS.lkas11, sys_warning, sys_state, enabled, left_lane, right_lane, left_lane_warning, right_lane_warning, 1)) if frame % 2 and CS.mdps_bus: # send clu11 to mdps if it is not on bus 0 can_sends.append(create_clu11(self.packer, frame, CS.mdps_bus, CS.clu11, Buttons.NONE, enabled_speed)) if pcm_cancel_cmd and self.longcontrol: can_sends.append(create_clu11(self.packer, frame, CS.scc_bus, CS.clu11, Buttons.CANCEL, clu11_speed)) if CS.mdps_bus: # send mdps12 to LKAS to prevent LKAS error can_sends.append(create_mdps12(self.packer, frame, CS.mdps12)) # 20 Hz LFA MFA message if frame % 5 == 0 and self.lfamfc: can_sends.append(create_lfahda_mfc(self.packer, enabled)) # DIY cruise... if enabled: sm.update(0) lead_data = sm['radarState'].leadOne lead_one = sm['radarState'].leadOne lead_two = sm['radarState'].leadTwo vel_cruise = sm['controlsState'].vCruise #target max speed seen on screen. In km/h if lead_one.status == True: lead_data = lead_one if lead_two.status == True and ((lead_one.dRel - lead_two.dRel) > 3.0): lead_data = lead_two lead_rel_dist = lead_data.dRel lead_rel_vel = lead_data.vRel lead_vel = lead_data.vLead cruise_curr_set_speed = CS.out.cruiseState.speed #cruise speed m/s max_cru_speed = vel_cruise * CV.KPH_TO_MS #speed limit press_button_speed = 3 #press two times every 3 frames lead_speed_diff = 2.5 * CV.KPH_TO_MS #we're slower than the lead car by this amount. 
km/h #button up/down combination to set gap size #UP DOWN UP DOWN smaller gap #DOWN UP DOWN UP bigger gap if CS.cruise_buttons == Buttons.RES_ACCEL or CS.cruise_buttons == Buttons.SET_DECEL: self.btn_pressed = True if self.btn_pressed: self.btn_cnt += 1 if self.btn_cnt > 0 and self.btn_cnt < 100: if CS.cruise_buttons == Buttons.RES_ACCEL and not self.prev_btn == Buttons.RES_ACCEL: self.btn_combo.append(Buttons.RES_ACCEL) self.prev_btn = Buttons.RES_ACCEL if CS.cruise_buttons == Buttons.SET_DECEL and not self.prev_btn == Buttons.SET_DECEL: self.btn_combo.append(Buttons.SET_DECEL) self.prev_btn = Buttons.SET_DECEL else: self.btn_cnt = 0 self.btn_pressed = False self.prev_btn = 0 self.btn_combo = [] if self.btn_combo == [Buttons.RES_ACCEL, Buttons.SET_DECEL, Buttons.RES_ACCEL, Buttons.SET_DECEL]: self.gap_size -= 1 self.btn_combo = [] if self.btn_combo == [Buttons.SET_DECEL, Buttons.RES_ACCEL, Buttons.SET_DECEL, Buttons.RES_ACCEL]: self.gap_size += 1 self.btn_combo = [] #press down if high lateral acceleration bpV = [30., 130.] lat_acc = abs(CS.out.cruiseState.lateralAcceleration) speed_interp = int(CS.out.vEgo * CV.MS_TO_KPH) + 4 acc_range = [0.25, 0.40] acc_thresh = interp(speed_interp, bpV, acc_range) #we drive slower than lead to get the gap and later the distance will pull us back in until a balance is found lead_vel -= lead_speed_diff #set gap if self.gap_size > 0: lead_vel += ((lead_rel_dist / self.gap_size) * CV.KPH_TO_MS) #when following logic following = lead_data.status and lead_rel_dist < 130. and lead_rel_dist > 1. and not ((CS.out.leftBlinker or CS.out.rightBlinker) and CS.out.vEgo > (60 * CV.KPH_TO_MS)) #prevents disabling cruise if speed <30km/h if clu11_speed <= 30: clu11_speed = 30 if following: if cruise_curr_set_speed < lead_vel and max_cru_speed > cruise_curr_set_speed and frame % press_button_speed < 2: can_sends.append(create_clu11(self.packer, frame, 0, CS.clu11, Buttons.RES_ACCEL, clu11_speed)) if (cruise_curr_set_speed * CV.MS_TO_KPH) > 30: if max_cru_speed < cruise_curr_set_speed or cruise_curr_set_speed > lead_vel and frame % press_button_speed < 2: can_sends.append(create_clu11(self.packer, frame, 0, CS.clu11, Buttons.SET_DECEL, clu11_speed)) elif not following: if cruise_curr_set_speed < max_cru_speed and frame % press_button_speed < 2 and lat_acc < acc_thresh: can_sends.append(create_clu11(self.packer, frame, 0, CS.clu11, Buttons.RES_ACCEL, clu11_speed)) elif (cruise_curr_set_speed > max_cru_speed and (cruise_curr_set_speed * CV.MS_TO_KPH) > 30 and frame % press_button_speed < 2) or \ (lat_acc > acc_thresh and frame % press_button_speed < 2): can_sends.append(create_clu11(self.packer, frame, 0, CS.clu11, Buttons.SET_DECEL, clu11_speed)) if self.manual_gearbox: if CS.out.clutchPressed == True and self.released_clutch == False: self.released_clutch = True if CS.out.clutchPressed == False and self.released_clutch == True: can_sends.append(create_clu11(self.packer, frame, 0, CS.clu11, Buttons.SET_DECEL, clu11_speed)) if frame % press_button_speed >= 2: self.released_clutch = False return can_sends
python
#!/usr/bin/env python3
# Copyright (c) 2019 The Khronos Group Inc.
# SPDX-License-Identifier: Apache-2.0

from itertools import product

from shared import PLATFORMS, TRUE_FALSE, VS_VERSION, make_win_artifact_name

if __name__ == "__main__":
    for platform, uwp in product(PLATFORMS, TRUE_FALSE):
        print(make_win_artifact_name(platform, uwp))
python
import tkinter as tk class DashboardGUI: def __init__(self, master, interpreter): self.master = master self.interpreter = interpreter h = 316 w = 480 self.top_bar_canvas = tk.Canvas(master,bg="black",height=h,width=w/20) self.top_bar_canvas.grid(row=0,column=0,rowspan=2) self.time_text = self.top_bar_canvas.create_text(12,0.67*h,text="IDK:IDK AM", angle=90, fill='white', font=('Helvetica', '12', 'bold')) self.sat_num_text = self.top_bar_canvas.create_text(12,0.15*h,text="0 SAT", angle=90, fill='white', font=('Helvetica', '12', 'bold')) self.speed_label_canvas = tk.Canvas(master,bg="black", height=h/2, width=w/12) self.speed_label_canvas.grid(row=0,column=1) self.speed_label_text = self.speed_label_canvas.create_text(20,80,text="SPEED (MPH)", angle=90, fill='white', font=('Helvetica', '15', 'bold')) self.rpm_label_canvas = tk.Canvas(master,bg="black", height=h/2, width=w/12) self.rpm_label_canvas.grid(row=1,column=1) self.rpm_label_text = self.rpm_label_canvas.create_text(20,80,text="CADENCE (RPM)", angle=90, fill='white', font=('Helvetica', '12', 'bold')) self.speed_canvas = tk.Canvas(master,height=h/2,width=w/6,bg='black') self.speed_canvas.grid(row=0,column=2) self.speed_text = self.speed_canvas.create_text(40,80,text="0.0", angle=90, fill='yellow', font=('Helvetica', '50', 'bold')) self.cadence_canvas = tk.Canvas(master,height=h/2,width=w/6,bg='black') self.cadence_canvas.grid(row=1,column=2) self.cadence_text = self.cadence_canvas.create_text(40,80,text="0.0", angle=90, fill='yellow', font=('Helvetica', '50', 'bold')) self.avg_speed_label_canvas = tk.Canvas(master,height=h/2,width=w/12,bg='black') self.avg_speed_label_canvas.grid(row=0,column=3) self.avg_speed_label_text = self.avg_speed_label_canvas.create_text(20,80,text="AVG SPEED", angle=90, fill='white', font=('Helvetica', '15', 'bold')) self.distance_label_canvas = tk.Canvas(master,height=h/2,width=w/12,bg='black') self.distance_label_canvas.grid(row=1,column=3) self.distance_label_text = self.distance_label_canvas.create_text(20,80,text="DISTANCE (MILES)", angle=90, fill='white', font=('Helvetica', '11', 'bold')) self.avg_speed_canvas = tk.Canvas(master,height=h/2,width=w/6,bg='black') self.avg_speed_canvas.grid(row=0,column=4) self.avg_speed_text = self.avg_speed_canvas.create_text(40,80,text="0.0", angle=90, fill='yellow', font=('Helvetica', '50', 'bold')) self.distance_canvas = tk.Canvas(master,height=h/2,width=w/6,bg='black') self.distance_canvas.grid(row=1,column=4) self.distance_text = self.distance_canvas.create_text(40,80,text="0.0", angle=90, fill='yellow', font=('Helvetica', '50', 'bold')) self.direction_label_canvas = tk.Canvas(master,height=h/2,width=w/12,bg='black') self.direction_label_canvas.grid(row=0,column=5) self.direction_label_text = self.direction_label_canvas.create_text(20,80,text="DIRECTION", angle=90, fill='white', font=('Helvetica', '15', 'bold')) self.incline_label_canvas = tk.Canvas(master,height=h/2,width=w/12,bg='black') self.incline_label_canvas.grid(row=1,column=5) self.incline_label_text = self.incline_label_canvas.create_text(20,80,text="INCLINE (DEG)", angle=90, fill='white', font=('Helvetica', '13', 'bold')) self.direction_canvas = tk.Canvas(master,height=h/2,width=w/6,bg='black') self.direction_canvas.grid(row=0,column=6) self.direction_text = self.direction_canvas.create_text(40,80,text="N", angle=90, fill='yellow', font=('Helvetica', '50', 'bold')) self.incline_canvas = tk.Canvas(master,height=h/2,width=w/6,bg='black') self.incline_canvas.grid(row=1,column=6) self.incline_text = 
self.incline_canvas.create_text(40,80,text="0.0", angle=90, fill='yellow', font=('Helvetica', '50', 'bold')) self.lights_button = LatchingButton(master, width=60,height=60,fills=("white","red"),command=self.toggle_lights) self.lights_button.grid(row=0,column=7,rowspan=1) self.lights = 0 def toggle_lights(self): self.lights = 1 - self.lights def update_display(self): self.interpreter.updateData(self.lights) self.top_bar_canvas.itemconfigure(self.sat_num_text,text='%.1f SAT'%self.interpreter.SAT) self.top_bar_canvas.itemconfigure(self.time_text,text=self.interpreter.getDisplayTimeString()) if self.interpreter.getSpeed()>25: self.speed_canvas.itemconfigure(self.speed_text,text='%.1f'%self.interpreter.getSpeed(),fill="red") else: self.speed_canvas.itemconfigure(self.speed_text,text='%.1f'%self.interpreter.getSpeed(),fill="yellow") self.cadence_canvas.itemconfigure(self.cadence_text,text='%.0f'%self.interpreter.PED) self.avg_speed_canvas.itemconfigure(self.avg_speed_text,text='%.0f'%self.interpreter.getAvgSpeed()) self.distance_canvas.itemconfigure(self.distance_text,text='%.0f'%self.interpreter.getDistance()) self.direction_canvas.itemconfigure(self.direction_text,text=self.interpreter.getDirection()) self.incline_canvas.itemconfigure(self.incline_text,text='%.0f'%self.interpreter.getIncline()) self.master.after(1,self.update_display) #this ensures this process continually repeats class LatchingButton(tk.Canvas): def __init__(self, parent, width, height, fills, command=None): tk.Canvas.__init__(self, parent, borderwidth=1, highlightthickness=0) self.command = command self.fills=fills self.fill_index = 0 padding = 4 self.oval = self.create_oval((padding,padding, width+padding, height+padding), outline="black", fill=self.fills[self.fill_index]) (x0,y0,x1,y1) = self.bbox("all") width = (x1-x0) + padding height = (y1-y0) + padding self.configure(width=width, height=height) self.bind("<ButtonPress-1>", self._on_press) self.bind("<ButtonRelease-1>", self._on_release) def _on_press(self, event): pass def _on_release(self, event): self.fill_index = 1 - self.fill_index self.itemconfigure(self.oval,fill=self.fills[self.fill_index]) if self.command is not None: self.command() if __name__ == '__main__': import serial import serial.tools.list_ports import interpreter ports = serial.tools.list_ports.comports() for port, desc, hwid in sorted(ports): if desc=="Arduino Micro": print("{}: {} [{}]".format(port, desc, hwid)) break arduino = serial.Serial(port, 115200, timeout=0.1, write_timeout=0) path = '~/bike-computer/data/' intrptr = interpreter.Interpreter(arduino,path) root = tk.Tk() dbg = DashboardGUI(root, intrptr) root.overrideredirect(True) root.after(10, dbg.update_display) root.mainloop()
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import time, random import numpy as np import tensorflow as tf from tensorflow.python.layers import core as layers_core import argparse from tensorflow.python.client import device_lib import os from utils import * class Option(object): def __init__(self, d): self.__dict__ = d def save(self): with open(os.path.join(self.this_expsdir, "option.txt"), "w") as f: for key, value in sorted(self.__dict__.items(), key=lambda x: x[0]): f.write("%s, %s\n" % (key, str(value))) logging = tf.logging def data_type(): return tf.float32 class PTBModel(object): #The language model. def __init__(self, is_training, is_test_LM=False): self._is_training = is_training self.batch_size = config.batch_size self.num_steps = config.num_steps size = config.hidden_size vocab_size = config.vocab_size self._input=tf.placeholder(shape=[None, config.num_steps], dtype=tf.int32) self._target=tf.placeholder(shape=[None, config.num_steps], dtype=tf.int32) self._sequence_length=tf.placeholder(shape=[None], dtype=tf.int32) with tf.device("/cpu:0"): embedding = tf.get_variable("embedding", [vocab_size, size], dtype=data_type()) inputs = tf.nn.embedding_lookup(embedding, self._input) softmax_w = tf.get_variable( "softmax_w", [size, vocab_size], dtype=data_type()) softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type()) if is_training and config.keep_prob < 1: inputs = tf.nn.dropout(inputs, config.keep_prob) output = self._build_rnn_graph(inputs, self._sequence_length, is_training) output=tf.reshape(output, [-1, config.hidden_size]) logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b) # Reshape logits to be a 3-D tensor for sequence loss logits = tf.reshape(logits, [-1, self.num_steps, vocab_size]) self._output_prob=tf.nn.softmax(logits) # Use the contrib sequence loss and average over the batches mask=tf.sequence_mask(lengths=self._sequence_length, maxlen=self.num_steps, dtype=data_type()) loss = tf.contrib.seq2seq.sequence_loss( logits, self._target, mask, average_across_timesteps=True, average_across_batch=True) # Update the cost self._cost = loss #self._lr = tf.Variable(0.0, trainable=False) tvars = tf.trainable_variables() grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars), config.max_grad_norm) optimizer = tf.train.AdamOptimizer() self._train_op = optimizer.apply_gradients( zip(grads, tvars), global_step=tf.train.get_or_create_global_step()) def _build_rnn_graph(self, inputs, sequence_length, is_training): return self._build_rnn_graph_lstm(inputs, sequence_length, is_training) def _get_lstm_cell(self, is_training): return tf.contrib.rnn.BasicLSTMCell( config.hidden_size, forget_bias=0.0, state_is_tuple=True, reuse=not is_training) def _build_rnn_graph_lstm(self, inputs, sequence_length, is_training): """Build the inference graph using canonical LSTM cells.""" # Slightly better results can be obtained with forget gate biases # initialized to 1 but the hyperparameters of the model would need to be # different than reported in the paper. 
def make_cell(): cell = self._get_lstm_cell( is_training) if is_training and config.keep_prob < 1: cell = tf.contrib.rnn.DropoutWrapper( cell, output_keep_prob=config.keep_prob) return cell cell = tf.contrib.rnn.MultiRNNCell( [make_cell() for _ in range(config.num_layers)], state_is_tuple=True) outputs, states=tf.nn.dynamic_rnn(cell=cell, inputs=inputs, sequence_length=sequence_length, dtype=data_type()) return outputs def run_epoch(sess, model, input, sequence_length, target=None, mode='train'): #Runs the model on the given data. if mode=='train': #train language model _,cost = sess.run([model._train_op, model._cost], feed_dict={model._input: input, model._target:target, model._sequence_length:sequence_length}) return cost elif mode=='test': #test language model cost = sess.run(model._cost, feed_dict={model._input: input, model._target:target, model._sequence_length:sequence_length}) return cost else: #use the language model to calculate sentence probability output_prob = sess.run(model._output_prob, feed_dict={model._input: input, model._sequence_length:sequence_length}) return output_prob def main(config): if config.mode=='forward' or config.mode=='use': with tf.name_scope("forward_train"): with tf.variable_scope("forward", reuse=None): m_forward = PTBModel(is_training=True) with tf.name_scope("forward_test"): with tf.variable_scope("forward", reuse=True): mtest_forward = PTBModel(is_training=False) var=tf.trainable_variables() var_forward=[x for x in var if x.name.startswith('forward')] saver_forward=tf.train.Saver(var_forward, max_to_keep=1) if config.mode=='backward' or config.mode=='use': with tf.name_scope("backward_train"): with tf.variable_scope("backward", reuse=None): m_backward = PTBModel(is_training=True) with tf.name_scope("backward_test"): with tf.variable_scope("backward", reuse=True): mtest_backward = PTBModel(is_training=False) var=tf.trainable_variables() var_backward=[x for x in var if x.name.startswith('backward')] saver_backward=tf.train.Saver(var_backward, max_to_keep=1) init = tf.global_variables_initializer() with tf.Session() as session: session.run(init) input = [[3,4,5,6,6,7,8,9,4,5,6,7,8,9,2]] sequence_length = [10] prob_old=run_epoch(session, mtest_forward, input, sequence_length, mode='use') print(prob_old) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Experiment setup") # misc parser.add_argument('--seed', default=33, type=int) parser.add_argument('--gpu', default="3", type=str) parser.add_argument('--no_train', default=False, action="store_true") parser.add_argument('--no_preds', default=False, action="store_true") parser.add_argument('--exps_dir', default=None, type=str) parser.add_argument('--exp_name', default=None, type=str) parser.add_argument('--load', default=None, type=str) # data property parser.add_argument('--data_path', default='data/quora/quora.txt', type=str) parser.add_argument('--dict_path', default='data/quora/dict.pkl', type=str) parser.add_argument('--dict_size', default=30000, type=int) parser.add_argument('--vocab_size', default=30003, type=int) parser.add_argument('--backward', default=False, action="store_true") parser.add_argument('--keyword_pos', default=True, action="store_false") # model architecture parser.add_argument('--num_steps', default=15, type=int) parser.add_argument('--num_layers', default=2, type=int) parser.add_argument('--emb_size', default=256, type=int) parser.add_argument('--hidden_size', default=300, type=int) parser.add_argument('--dropout', default=0.0, type=float) 
parser.add_argument('--model', default=0, type=int) # optimization parser.add_argument('--batch_size', default=128, type=int) parser.add_argument('--epochs', default=200, type=int) parser.add_argument('--learning_rate', default=0.001, type=float) parser.add_argument('--weight_decay', default=0.00, type=float) parser.add_argument('--clip_norm', default=0.00, type=float) parser.add_argument('--no_cuda', default=False, action="store_true") parser.add_argument('--local', default=False, action="store_true") parser.add_argument('--threshold', default=0.1, type=float) # evaluation parser.add_argument('--sim', default='word_max', type=str) parser.add_argument('--mode', default='sa', type=str) parser.add_argument('--accuracy', default=False, action="store_true") parser.add_argument('--top_k', default=10, type=int) parser.add_argument('--accumulate_step', default=1, type=int) parser.add_argument('--backward_path', default=None, type=str) parser.add_argument('--forward_path', default=None, type=str) # sampling parser.add_argument('--use_data_path', default='data/input/input.txt', type=str) parser.add_argument('--reference_path', default=None, type=str) parser.add_argument('--pos_path', default='POS/english-models', type=str) parser.add_argument('--emb_path', default='data/quora/emb.pkl', type=str) parser.add_argument('--max_key', default=3, type=float) parser.add_argument('--max_key_rate', default=0.5, type=float) parser.add_argument('--rare_since', default=30000, type=int) parser.add_argument('--sample_time', default=100, type=int) parser.add_argument('--search_size', default=100, type=int) parser.add_argument('--action_prob', default=[0.3,0.3,0.3,0.3], type=list) parser.add_argument('--just_acc_rate', default=0.0, type=float) parser.add_argument('--sim_mode', default='keyword', type=str) parser.add_argument('--save_path', default='temp.txt', type=str) parser.add_argument('--forward_save_path', default='data/tfmodel/forward.ckpt', type=str) parser.add_argument('--backward_save_path', default='data/tfmodel/backward.ckpt', type=str) parser.add_argument('--max_grad_norm', default=5, type=float) parser.add_argument('--keep_prob', default=1, type=float) d = vars(parser.parse_args()) option = Option(d) random.seed(option.seed) np.random.seed(option.seed) os.environ["CUDA_VISIBLE_DEVICES"] = option.gpu config = option main(option)
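# --- Hedged example (not part of the original script) ------------------------
# A small numpy-only sketch (relying on the numpy import at the top of this
# file) of how the softmax array returned by run_epoch(..., mode='use') above
# could be turned into a sentence log-probability. The next-token convention
# (the probability of token t+1 is read from position t) is an assumption made
# for illustration; only the [batch, num_steps, vocab_size] shape comes from
# the model code above.
def sentence_log_prob(output_prob, token_ids, length):
    # output_prob: np.ndarray of shape [1, num_steps, vocab_size]
    # token_ids:   the ids that were fed as model._input (a single sentence)
    # length:      the value that was passed as sequence_length
    log_p = 0.0
    for t in range(length - 1):
        next_token = token_ids[t + 1]
        log_p += np.log(output_prob[0, t, next_token] + 1e-12)
    return log_p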
import numpy as np
from ..Tools.Downloading._ReadDataIndex import _ReadDataIndex
from .. import Globals


def ReadIndex(subcomp, L, prod):
    '''
    Reads the index file for a given data product.

    Inputs
    ======
    subcomp : string
        Name of sub component of instrument
    L : int
        Level of data to download
    prod : str
        Data product to download

    Available data products
    =======================
    subcomp  L  prod
    efd      2  'E_spin'
    efd      2  'pot'
    efd      2  'spec'
    hfa      2  'high'
    hfa      2  'low'
    hfa      2  'monit'
    hfa      3  ''
    ofa      2  'complex'
    ofa      2  'matrix'
    ofa      2  'spec'

    Returns
    =======
    numpy.recarray
    '''
    if subcomp == 'hfa' and L == 3:
        idxfname = Globals.DataPath + 'PWE/Index-L{:01d}-{:s}.dat'.format(L, subcomp)
        datapath = Globals.DataPath + 'PWE/{:s}/L{:01d}/'.format(subcomp, L)
    else:
        idxfname = Globals.DataPath + 'PWE/Index-L{:01d}-{:s}-{:s}.dat'.format(L, subcomp, prod)
        datapath = Globals.DataPath + 'PWE/{:s}/L{:01d}/{:s}/'.format(subcomp, L, prod)

    return _ReadDataIndex(idxfname)
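# --- Hedged usage sketch (not part of the original module) -------------------
# ReadIndex is a thin wrapper around _ReadDataIndex, so usage is just a matter
# of picking one of the subcomp/L/prod combinations listed in the docstring
# above. The call below assumes Globals.DataPath points at an existing data
# tree that already contains the corresponding index file.
def _example_read_index():
    idx = ReadIndex(subcomp='hfa', L=2, prod='low')   # L2 'low' HFA product
    return idx                                        # numpy.recarray of index entries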
# -*- coding: UTF-8 -*- # A part of NonVisual Desktop Access (NVDA) # Copyright (C) 2007-2020 NV Access Limited, Peter Vágner # This file is covered by the GNU General Public License. # See the file COPYING for more details. import time import nvwave import threading import queue from ctypes import cdll from ctypes import * import config import globalVars from logHandler import log import os import codecs isSpeaking = False onIndexReached = None bgThread=None bgQueue = None player = None espeakDLL=None #: Keeps count of the number of bytes pushed for the current utterance. #: This is necessary because index positions are given as ms since the start of the utterance. _numBytesPushed = 0 #Parameter bounds minRate=80 maxRate=450 minPitch=0 maxPitch=99 #event types espeakEVENT_LIST_TERMINATED=0 espeakEVENT_WORD=1 espeakEVENT_SENTENCE=2 espeakEVENT_MARK=3 espeakEVENT_PLAY=4 espeakEVENT_END=5 espeakEVENT_MSG_TERMINATED=6 espeakEVENT_PHONEME=7 #position types POS_CHARACTER=1 POS_WORD=2 POS_SENTENCE=3 #output types AUDIO_OUTPUT_PLAYBACK=0 AUDIO_OUTPUT_RETRIEVAL=1 AUDIO_OUTPUT_SYNCHRONOUS=2 AUDIO_OUTPUT_SYNCH_PLAYBACK=3 #synth flags espeakCHARS_AUTO=0 espeakCHARS_UTF8=1 espeakCHARS_8BIT=2 espeakCHARS_WCHAR=3 espeakSSML=0x10 espeakPHONEMES=0x100 espeakENDPAUSE=0x1000 espeakKEEP_NAMEDATA=0x2000 #speech parameters espeakSILENCE=0 espeakRATE=1 espeakVOLUME=2 espeakPITCH=3 espeakRANGE=4 espeakPUNCTUATION=5 espeakCAPITALS=6 espeakWORDGAP=7 espeakOPTIONS=8 # reserved for misc. options. not yet used espeakINTONATION=9 espeakRESERVED1=10 espeakRESERVED2=11 #error codes EE_OK=0 #EE_INTERNAL_ERROR=-1 #EE_BUFFER_FULL=1 #EE_NOT_FOUND=2 # eSpeak initialization flags espeakINITIALIZE_DONT_EXIT = 0x8000 class espeak_EVENT_id(Union): _fields_=[ ('number',c_int), ('name',c_char_p), ('string',c_char*8), ] class espeak_EVENT(Structure): _fields_=[ ('type',c_int), ('unique_identifier',c_uint), ('text_position',c_int), ('length',c_int), ('audio_position',c_int), ('sample',c_int), ('user_data',c_void_p), ('id',espeak_EVENT_id), ] class espeak_VOICE(Structure): _fields_=[ ('name',c_char_p), ('languages',c_char_p), ('identifier',c_char_p), ('gender',c_byte), ('age',c_byte), ('variant',c_byte), ('xx1',c_byte), ('score',c_int), ('spare',c_void_p), ] def __eq__(self, other): return isinstance(other, type(self)) and addressof(self) == addressof(other) # As __eq__ was defined on this class, we must provide __hash__ to remain hashable. # The default hash implementation is fine for our purposes. def __hash__(self): return super().__hash__() # constants that can be returned by espeak_callback CALLBACK_CONTINUE_SYNTHESIS=0 CALLBACK_ABORT_SYNTHESIS=1 def encodeEspeakString(text): return text.encode('utf8') def decodeEspeakString(data): return data.decode('utf8') t_espeak_callback=CFUNCTYPE(c_int,POINTER(c_short),c_int,POINTER(espeak_EVENT)) @t_espeak_callback def callback(wav,numsamples,event): try: global player, isSpeaking, _numBytesPushed if not isSpeaking: return CALLBACK_ABORT_SYNTHESIS indexes = [] for e in event: if e.type==espeakEVENT_MARK: indexNum = int(decodeEspeakString(e.id.name)) # e.audio_position is ms since the start of this utterance. # Convert to bytes since the start of the utterance. BYTES_PER_SAMPLE = 2 MS_PER_SEC = 1000 bytesPerMS = player.samplesPerSec * BYTES_PER_SAMPLE // MS_PER_SEC indexByte = e.audio_position * bytesPerMS # Subtract bytes in the utterance that have already been handled # to give us the byte offset into the samples for this callback. 
indexByte -= _numBytesPushed indexes.append((indexNum, indexByte)) elif e.type==espeakEVENT_LIST_TERMINATED: break if not wav: player.idle() onIndexReached(None) isSpeaking = False return CALLBACK_CONTINUE_SYNTHESIS wav = string_at(wav, numsamples * sizeof(c_short)) if numsamples>0 else b"" prevByte = 0 for indexNum, indexByte in indexes: player.feed(wav[prevByte:indexByte], onDone=lambda indexNum=indexNum: onIndexReached(indexNum)) prevByte = indexByte if not isSpeaking: return CALLBACK_ABORT_SYNTHESIS player.feed(wav[prevByte:]) _numBytesPushed += len(wav) return CALLBACK_CONTINUE_SYNTHESIS except: log.error("callback", exc_info=True) class BgThread(threading.Thread): def __init__(self): super().__init__(name=f"{self.__class__.__module__}.{self.__class__.__qualname__}") self.setDaemon(True) def run(self): global isSpeaking while True: func, args, kwargs = bgQueue.get() if not func: break try: func(*args, **kwargs) except: log.error("Error running function from queue", exc_info=True) bgQueue.task_done() def _execWhenDone(func, *args, mustBeAsync=False, **kwargs): global bgQueue if mustBeAsync or bgQueue.unfinished_tasks != 0: # Either this operation must be asynchronous or There is still an operation in progress. # Therefore, run this asynchronously in the background thread. bgQueue.put((func, args, kwargs)) else: func(*args, **kwargs) def _speak(text): global isSpeaking, _numBytesPushed uniqueID=c_int() # if eSpeak was interupted while speaking ssml that changed parameters such as pitch, # It may not reset those runtime values back to the user-configured values. # Therefore forcefully cause eSpeak to reset its parameters each time beginning to speak again after not speaking. if not isSpeaking: espeakDLL.espeak_ng_Cancel() isSpeaking = True _numBytesPushed = 0 # eSpeak can only process compound emojis when using a UTF8 encoding text=text.encode('utf8',errors='ignore') flags = espeakCHARS_UTF8 | espeakSSML | espeakPHONEMES return espeakDLL.espeak_Synth(text,0,0,0,0,flags,byref(uniqueID),0) def speak(text): global bgQueue _execWhenDone(_speak, text, mustBeAsync=True) def stop(): global isSpeaking, bgQueue # Kill all speech from now. # We still want parameter changes to occur, so requeue them. params = [] try: while True: item = bgQueue.get_nowait() if item[0] != _speak: params.append(item) bgQueue.task_done() except queue.Empty: # Let the exception break us out of this loop, as queue.empty() is not reliable anyway. pass for item in params: bgQueue.put(item) isSpeaking = False player.stop() def pause(switch): global player player.pause(switch) def setParameter(param,value,relative): _execWhenDone(espeakDLL.espeak_SetParameter,param,value,relative) def getParameter(param,current): return espeakDLL.espeak_GetParameter(param,current) def getVoiceList(): voices=espeakDLL.espeak_ListVoices(None) voiceList=[] for voice in voices: if not voice: break voiceList.append(voice.contents) return voiceList def getCurrentVoice(): voice = espeakDLL.espeak_GetCurrentVoice() if voice: return voice.contents else: return None def setVoice(voice): # For some weird reason, espeak_EspeakSetVoiceByProperties throws an integer divide by zero exception. 
setVoiceByName(voice.identifier) def setVoiceByName(name): _execWhenDone(espeakDLL.espeak_SetVoiceByName,encodeEspeakString(name)) def _setVoiceAndVariant(voice=None, variant=None): v=getCurrentVoice() res = decodeEspeakString(v.identifier).split("+") if not voice: voice = res[0] if not variant: if len(res) == 2: variant = res[1] else: variant = "none" if variant == "none": espeakDLL.espeak_SetVoiceByName(encodeEspeakString(voice)) else: try: espeakDLL.espeak_SetVoiceByName(encodeEspeakString("%s+%s" % (voice, variant))) except: espeakDLL.espeak_SetVoiceByName(encodeEspeakString(voice)) def setVoiceAndVariant(voice=None, variant=None): _execWhenDone(_setVoiceAndVariant, voice=voice, variant=variant) def _setVoiceByLanguage(lang): v=espeak_VOICE() lang=lang.replace('_','-') if lang[:2] == 'ja': lang = 'en-us' v.languages=encodeEspeakString(lang) try: espeakDLL.espeak_SetVoiceByProperties(byref(v)) except: v.languages=encodeEspeakString("en") espeakDLL.espeak_SetVoiceByProperties(byref(v)) def setVoiceByLanguage(lang): _execWhenDone(_setVoiceByLanguage, lang) def espeak_errcheck(res, func, args): if res != EE_OK: raise RuntimeError("%s: code %d" % (func.__name__, res)) return res def initialize(indexCallback=None): """ @param indexCallback: A function which is called when eSpeak reaches an index. It is called with one argument: the number of the index or C{None} when speech stops. """ global espeakDLL, bgThread, bgQueue, player, onIndexReached espeakDLL = cdll.LoadLibrary(os.path.join(globalVars.appDir, "synthDrivers", "espeak.dll")) espeakDLL.espeak_Info.restype=c_char_p espeakDLL.espeak_Synth.errcheck=espeak_errcheck espeakDLL.espeak_SetVoiceByName.errcheck=espeak_errcheck espeakDLL.espeak_SetVoiceByProperties.errcheck=espeak_errcheck espeakDLL.espeak_SetParameter.errcheck=espeak_errcheck espeakDLL.espeak_Terminate.errcheck=espeak_errcheck espeakDLL.espeak_ListVoices.restype=POINTER(POINTER(espeak_VOICE)) espeakDLL.espeak_GetCurrentVoice.restype=POINTER(espeak_VOICE) espeakDLL.espeak_SetVoiceByName.argtypes=(c_char_p,) eSpeakPath = os.path.join(globalVars.appDir, "synthDrivers") sampleRate = espeakDLL.espeak_Initialize( AUDIO_OUTPUT_SYNCHRONOUS, 300, os.fsencode(eSpeakPath), # #10607: ensure espeak does not exit NVDA's process on errors such as the espeak path being invalid. espeakINITIALIZE_DONT_EXIT ) if sampleRate <= 0: raise OSError(f"espeak_Initialize failed with code {sampleRate}. Given Espeak data path of {eSpeakPath}") player = nvwave.WavePlayer( channels=1, samplesPerSec=sampleRate, bitsPerSample=16, outputDevice=config.conf["speech"]["outputDevice"], buffered=True ) onIndexReached = indexCallback espeakDLL.espeak_SetSynthCallback(callback) bgQueue = queue.Queue() bgThread=BgThread() bgThread.start() def terminate(): global bgThread, bgQueue, player, espeakDLL , onIndexReached stop() bgQueue.put((None, None, None)) bgThread.join() espeakDLL.espeak_Terminate() bgThread=None bgQueue=None player.close() player=None espeakDLL=None onIndexReached = None def info(): # Python 3.8: a path string must be specified, a NULL is fine when what we need is version string. return espeakDLL.espeak_Info(None).decode() def getVariantDict(): dir = os.path.join(globalVars.appDir, "synthDrivers", "espeak-ng-data", "voices", "!v") # Translators: name of the default espeak varient. 
variantDict={"none": pgettext("espeakVarient", "none")} for fileName in os.listdir(dir): absFilePath = os.path.join(dir, fileName) if os.path.isfile(absFilePath): # In python 3, open assumes the default system encoding by default. # This fails if Windows' "use Unicode UTF-8 for worldwide language support" option is enabled. # The expected encoding is unknown, therefore use latin-1 to stay as close to Python 2 behavior as possible. try: with open(absFilePath, 'r', encoding="latin-1") as file: for line in file: if line.startswith('name '): temp=line.split(" ") if len(temp) ==2: name=temp[1].rstrip() break name=None except: log.error("Couldn't parse espeak variant file %s" % fileName, exc_info=True) continue if name is not None: variantDict[fileName]=name return variantDict
import mock import pytest from django import forms from django.db import models from filer.models import Image from barbeque.filer import FilerFileField, AdminFileFormField from barbeque.tests.factories.filer import ImageFactory class FileModel(models.Model): file1 = FilerFileField(null=True) file2 = FilerFileField(blank=True) file3 = FilerFileField() @pytest.mark.django_db class TestAdminFileFormField: def test_super_not_clean(self): field = AdminFileFormField( mock.Mock(), Image.objects.all(), 'id', required=False) assert field.clean('') is None def test_without_alt_text_disabled(self): image = ImageFactory.create(default_alt_text=None) field = AdminFileFormField( mock.Mock(), Image.objects.all(), 'id', alt_text_required=False) assert isinstance(field.clean(str(image.pk)), Image) def test_without_alt_text_enabled(self): image = ImageFactory.create(default_alt_text=None) field = AdminFileFormField(mock.Mock(), Image.objects.all(), 'id') with pytest.raises(forms.ValidationError): field.clean(str(image.pk)) def test_with_alt_text_enabled(self): image = ImageFactory.create(default_alt_text='Test') field = AdminFileFormField(mock.Mock(), Image.objects.all(), 'id') assert isinstance(field.clean(str(image.pk)), Image) def test_extensions_invalid_disabled(self): image = ImageFactory.create(default_alt_text='Test') field = AdminFileFormField( mock.Mock(), Image.objects.all(), 'id') assert isinstance(field.clean(str(image.pk)), Image) def test_extensions_valid_enabled(self): image = ImageFactory.create(default_alt_text='Test') field = AdminFileFormField( mock.Mock(), Image.objects.all(), 'id', extensions=['jpg', 'gif']) assert isinstance(field.clean(str(image.pk)), Image) def test_extensions_invalid_enabled(self): image = ImageFactory.create(default_alt_text='Test') field = AdminFileFormField( mock.Mock(), Image.objects.all(), 'id', extensions=['png', 'gif']) with pytest.raises(forms.ValidationError): field.clean(str(image.pk)) class TestFilerFileField: def test_formfield(self): form_class = forms.models.modelform_factory(FileModel, fields='__all__') assert isinstance(form_class().fields['file1'], AdminFileFormField) @pytest.mark.django_db def test_blank_null(self): assert FileModel._meta.get_field('file1').blank is True assert FileModel._meta.get_field('file1').null is True assert FileModel._meta.get_field('file2').blank is True assert FileModel._meta.get_field('file2').null is True assert FileModel._meta.get_field('file3').blank is False assert FileModel._meta.get_field('file3').null is False
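# --- Hedged sketch (not part of the original tests) --------------------------
# The tests above exercise AdminFileFormField's alt-text and extension checks
# by constructing the field directly. For reference, the same options wired
# into a plain form might look like this; the form name and the mock "rel"
# argument are illustrative assumptions mirroring the test setup.
class IllustrativeImageForm(forms.Form):
    image = AdminFileFormField(
        mock.Mock(),                 # "rel" argument, as used in the tests above
        Image.objects.all(),
        'id',
        alt_text_required=True,      # reject images without default_alt_text
        extensions=['jpg', 'gif'],   # reject other file extensions
    )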
import shutil from pathlib import Path import dask.dataframe as dd import numpy as np import pandas as pd from bokeh.io import export_png from bokeh.io import output_file from bokeh.models import Column from bokeh.models import Div from bokeh.plotting import figure from bokeh.plotting import save from sid.colors import get_colors from sid.statistics import calculate_r_effective from sid.statistics import calculate_r_zero def visualize_simulation_results( data, outdir_path, infection_vars, background_vars, window_length=7, ): """Visualize the results one or more simulation results. Args: data (str, pandas.DataFrame, Path, list): list of paths to the pickled simulation results. outdir_path (path): path to the folder where to save the results. Careful, all contents are removed when the function is called. infection_vars (list): list of infection rates to plot background_vars (list): list of background variables by whose value to group the results. Have to be present in all simulation results. window_length (int): How many dates to use for the reproduction numbers. """ colors = get_colors("categorical", 12) if isinstance(background_vars, str): background_vars = [background_vars] outdir_path = Path(outdir_path) datasets = [data] if isinstance(data, (str, pd.DataFrame, Path)) else data datasets = [ Path(path_or_df) if isinstance(path_or_df, str) else path_or_df for path_or_df in datasets ] _create_folders(outdir_path, background_vars) rates = _create_rates_for_all_data( datasets, infection_vars, background_vars, window_length, ) for bg_var in ["general"] + background_vars: if bg_var == "general": title = "Rates in the General Population" else: title = f"Rates According to {_nice_str(bg_var)}" rate_plots = _create_rate_plots(rates[bg_var], colors, title) title_element = Div(text=title, style={"font-size": "150%"}) _export_plots_and_layout( title=title_element, plots=rate_plots, outdir_path=outdir_path / bg_var, ) def _create_folders(outdir_path, background_vars): if outdir_path.exists(): shutil.rmtree(outdir_path) outdir_path.mkdir() for var in ["general"] + background_vars: outdir_path.joinpath(var).mkdir() def _create_rates_for_all_data( datasets, infection_vars, background_vars, window_length ): """Create the statistics for each dataset and merge them into one dataset. Args: datasets (list): list of str, Paths to pickled DataFrames or pd.DataFrames. infection_vars (list): list of infection rates to plot background_vars (list): list of background variables by whose value to group the results. Have to be present in all simulation results. window_length (int): How many dates to use for the reproduction numbers. rates (pandas.DataFrame): DataFrame with the dates as index. The columns are a MultiIndex with four levels: The outermost is the "bg_var" ("general" for the overall rate). The next is the "rate" (e.g. the infectious rate or r zero), then "bg_value", the value of the background variable and last "data_id". 
""" name_to_statistics = {} for i, df_or_path in enumerate(datasets): vars_for_r_zero = ["immunity", "n_has_infected", "cd_infectious_false"] keep_vars = sorted( set(infection_vars + background_vars + vars_for_r_zero + ["date"]) ) df_name, df = _load_data(df_or_path, keep_vars, i) name_to_statistics[df_name] = _create_statistics( df=df, infection_vars=infection_vars, background_vars=background_vars, window_length=window_length, ) rates = pd.concat(name_to_statistics, axis=1, names=["data_id"]) order = ["bg_var", "rate", "bg_value", "data_id"] rates = rates.reorder_levels(order=order, axis=1) return rates def _load_data(df_or_path, keep_vars, i): if isinstance(df_or_path, pd.DataFrame): df = df_or_path[keep_vars] df_name = i elif isinstance(df_or_path, Path): df = dd.read_parquet(df_or_path, engine="fastparquet")[keep_vars].compute() df_name = df_or_path.stem else: raise NotImplementedError return df_name, df def _create_statistics(df, infection_vars, background_vars, window_length): """Calculate the infection rates and reproduction numbers for each date. Args: df (pandas.DataFrame): The simulation results. infection_vars (list): list of infection rates to plot background_vars (list): list of background variables by whose value to group the results. Have to be present in all simulation results. window_length (int): How many dates to use for the reproduction numbers. Returns: rates (pandas.DataFrame): DataFrame with the statistics of one simulation run. The index are the dates. The columns are a MultiIndex with three levels: The outermost is the "bg_var" ("general" for the overall rate). The next is the "bg_value", the last is the "rate" (e.g. the infectious rate or r zero). """ gb = df.groupby("date") overall = gb.mean()[infection_vars] overall["r_zero"] = gb.apply(calculate_r_zero, window_length) overall["r_effective"] = gb.apply(calculate_r_effective, window_length) # add column levels for later overall.columns.name = "rate" overall = _prepend_column_level(overall, "general", "bg_value") overall = _prepend_column_level(overall, "general", "bg_var") single_df_rates = [overall] for bg_var in background_vars: gb = df.groupby([bg_var, "date"]) infection_rates = gb.mean()[infection_vars].unstack(level=0) r_zeros = gb.apply(calculate_r_zero, window_length).unstack(level=0) r_zeros = _prepend_column_level(r_zeros, "r_zero", "rate") r_eff = gb.apply(calculate_r_effective, window_length).unstack(level=0) r_eff = _prepend_column_level(r_eff, "r_effective", "rate") rates_by_group = pd.concat([infection_rates, r_zeros, r_eff], axis=1) rates_by_group.columns.names = ["rate", "bg_value"] rates_by_group = _prepend_column_level(rates_by_group, bg_var, "bg_var") rates_by_group = rates_by_group.swaplevel("rate", "bg_value", axis=1) single_df_rates.append(rates_by_group) rates = pd.concat(single_df_rates, axis=1).fillna(0) return rates def _prepend_column_level(df, key, name): prepended = pd.concat([df], keys=[key], names=[name], axis=1) return prepended def _create_rate_plots(rates, colors, title): """Plot all rates for a single background variable Args: rates (pandas.DataFrame): DataFrame with the dates as index. The columns are a MultiIndex with three levels: The outermost is the variable name (e.g. infectious or r_zero). The next are the values the background variable can take, the last "data_id". colors (list): list of colors to use. title (str): the plot title will be the name of the rate plus this string. Returns: plots (list): list of bokeh plots. 
""" vars_to_plot = rates.columns.levels[0] plots = [] full_range_vars = [ "ever_infected", "immunity", "symptomatic_among_infectious", ] for var, color in zip(vars_to_plot, colors): y_range = (0, 1) if var in full_range_vars else None bg_values = rates[var].columns.unique().levels[0] for bg_val in bg_values: plot_title = f"{_nice_str(var)} {title}" if bg_val != "general": plot_title += f": {bg_val}" p = _plot_rates( rates=rates[var][bg_val], title=plot_title, color=color, y_range=y_range, ) p.name = var if bg_val == "general" else f"{var}_{bg_val.replace(' ', '')}" plots.append(p) return plots def _plot_rates(rates, title, color, y_range): """Plot the rates over time. Args: rates (DataFrame): the index are the x values, the values the y values. Every column is plotted as a separate line. color (str): color. title (str): plot title. y_range (tuple or None): range of the y axis. Returns: p (bokeh figure) """ xs = rates.index p = figure( tools=[], plot_height=400, plot_width=800, title=title, y_range=y_range, x_axis_type="datetime", ) # plot the median p.line(x=xs, y=rates.median(axis=1), alpha=1, line_width=2.75, line_color=color) # plot the confidence band q5 = rates.apply(np.nanpercentile, q=5, axis=1) q95 = rates.apply(np.nanpercentile, q=95, axis=1) p.varea(x=xs, y1=q95, y2=q5, alpha=0.2, color=color) # add the trajectories for var in rates: p.line(x=xs, y=rates[var], line_width=1, line_color=color, alpha=0.3) p = _style(p) return p def _export_plots_and_layout(title, plots, outdir_path): """Save all plots as png and the layout as html. Args: title (bokeh.Div): title element. plots (list): list of bokeh plots outdir_path (pathlib.Path): base path to which to append the plot name to build the path where to save each plot. """ for p in plots: outpath = outdir_path / f"{p.name}.png" output_file(outpath) export_png(p, filename=outpath) output_file(outdir_path / "overview.html") save(Column(title, *plots)) def _style(p): gray = "#808080" p.outline_line_color = None p.xgrid.visible = False p.ygrid.visible = False p.axis.minor_tick_line_color = None p.axis.axis_line_color = gray p.axis.major_label_text_color = gray p.axis.major_tick_line_color = gray return p def _nice_str(s): return s.replace("_", " ").title()