content | type
---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import stiefo
#stiefo.render_screen(["2-", "aus", "bei", "bei t a g", "pro", "pro z e nt"])
#stiefo.render_screen(["2- t", "2- z", "aus", "mit g e b", "der", "trans p o t", "die"])
#stiefo.render_screen(["der", "man", "ist", "nicht", "3b e0", "w e@ lich", "f a", "f schaft",
# "ei g schaft", "auf g a b", "be l a st"])
#stiefo.render_screen(["ver r a t", "ver b i nd", "für", "nach g e b", "gegen"])
#stiefo.render_screen(["endlich", "ge l e g lich", "w lich", "w lich*"])
#stiefo.render_screen(['durch', 'durch s', 'durch m e s', 'noch m a l', 'jedoch', 'deutschland'])
#stiefo.render_screen(['e b {a s}', 'e {a s} b', 'ein {a0}', 'j {a s}', 'nach {a0 r}', 'un 1l {a0 r}'])
#stiefo.render_screen(['l e b {a}(-0.4,0)', 'un 1l {a r}(-0.1,0)', '+3@0 {a0 r}(0.4,-0.25)', 'ge w i {a0}(-0.3,0) s', 'm {a}(-0.4,0) r'])
#stiefo.render_screen(['selbstverständlich', 'staatlich', 'stattlich', 'selb'])
#stiefo.render_screen(['w', 'w*', 'w4', 'w*4', 'ei g m4', 'ei g 1m4 e r', 'm4 s i cht',
# 'w4 lich*', '1z4', '1f4 ei', 'zu 1k4', 'vor {w**4}'])
stiefo.render_screen(['bund', 'ober', 'gleich', 'viel ei', 'viel fach',
'ver e@ gleich {A r}(0.125,0)', 'letzt lich*',
'wesen lich*', 'ei g tüm lich*', 'um s i cht', 'trotz'])
#stiefo.render_screen(['m e t', 'm', 'm*', 'm* e r', 'm* {a0}(0,0.3)',
# 't u r 1mm', 'mm'])
#stiefo.render_screen(['s e r', 's', 's*'])
#stiefo.render_screen(['t e t', '3@^0', 'ander', 'ich', 'ein ander'])
#stiefo.render_screen(['voll', 's i n voll', 'voll k o m', 'voll z i {a0 r}(-0.5,0)', 'nach u @^*00 i z i barkeit'])
#stiefo.render_screen(['s i n los', 'h a m los', 'b ei sp i l los', 'los g e', 'los l a s'])
#stiefo.render_screen(['ge l e g heit', 'k I nd heit', 'f heit', 'einheit'])
#stiefo.render_screen(['außerordentlich', 'mehr', 'sicher', '1s* lich', 'm* heit*', '1s* heit*'])
#stiefo.render_screen(['bereit', 'bis', 'bin', 'übrig', 'aber', 'überzeug', 'überdies'])
#stiefo.render_screen(['fest', 'vom', 'fast', 'freund'])
#stiefo.render_screen(['ungefähr', 'immer', 'zwar', 'euer', 'sofort', 'fort s e z ung', 'digital', 'digital i s i r', 'digital a z ei g'])
#stiefo.render_screen(['all', 'allzu', 'allein', 'allgemein', 'allerdings'])
| python |
import logging
from dht.node import SelfNode
from dht.settings import BUCKET_SIZE, BUCKET_REPLACEMENT_CACHE_SIZE
class BucketHasSelfException(Exception):
pass
class NodeNotFoundException(Exception):
pass
class NodeAlreadyAddedException(Exception):
pass
class BucketIsFullException(Exception):
pass
class Bucket:
""" A Bucket is a list of sorted Nodes by last_seen. """
def __init__(self,
nodes_size=BUCKET_SIZE,
replacement_cache_size=BUCKET_REPLACEMENT_CACHE_SIZE):
""" Init the Bucket. """
self.nodes = []
self.nodes_size = nodes_size
self.replacement_cache = []
self.replacement_cache_size = replacement_cache_size
self.has_self = False
def add_node(self, node):
""" Add a node to this bucket. """
try:
self.find_node(node.key)
raise NodeAlreadyAddedException('This node is already in this Bucket.')
except NodeNotFoundException:
pass
if self.has_self:
raise BucketHasSelfException('This Bucket has SelfNode, split this Bucket.')
if isinstance(node, SelfNode):
self.has_self = True
if len(self.nodes) < self.nodes_size:
self.nodes.append(node)
self.sort()
elif len(self.replacement_cache) < self.replacement_cache_size:
self.add_replacement(node)
else:
raise BucketIsFullException()
def find_node(self, key):
""" Find and return a Node by key in this Bucket. """
try:
return next(node for node in self.nodes if node.key == key)
except StopIteration:
raise NodeNotFoundException()
    def remove_node(self, key):
        """ Remove and return a Node by key from this Bucket. """
        try:
            (node, index) = next(
                (self.nodes[i], i) for i in range(len(self.nodes)) if self.nodes[i].key == key)
        except StopIteration:
            raise NodeNotFoundException()
        del self.nodes[index]
        return node
def sort(self):
""" Sort the nodes of this Bucket by last_seen. """
self.nodes.sort(key=lambda node: node.last_seen)
def add_replacement(self, node):
self.replacement_cache.append(node)
def get_unconnected_nodes(self) -> list:
""" Get the unconnected nodes in this Bucket. """
unconnected = []
for node in self.nodes:
if not node.is_connected():
unconnected.append(node)
for node in self.replacement_cache:
if not node.is_connected():
unconnected.append(node)
return unconnected
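# Illustrative usage sketch (not part of the original module). `FakeNode` is a
# hypothetical stand-in exposing the `key`, `last_seen` and `is_connected()`
# interface that Bucket relies on:
#
#     bucket = Bucket(nodes_size=2, replacement_cache_size=1)
#     bucket.add_node(FakeNode(key=b'a', last_seen=1))
#     bucket.add_node(FakeNode(key=b'b', last_seen=2))   # bucket.nodes is now full
#     bucket.add_node(FakeNode(key=b'c', last_seen=3))   # lands in the replacement cache
#     assert bucket.find_node(b'a').key == b'a'
#     bucket.remove_node(b'b')                           # frees a slot again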
| python |
from http.server import BaseHTTPRequestHandler, HTTPServer
import json
import argparse
import urllib.parse as urlparse
from osim.env import RunEnv
import numpy as np
from utils import Scaler
import multiprocessing
import pickle
PORT_NUMBER = 8018
def mp_test(s):
p = multiprocessing.Pool(2)
tras = p.map(run_episode_from_last_checkpoint, [(s, 'a')]*4)
p.close()
p.join()
return tras
def dump_episodes(chk_dir, episodes, cores):
scaler_file = chk_dir + '/scaler_latest'
scaler = pickle.load(open(scaler_file, 'rb'))
p = multiprocessing.Pool(cores, maxtasksperchild=1)
tras = p.map(run_episode_from_last_checkpoint,
[(scaler, chk_dir)]*episodes)
p.close()
p.join()
episodes_file = chk_dir + '/episodes_latest'
pickle.dump(tras, open(episodes_file, 'wb'))
def run_episode_from_last_checkpoint(pickled_object):
"""
Load the last checkpoint from the current folder, and using that
checkpoint run episodes parallely to collect the episodes
Args:
pickled_object = (scaler, chk_dir)
scaler: scaler object, used to scale/offset each observation dimension
to a similar range
chk_dir: the logger object
Returns: 4-typle of NumPy arrays
observes: shape = (episode len, obs_dim)
actions: shape = (episode len, act_dim)
rewards: shape = (episode len,)
unscaled_obs: useful for training scaler, shape = (episode len, obs_dim)
"""
import tensorflow as tf
scaler = pickled_object[0]
chkp_dir = pickled_object[1]
sess = tf.Session()
# chkp_dir = '/home/ubuntu/pat-cody/log-files/RunEnv_test2/Sep-02_11:57:45'
latest_chkp_file = tf.train.latest_checkpoint(chkp_dir, latest_filename='policy_checkpoint')
meta_graph = tf.train.import_meta_graph(latest_chkp_file + '.meta')
print(latest_chkp_file)
meta_graph.restore(sess, latest_chkp_file)
obs_ph = tf.get_collection('obs_ph_chk')[0]
sampled_act = tf.get_collection('sampled_act_chk')[0]
env = RunEnv(visualize=False)
obs = env.reset(difficulty=2)
observes, actions, rewards, unscaled_obs = [], [], [], []
done = False
step = 0.0
scale, offset = scaler.get()
scale[-1] = 1.0
offset[-1] = 0.0
while not done:
obs = np.asarray(obs)
obs = obs.astype(np.float64).reshape((1, -1))
obs = np.append(obs, [[step]], axis=1)
unscaled_obs.append(obs)
obs = (obs - offset) * scale
observes.append(obs)
action = get_action_from_obs(sess, obs_ph, sampled_act, obs)
actions.append(action)
obs, reward, done, _ = env.step(action[0])
if not isinstance(reward, float):
reward = np.asscalar(reward)
rewards.append(reward)
step += 1e-3
trajectory = {'observes': np.concatenate(observes),
'actions': np.concatenate(actions),
'rewards': np.array(rewards, dtype=np.float64),
'unscaled_obs': np.concatenate(unscaled_obs)}
return trajectory
def get_action_from_obs(sess, obs_ph, sampled_act, obs):
feed_dict = {obs_ph: obs}
return sess.run(sampled_act, feed_dict=feed_dict).reshape((1, -1)).astype(np.float64)
class myHandler(BaseHTTPRequestHandler):
def do_GET(self):
if '/ping' in self.path:
print(self.path)
parsed_url = urlparse.urlparse(self.path)
print(urlparse.parse_qs(parsed_url.query))
print('lmao it worked')
self.send_response(200)
self.send_header('Content-type', 'application/javascript')
self.end_headers()
self.wfile.write(bytes(json.dumps({'anil': 'tanu'}), 'utf8'))
return
if '/get_episodes' in self.path:
parsed_url = urlparse.urlparse(self.path)
query = urlparse.parse_qs(parsed_url.query)
episodes = int(query['episodes'][0])
chk_dir = query['chk_dir'][0]
cores = int(query['cores'][0])
print(chk_dir)
print(episodes)
dump_episodes(chk_dir, episodes, cores)
# s = Scaler(42)
# traj = mp_test(s)
# pickle.dump(traj, open('traj.pkl', 'wb'))
self.send_response(200)
self.send_header('Content-type', 'application/javascript')
self.end_headers()
self.wfile.write(bytes(json.dumps({'Success': 'OK'}), 'utf8'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--listen', type=str, default='127.0.0.1')
parser.add_argument('--port', type=int, default=PORT_NUMBER)
args = parser.parse_args()
server = HTTPServer((args.listen, args.port), myHandler)
print('Server started on', args)
server.serve_forever()
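# Example request (assumed usage, matching the query parameters parsed above;
# the chk_dir path is a placeholder):
#   curl 'http://127.0.0.1:8018/get_episodes?episodes=8&chk_dir=/path/to/log-files/run1&cores=4'
# The collected trajectories are pickled to <chk_dir>/episodes_latest.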
| python |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
s = pd.Series(np.random.normal(10, 8, 20))
s.plot(style='ko-', alpha=0.4, label='Series plotting')
plt.legend()
plt.savefig('pandasplot.png')
| python |
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import numpy as np
import popart
import popart._internal.ir as _ir
import pytest
def test_tensor_type_creation():
""" Test that we can create a popart._internal.ir.TensorType enum. """
_ir.TensorType.ActGrad
_ir.TensorType.Const
_ir.TensorType.Stream
_ir.TensorType.Unknown
_ir.TensorType.Variable
_ir.TensorType.N
def test_variable_update_type_creation():
""" Test that we can create a popart._internal.ir.VariableUpdateType enum.
"""
_ir.VariableUpdateType.None_
_ir.VariableUpdateType.Gradient
_ir.VariableUpdateType.Copy
def test_tensor_construction():
""" Test that we can construct a popart._internal.ir.Tensor object. """
ir = _ir.Ir()
g = ir.createGraph("g")
tId = "t"
tType = _ir.TensorType.ActGrad
dc = _ir.DebugContext()
_ = _ir.Tensor(tId, tType, g)
_ = _ir.Tensor(tId, tType, g, dc)
def test_tensor_str():
""" Test the str() method of a popart._internal.ir.Tensor object. """
ir = _ir.Ir()
g = ir.createGraph("g")
Tensor = lambda id: _ir.Tensor(id, _ir.TensorType.ActGrad, g)
assert Tensor("t0").str() == "t0"
assert Tensor("t1").str() == "t1"
def test_tensor_clone():
""" Test the clone() method of a popart._internal.ir.Tensor object. """
ir = _ir.Ir()
g = ir.createGraph("g")
t0 = _ir.Tensor("t0", _ir.TensorType.ActGrad, g)
t1 = t0.clone(g)
assert f"clone_{t0.str()}" == t1.str()
assert t0.info == t1.info
def test_tensor_tensor_type0():
""" Test the tensorType() method of a popart._internal.ir.Tensor object. """
ir = _ir.Ir()
g = ir.createGraph("g")
Tensor = lambda id, type: _ir.Tensor(id, type, g)
tTypes = [_ir.TensorType.ActGrad, _ir.TensorType.Const]
for i, tType in enumerate(tTypes):
assert Tensor(f"t{i}", tType).tensorType() == tType
def test_tensor_tensor_type1():
""" Test the tensor_type() method of a popart._internal.ir.Tensor object.
"""
ir = _ir.Ir()
g = ir.createGraph("g")
Tensor = lambda id, type: _ir.Tensor(id, type, g)
tTypes = {_ir.TensorType.ActGrad: "ActGrad", _ir.TensorType.Const: "Const"}
for i, (tType, tTypeStr) in enumerate(tTypes.items()):
assert Tensor(f"t{i}", tType).tensor_type() == tTypeStr
def test_tensor_set_tensor_type():
""" Test the setTensorType() method of a popart._internal.ir.Tensor object.
"""
ir = _ir.Ir()
g = ir.createGraph("g")
tTypeOld = _ir.TensorType.ActGrad
tTypeNew = _ir.TensorType.Const
t = _ir.Tensor("t", tTypeOld, g)
assert t.tensorType() == tTypeOld
t.setTensorType(tTypeNew)
assert t.tensorType() == tTypeNew
def test_tensor_get_set_replicated_streaming_mode():
""" Test the getReplicatedStreamMode() and setReplicatedStreamMode() methods
of a popart._internal.ir.Tensor object.
"""
ir = _ir.Ir()
g = ir.createGraph("g")
t = _ir.Tensor("t", _ir.TensorType.ActGrad, g)
assert t.getReplicatedStreamMode(
) == _ir.Tensor.ReplicatedStreamMode.Replicate
t.setReplicatedStreamMode(_ir.Tensor.ReplicatedStreamMode.Broadcast)
assert t.getReplicatedStreamMode(
) == _ir.Tensor.ReplicatedStreamMode.Broadcast
def test_tensor_has_tensor_data():
""" Test the hasTensorData() method of a popart._internal.ir.Tensor object.
"""
ir = _ir.Ir()
g = ir.createGraph("g")
t = _ir.Tensor("t", _ir.TensorType.ActGrad, g)
assert t.hasTensorData() == False
buffer = np.random.rand(2, 3, 4)
tInfo = _ir.TensorInfo(_ir.DataType.FLOAT, buffer.shape)
t.setTensorData(tInfo, buffer)
assert t.hasTensorData() == True
def test_tensor_tensor_data():
""" Test the tensorData() and setTensorData() methods of a
popart._internal.ir.Tensor object.
"""
ir = _ir.Ir()
g = ir.createGraph("g")
t = _ir.Tensor("t", _ir.TensorType.ActGrad, g)
with pytest.raises(popart.popart_exception) as e_info:
t.tensorData()
assert e_info.value.args[0] == "Data not set for t"
with pytest.raises(popart.popart_exception) as e_info:
t.tensorData_const()
assert e_info.value.args[0] == "Data not set for t"
buffer = np.random.rand(2, 3, 4)
tInfo = _ir.TensorInfo(_ir.DataType.FLOAT, buffer.shape)
t.setTensorData(tInfo, buffer)
# TODO(T42205): Test that the returned tensor data matches the one that was
# set.
t.tensorData()
t.tensorData_const()
def test_tensor_get_graph():
""" Test the getGraph() method of a popart._internal.ir.Tensor object. """
ir = _ir.Ir()
g = ir.createGraph("g")
t = _ir.Tensor("t", _ir.TensorType.ActGrad, g)
gFromTensor = t.getGraph()
assert g.id == gFromTensor.id
gFromTensor = t.getGraph_const()
assert g.id == gFromTensor.id
def test_tensor_get_ir():
""" Test the getIr() method of a popart._internal.ir.Tensor object. """
ir = _ir.Ir()
g = ir.createGraph("g")
t = _ir.Tensor("t", _ir.TensorType.ActGrad, g)
irFromTensor = t.getIr()
assert g.id == irFromTensor.getAllGraphs()[1].id
irFromTensor = t.getIr_const()
assert g.id == irFromTensor.getAllGraphs()[1].id
def test_tensor_has_virtual_graph_id():
""" Test the hasVirtualGraphId() method of a popart._internal.ir.Tensor
object.
"""
ir = _ir.Ir()
g = ir.createGraph("g")
t = _ir.Tensor("t", _ir.TensorType.ActGrad, g)
# TODO(T42205): Test that hasVirtualGraphId() returns the expected values.
t.hasVirtualGraphId()
def test_tensor_get_virtual_graph_id():
""" Test the getVirtualGraphId() method of a popart._internal.ir.Tensor
object.
"""
ir = _ir.Ir()
g = ir.createGraph("g")
t = _ir.Tensor("t", _ir.TensorType.ActGrad, g)
with pytest.raises(popart.popart_exception) as e_info:
t.getVirtualGraphId()
assert e_info.value.args[0] == (
"Invalid call to getVirtualGraphId, Tensor does not have one")
# TODO(T42205): Test that getVirtualGraphId() returns the expected values.
def test_tensor_get_virtual_graph_id_unsafe():
""" Test the getVirtualGraphIdUnsafe() method of a
popart._internal.ir.Tensor object.
"""
ir = _ir.Ir()
g = ir.createGraph("g")
t = _ir.Tensor("t", _ir.TensorType.ActGrad, g)
# TODO(T42205): Test that getVirtualGraphIdUnsafe() returns the expected
# values.
t.getVirtualGraphIdUnsafe()
def test_tensor_get_batch_axis():
""" Test the getBatchAxis() method of a popart._internal.ir.Tensor object.
"""
ir = _ir.Ir()
g = ir.createGraph("g")
t = _ir.Tensor("t", _ir.TensorType.ActGrad, g)
assert t.getBatchAxis() == -1
# TODO(T42205): Test that getBatchAxis() returns the expected values when
# the tensor has producers/consumers.
def test_tensor_get_debug_info():
""" Test the getDebugInfo() method of a popart._internal.ir.Tensor object.
"""
ir = _ir.Ir()
g = ir.createGraph("g")
t = _ir.Tensor("t", _ir.TensorType.ActGrad, g)
_ = t.getDebugInfo()
def test_tensor_id():
""" Test the id attribute of a popart._internal.ir.Tensor object. """
ir = _ir.Ir()
g = ir.createGraph("g")
t = _ir.Tensor("t", _ir.TensorType.ActGrad, g)
assert t.id == "t"
def test_replicated_stream_mode_creation():
""" Test that we can create a
popart._internal.ir.Tensor.ReplicatedStreamMode enum.
"""
_ir.Tensor.ReplicatedStreamMode.Replicate
_ir.Tensor.ReplicatedStreamMode.Broadcast
# TODO(T42205): Write unit test for the following methods and attributes of the
# Tensor class:
# - Tensor.isUnmodifiable()
# - Tensor.isCheckpointTensor()
# - Tensor.isImplicitRecomputeTensor()
# - Tensor.isRestoreInplaceTensor()
# - Tensor.idIncludesPrefix()
# - Tensor.isOptimizerTensor()
# - Tensor.isRemoteArgTensor()
# - Tensor.isRandomSeedTensor()
# - Tensor.isOptimizerStateTensor()
# - Tensor.isAccumulatorTensor()
# - Tensor.isHostLoadTensor()
# - Tensor.isWeightTensor()
# - Tensor.isAnchored()
# - Tensor.isRootAnchor()
# - Tensor.anyAlias()
# - Tensor.associatedOps()
# - Tensor.getVirtualGraphIdAndTileSet()
# - Tensor.getVirtualGraphIdAndTileSetUnsafe()
# - Tensor.consumersAllPreLoss()
# - Tensor.isModified()
# - Tensor.isAliased()
# - Tensor.getDataViaGraphTraversal()
# - Tensor.consumers
# - Tensor.info
# - Tensor.tensorLocationInfo
# - Tensor.inputSettings
| python |
SPOTIFY_USERS = {
'<user_name_1>': {
'client_id': '<client_id>',
'client_secret': '<client_secret>',
'redirect_uri': '<redirect_uri>',
'user_name': '<user_name>',
},
'<user_name_2>': {
'client_id': '<client_id>',
'client_secret': '<client_secret>',
'redirect_uri': '<redirect_uri>',
'user_name': '<user_name>',
},
}
POSTGRES_CONNECTION_STRING = 'postgres://<user>:<pass>@<host>:<port>/<db>'
| python |
from sisense.resource import Resource
class Folder(Resource):
def get(self, oid: str) -> Resource:
"""
Get a specific folder.
:param oid: (str) Folder's ID.
:return: (Folder)
"""
content = self._api.get(f'folders/{oid}')
return Folder(self._api, content)
def all(self) -> list:
"""
Get all folders.
:return: (list) List of folder objects.
"""
content = self._api.get('folders')
results = [Folder(self._api, rjson) for rjson in content]
return results
def create(self, name: str, parent: str = None) -> Resource:
"""
Create a new folder.
:param name: (str) Folder's name.
:param parent: (str, default None) Parent folder's ID.
:return: (Folder) The new folder.
"""
data = {'name': name}
if parent:
data['parentId'] = parent
content = self._api.post('folders', data=data)
return Folder(self._api, content)
def delete(self):
"""Delete the current folder."""
self._api.delete(f'folders/{self.oid}')
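# Illustrative usage sketch (not part of the original module; assumes an
# authenticated API client `api` as expected by sisense.resource.Resource,
# and made-up folder names):
#
#     folders = Folder(api)
#     reports = folders.create('Reports')
#     child = folders.create('2024', parent=reports.oid)
#     assert folders.get(child.oid).oid == child.oid
#     child.delete()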
| python |
from clearskies.secrets.additional_configs import MySQLConnectionDynamicProducerViaSSHCertBastion as Base
from pathlib import Path
import socket
import subprocess
import os
import time
class MySQLConnectionDynamicProducerViaSSHCertBastion(Base):
_config = None
_boto3 = None
def __init__(
self,
producer_name=None,
bastion_region=None,
bastion_name=None,
bastion_host=None,
bastion_username=None,
public_key_file_path=None,
local_proxy_port=None,
cert_issuer_name=None,
database_host=None,
database_name=None
):
# not using kwargs because I want the argument list to be explicit
self.config = {
'producer_name': producer_name,
'bastion_host': bastion_host,
'bastion_region': bastion_region,
'bastion_name': bastion_name,
'bastion_username': bastion_username,
'public_key_file_path': public_key_file_path,
'local_proxy_port': local_proxy_port,
'cert_issuer_name': cert_issuer_name,
'database_host': database_host,
'database_name': database_name,
}
def provide_connection_details(self, environment, secrets, boto3):
self._boto3 = boto3
return super().provide_connection_details(environment, secrets)
def _get_bastion_host(self, environment):
bastion_host = self._fetch_config(environment, 'bastion_host', 'akeyless_mysql_bastion_host', default='')
bastion_name = self._fetch_config(environment, 'bastion_name', 'akeyless_mysql_bastion_name', default='')
if bastion_host:
return bastion_host
if bastion_name:
bastion_region = self._fetch_config(environment, 'bastion_region', 'akeyless_mysql_bastion_region')
return self._public_ip_from_name(bastion_name, bastion_region)
raise ValueError(
f"I was asked to connect to a database via an AKeyless dynamic producer through an SSH bastion with certificate auth, but I'm missing some configuration. I need either the bastion host or the name of the instance in AWS. These can be set in the call to `clearskies.backends.akeyless_aws.mysql_connection_dynamic_producer_via_ssh_cert_bastion()` by providing the 'bastion_host' or 'bastion_name' argument, or by setting an environment variable named 'akeyless_mysql_bastion_host' or 'akeyless_mysql_bastion_name'."
)
def _public_ip_from_name(self, bastion_name, bastion_region):
ec2 = self._boto3.client('ec2', region_name=bastion_region)
response = ec2.describe_instances(
Filters=[
{
'Name': 'tag:Name',
'Values': [bastion_name]
},
{
'Name': 'instance-state-name',
'Values': ['running']
},
],
)
if not response.get('Reservations'):
raise ValueError(
f"Could not find a running instance with the designated bastion name, '{bastion_name}' in region '{bastion_region}'"
)
if not response.get('Reservations')[0].get('Instances'):
raise ValueError(
f"Could not find a running instance with the designated bastion name, '{bastion_name}' in region '{bastion_region}'"
)
instance = response.get('Reservations')[0].get('Instances')[0]
if not instance.get('PublicIpAddress'):
raise ValueError(
f"I found the bastion instance with a name of '{bastion_name}' in region '{bastion_region}', but it doesn't have a public IP address"
)
return instance.get('PublicIpAddress')
| python |
"""
Functions and classes for aligning two lists using dynamic programming.
The algorithm is based on a slight variation of the method given at:
http://www.avatar.se/molbioinfo2001/dynprog/adv_dynamic.html. By default NIST
insertion, deletion and substitution penalties are used.
Author: Herman Kamper
Contact: [email protected]
Date: 2011, 2014, 2015
"""
import numpy as np
#-----------------------------------------------------------------------------#
# DYNAMIC PROGRAMMING CLASSES #
#-----------------------------------------------------------------------------#
class DPEntry:
"""Alignment type ("d", "i", "s", or "m") and an integer score."""
def __init__(self, align="m", score=0):
self.align = align
self.score = score
class DPError(object):
"""
Attributes
----------
n_del : int
n_ins : int
n_sub : int
n_match : int
n_total : int
"""
def __init__(self, n_del=0, n_ins=0, n_sub=0, n_match=0, n_total=0):
self.n_del = n_del
self.n_ins = n_ins
self.n_sub = n_sub
self.n_match = n_match
self.n_total = n_total
def __add__(self, other):
"""Add this DPError to another."""
if type(other) == DPError:
self.n_del += other.n_del
self.n_ins += other.n_ins
self.n_sub += other.n_sub
self.n_match += other.n_match
self.n_total += other.n_total
return self
__radd__ = __add__
__iadd__ = __add__
def __str__(self):
"""Returns a string representation of the alignment error."""
return (
"H = " + str(self.n_match) + ", D = " + str(self.n_del) + ", S = "
+ str(self.n_sub) + ", I = " + str(self.n_ins)+ ", N = " +
str(self.n_total)
)
def get_levenshtein(self):
"""Returns the Levenshtein distance of the alignment."""
return self.n_del + self.n_sub + self.n_ins
def get_accuracy(self):
"""
Calculates the accuracy given the stored errors using the formula:
Accuracy = (Matches - Insertions) / Total
"""
return float(self.n_match - self.n_ins) / self.n_total
def get_wer(self):
"""
Calculates the word error rate (WER) using:
WER = (Substitutions + Deletions + Insertions) / Total
"""
return float(self.n_sub + self.n_del + self.n_ins) / self.n_total
#-----------------------------------------------------------------------------#
# DYNAMIC PROGRAMMING ALIGNMENT FUNCTION #
#-----------------------------------------------------------------------------#
def dp_align(ref_list, test_list, ins_penalty=3, del_penalty=3, sub_penalty=4):
"""
Performs dynamic programming alignment of `ref_list` to `test_list`.
Parameters
----------
ref_list : list
test_list : list
"""
# Initialise the alignment matrix
dp_matrix = np.empty([len(test_list) + 1, len(ref_list) + 1], dtype = object)
for i in range(len(test_list) + 1):
for j in range(len(ref_list) + 1):
dp_matrix[i][j] = DPEntry()
    # Initialise the origin
dp_matrix[0][0].score = 0
dp_matrix[0][0].align = "m"
    # The first row is all deletions:
for j in range(1, len(ref_list) + 1):
dp_matrix[0][j].score = j*del_penalty
dp_matrix[0][j].align = "d"
# Fill dp_matrix
for i in range(1, len(test_list) + 1):
# First column is all insertions
dp_matrix[i][0].score = i*ins_penalty
dp_matrix[i][0].align = "i"
for j in range(1, len(ref_list) + 1):
del_score = dp_matrix[i, j - 1].score + del_penalty
ins_score = dp_matrix[i - 1, j].score + ins_penalty
if test_list[i - 1] == ref_list[j - 1]:
# Considering a match
match_score = dp_matrix[i - 1, j - 1].score
# Test for a match
if match_score <= del_score and match_score <= ins_score:
dp_matrix[i, j].score = match_score
dp_matrix[i, j].align = "m"
# Test for a deletion
elif del_score <= ins_score:
dp_matrix[i, j].score = del_score
dp_matrix[i, j].align = "d"
# Test for an insertion (only option left)
else:
dp_matrix[i, j].score = ins_score
dp_matrix[i, j].align = "i"
else:
# Considering a substitution
sub_score = dp_matrix[i - 1, j - 1].score + sub_penalty
# Test for a substitution
if sub_score < del_score and sub_score <= ins_score:
dp_matrix[i, j].score = sub_score
dp_matrix[i, j].align = "s"
# Test for a deletion
elif del_score <= ins_score:
dp_matrix[i, j].score = del_score
dp_matrix[i, j].align = "d"
# Test for an insertion (only option left)
else:
dp_matrix[i, j].score = ins_score
dp_matrix[i, j].align = "i"
# Perform alignment by tracking through the dp_matrix
dp_errors = DPError()
dp_errors.n_total = len(ref_list)
i = len(test_list)
j = len(ref_list)
while i > 0 or j > 0:
if dp_matrix[i, j].align == "m":
#print test_list[i - 1], ref_list[j - 1]
i -= 1
j -= 1
dp_errors.n_match += 1
elif dp_matrix[i, j].align == "s":
#print test_list[i - 1], ref_list[j - 1]
i -= 1
j -= 1
dp_errors.n_sub += 1
elif dp_matrix[i, j].align == "d":
#print "-", ref_list[j - 1]
j -= 1
dp_errors.n_del += 1
elif dp_matrix[i, j].align == "i":
#print test_list[i - 1], "-"
i -= 1
dp_errors.n_ins += 1
# Return the alignment results
return dp_errors
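# Illustrative sketch (not part of the original module): DPError objects can be
# summed, so per-pair alignments can be accumulated into a corpus-level error
# rate. The word lists below are made-up examples.
def example_corpus_error_rate():
    pairs = [
        ("the cat sat on the mat".split(), "the cat sat on mat".split()),
        ("a big dog".split(), "a big red dog".split()),
    ]
    total = DPError()
    for ref, test in pairs:
        total += dp_align(ref, test, ins_penalty=1, del_penalty=1, sub_penalty=1)
    return total.get_wer()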
#-----------------------------------------------------------------------------#
# MAIN FUNCTION #
#-----------------------------------------------------------------------------#
def main():
a = dp_align("recycling", "recycle", ins_penalty=1, del_penalty=1, sub_penalty=1)
print("Levenshtein distance between recycling and recycle: " + str(a.get_levenshtein()))
if __name__ == "__main__":
main()
| python |
from __future__ import absolute_import, division, print_function
import pkgutil
import numpy as np
import glue
def test_histogram_data():
data = glue.core.data.Data(label="Test Data")
comp_a = glue.core.data.Component(np.random.uniform(size=500))
comp_b = glue.core.data.Component(np.random.normal(size=500))
data.add_component(comp_a, 'uniform')
data.add_component(comp_b, 'normal')
return data
def test_data():
data = glue.core.data.Data(label="Test Data 1")
data2 = glue.core.data.Data(label="Teset Data 2")
comp_a = glue.core.data.Component(np.array([1, 2, 3]))
comp_b = glue.core.data.Component(np.array([1, 2, 3]))
comp_c = glue.core.data.Component(np.array([2, 4, 6]))
comp_d = glue.core.data.Component(np.array([1, 3, 5]))
data.add_component(comp_a, 'a')
data.add_component(comp_b, 'b')
data2.add_component(comp_c, 'c')
data2.add_component(comp_d, 'd')
return data, data2
def test_categorical_data():
data = glue.core.data.Data(label="Test Cat Data 1")
data2 = glue.core.data.Data(label="Teset Cat Data 2")
comp_x1 = glue.core.data.CategoricalComponent(np.array(['a', 'a', 'b']))
comp_y1 = glue.core.data.Component(np.array([1, 2, 3]))
comp_x2 = glue.core.data.CategoricalComponent(np.array(['c', 'a', 'b']))
comp_y2 = glue.core.data.Component(np.array([1, 3, 5]))
data.add_component(comp_x1, 'x1')
data.add_component(comp_y1, 'y1')
data2.add_component(comp_x2, 'x2')
data2.add_component(comp_y2, 'y2')
return data, data2
def test_image():
data = glue.core.data.Data(label="Test Image")
comp_a = glue.core.data.Component(np.ones((25, 25)))
data.add_component(comp_a, 'test_1')
comp_b = glue.core.data.Component(np.zeros((25, 25)))
data.add_component(comp_b, 'test_2')
return data
def test_cube():
data = glue.core.data.Data(label="Test Cube")
comp_a = glue.core.data.Component(np.ones((16, 16, 16)))
data.add_component(comp_a, 'test_3')
return data
| python |
import jax.numpy as jnp
from matplotlib import pyplot as plt
from numpy.linalg import inv
from jsl.sent.run import train
from jsl.sent.agents.kalman_filter import KalmanFilterReg
from jsl.sent.environments.base import make_matlab_demo_environment
def posterior_lreg(X, y, R, mu0, Sigma0):
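    # Conjugate Bayesian linear regression posterior (explanatory note matching
    # the code below): with prior N(mu0, Sigma0) and observation noise R,
    #   Sigma_n = (Sigma0^{-1} + X^T X / R)^{-1}
    #   mu_n    = Sigma_n (Sigma0^{-1} mu0 + X^T y / R)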
Sn_bayes_inv = inv(Sigma0) + X.T @ X / R
Sn_bayes = inv(Sn_bayes_inv)
mn_bayes = Sn_bayes @ (inv(Sigma0) @ mu0 + X.T @ y / R)
return mn_bayes, Sn_bayes
def main():
input_dim = 2
mu0 = jnp.zeros(input_dim)
Sigma0 = jnp.eye(input_dim) * 10.
F = jnp.eye(input_dim)
Q, R = 0, 1
print("1")
agent = KalmanFilterReg(mu0, Sigma0, F, Q, R)
env = make_matlab_demo_environment(test_batch_size=1)
nsteps = 21
params, rewards = train(agent, env, nsteps=nsteps)
print(params["mean"].shape)
print(params["cov"].shape)
w0_hist, w1_hist = params["mean"].T
w0_err, w1_err = jnp.sqrt(params["cov"][:, [0, 1], [0, 1]].T)
# Offline estimation
input_dim, num_train = 2, 21
(w0_post, w1_post), Sigma_post = posterior_lreg(jnp.squeeze(env.X_train),
jnp.squeeze(env.y_train),
R, mu0, Sigma0)
w0_std, w1_std = jnp.sqrt(Sigma_post[[0, 1], [0, 1]])
dict_figures = {}
timesteps = jnp.arange(num_train)
fig, ax = plt.subplots()
ax.errorbar(timesteps, w0_hist, w0_err, fmt="-o", label="$w_0$", color="black", fillstyle="none")
ax.errorbar(timesteps, w1_hist, w1_err, fmt="-o", label="$w_1$", color="tab:red")
ax.axhline(y=w0_post, c="black", label="$w_0$ batch")
ax.axhline(y=w1_post, c="tab:red", linestyle="--", label="$w_1$ batch")
ax.fill_between(timesteps, w0_post - w0_std, w0_post + w0_std, color="black", alpha=0.4)
ax.fill_between(timesteps, w1_post - w1_std, w1_post + w1_std, color="tab:red", alpha=0.4)
plt.legend()
ax.set_xlabel("time")
ax.set_ylabel("weights")
ax.set_ylim(-8, 4)
ax.set_xlim(-0.5, num_train)
dict_figures["linreg_online_kalman"] = fig
return dict_figures
if __name__=="__main__":
main()
| python |
import os
from datetime import datetime
import tensorflow as tf
from feature_extractor import MobileNet, Resnet, Vgg16
from modules import atrous_spatial_pyramid_pooling
class DeepLab(object):
def __init__(self, base_architecture, training=True, num_classes=21, ignore_label=255, batch_norm_momentum=0.9997, pre_trained_model=None, log_dir='data/logs/deeplab/'):
self.is_training = tf.placeholder(tf.bool, None, name='is_training')
self.num_classes = num_classes
self.ignore_label = ignore_label
self.inputs_shape = [None, None, None, 3]
self.labels_shape = [None, None, None, 1]
self.training = training
self.inputs = tf.placeholder(tf.float32, shape=self.inputs_shape, name='inputs')
self.labels = tf.placeholder(tf.uint8, shape=self.labels_shape, name='labels')
self.target_height = tf.placeholder(tf.int32, None, name='target_image_height')
self.target_width = tf.placeholder(tf.int32, None, name='target_image_width')
self.weight_decay = tf.placeholder(tf.float32, None, name='weight_decay')
self.regularizer = tf.contrib.layers.l2_regularizer(scale=self.weight_decay)
self.batch_norm_momentum = batch_norm_momentum
self.feature_map = self.backbone_initializer(base_architecture)
if pre_trained_model:
self.initialize_backbone_from_pretrained_weights(pre_trained_model)
self.outputs = self.model_initializer()
self.learning_rate = tf.placeholder(tf.float32, None, name='learning_rate')
self.loss = self.loss_initializer()
self.optimizer = self.optimizer_initializer()
# Initialize tensorflow session
self.saver = tf.train.Saver()
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
if self.training:
self.train_step = 0
now = datetime.now()
self.log_dir = os.path.join(log_dir, now.strftime('%Y%m%d-%H%M%S'))
self.writer = tf.summary.FileWriter(self.log_dir, tf.get_default_graph())
self.train_summaries, self.valid_summaries = self.summary()
def backbone_initializer(self, base_architecture):
with tf.variable_scope('backbone'):
if base_architecture == 'vgg16':
features = Vgg16(self.inputs, self.weight_decay, self.batch_norm_momentum)
elif base_architecture.startswith('resnet'):
n_layers = int(base_architecture.split('_')[-1])
features = Resnet(n_layers, self.inputs, self.weight_decay, self.batch_norm_momentum, self.is_training)
elif base_architecture.startswith('mobilenet'):
depth_multiplier = float(base_architecture.split('_')[-1])
features = MobileNet(depth_multiplier, self.inputs, self.weight_decay, self.batch_norm_momentum, self.is_training)
else:
raise ValueError('Unknown backbone architecture!')
return features
def model_initializer(self):
pools = atrous_spatial_pyramid_pooling(inputs=self.feature_map, filters=256, regularizer=self.regularizer)
logits = tf.layers.conv2d(inputs=pools, filters=self.num_classes, kernel_size=(1, 1), name='logits')
outputs = tf.image.resize_bilinear(images=logits, size=(self.target_height, self.target_width), name='resized_outputs')
return outputs
def loss_initializer(self):
labels_linear = tf.reshape(tensor=self.labels, shape=[-1])
not_ignore_mask = tf.to_float(tf.not_equal(labels_linear, self.ignore_label))
        # The locations given by `indices` take value on_value, while all other locations take value off_value.
        # For example, the ignore label 255 in the VOC2012 dataset becomes an all-zero one-hot vector, so the not_ignore_mask may be redundant.
onehot_labels = tf.one_hot(indices=labels_linear, depth=self.num_classes, on_value=1.0, off_value=0.0)
loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=tf.reshape(self.outputs, shape=[-1, self.num_classes]), weights=not_ignore_mask)
return loss
def optimizer_initializer(self):
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
return optimizer
def summary(self):
with tf.name_scope('loss'):
train_loss_summary = tf.summary.scalar('train', self.loss)
valid_loss_summary = tf.summary.scalar('valid', self.loss)
return train_loss_summary, valid_loss_summary
def train(self, inputs, labels, target_height, target_width, learning_rate, weight_decay):
_, outputs, train_loss, summaries = self.sess.run([self.optimizer, self.outputs, self.loss, self.train_summaries], feed_dict={self.inputs: inputs, self.labels: labels, self.learning_rate: learning_rate, self.target_height: target_height, self.target_width: target_width, self.weight_decay: weight_decay, self.is_training: True})
self.writer.add_summary(summaries, self.train_step)
self.train_step += 1
return outputs, train_loss
def validate(self, inputs, labels, target_height, target_width):
outputs, valid_loss, summaries = self.sess.run([self.outputs, self.loss, self.valid_summaries], feed_dict={self.inputs: inputs, self.labels: labels, self.target_height: target_height, self.target_width: target_width, self.is_training: False})
self.writer.add_summary(summaries, self.train_step)
return outputs, valid_loss
def test(self, inputs, target_height, target_width):
outputs = self.sess.run(self.outputs, feed_dict={self.inputs: inputs, self.target_height: target_height, self.target_width: target_width, self.is_training: False})
return outputs
def save(self, directory, filename):
if not os.path.exists(directory):
os.makedirs(directory)
self.saver.save(self.sess, os.path.join(directory, filename))
return os.path.join(directory, filename)
def load(self, filepath):
self.saver.restore(self.sess, filepath)
def initialize_backbone_from_pretrained_weights(self, path_to_pretrained_weights):
variables_to_restore = tf.contrib.slim.get_variables_to_restore(exclude=['global_step'])
valid_prefix = 'backbone/'
tf.train.init_from_checkpoint(path_to_pretrained_weights, {v.name[len(valid_prefix):].split(':')[0]: v for v in variables_to_restore if v.name.startswith(valid_prefix)})
def close(self):
if self.training:
self.writer.close()
self.sess.close()
if __name__ == '__main__':
deeplab = DeepLab('resnet_101', pre_trained_model='data/models/pretrained/resnet_101/resnet_v2_101.ckpt')
print('Graph compiled successfully.')
deeplab.close()
| python |
import zmq
from threading import Thread
import queue
from client_login import LoginClient
from enums import Host, Intervals
import time
class Client:
def __init__(self, target):
self.context = zmq.Context.instance()
self.username = None
self.queue = queue.Queue()
self.message = None
self.target = target
self.token = None
def run(self):
self.username, self.token = LoginClient().login()
self.main()
def main(self):
main_socket = self.context.socket(zmq.DEALER)
main_socket.setsockopt(zmq.IDENTITY, self.username.encode())
main_socket.connect("tcp://localhost:{}".format(Host.PORT))
print('Client connected!\n')
relay = ClientRelay(main_socket, self.queue, self.target, self.token)
relay.start()
while True:
self.message = input('')
self.queue.put(self.message)
class ClientRelay(Thread):
def __init__(self, main_socket, msg_queue, target, token):
self.main_socket = main_socket
self.msg_queue = msg_queue
self.target = target
self.token = token
Thread.__init__(self)
def run(self):
heartbeat = Thread(target=self.heartbeat)
heartbeat.start()
while True:
if self.main_socket.poll(Intervals.POLL_REFRESH_INTERVAL):
incoming_message = self.main_socket.recv_json()
self.message_received(incoming_message)
if not self.msg_queue.empty():
client_message = self.msg_queue.get()
data = {
'to': self.target,
'token': self.token,
'message': client_message
}
self.main_socket.send_json(data)
def message_received(self, incoming_message):
id_ = incoming_message['id']
new_message = incoming_message['message']
if new_message == 'Your token expired!':
print(
'WARNING : YOUR SESSION HAS EXPIRED, RESTART THE CLIENT OR RELOG!!!')
if id_ == self.target:
print('{}: {}'.format(id_, new_message))
return
def heartbeat(self):
data = {
'to': 'ping',
'token': self.token,
'message': None
}
while True:
time.sleep(Intervals.HEARTBEAT_INTERVAL)
self.main_socket.send_json(data)
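# Illustrative entry point sketch (not part of the original module; the target
# username is made up):
#
#     if __name__ == '__main__':
#         Client(target='alice').run()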
| python |
import serial
import bk169X.sim as _bksim
class PSCommError(Exception):
pass
class PowerSupply(object):
def __init__(
self,
device,
baud=9600,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
address='00',
timeout=1,
simulated=False
):
self.device = device
self.baud = baud
self.bytesize = bytesize
self.parity = parity
self.stopbits = stopbits
self.address = address
self.timeout = timeout
self.simulated = simulated
self._ser = None
self._cmd_rep = 'OK'
self._cmd_rep_fail = ''
def connect(self):
if self._ser is None:
if self.simulated:
self._ser = _bksim.SerialSim(timeout=self.timeout)
else:
self._ser = serial.Serial(
self.device,
self.baud,
bytesize=self.bytesize,
parity=self.parity,
stopbits=self.stopbits,
timeout=self.timeout
)
def close(self):
self._ser.close()
self._ser = None
@staticmethod
def _float_to_fmt(value, order, digits):
return '{value:0>{digits:d}.0f}'.format(value=value*10**order, digits=digits)
@staticmethod
def _fmt_to_float(valstr, order):
return float(valstr)/10**order
def _write(self, str_val):
str_val += '\r'
byte_val = str_val.encode('ascii', 'ignore')
self._ser.write(byte_val)
def _readline(self):
eol = b'\r'
length_eol = len(eol)
line = bytearray()
while True:
c = self._ser.read(1)
if c:
line += c
if line[-length_eol:] == eol:
break
else:
break
return line.decode('ascii', 'ignore').rstrip('\r')
def remote(self):
self.cmd('SESS')
def local(self):
self.cmd('ENDS')
def off(self):
self.cmd('SOUT', '1')
def on(self):
self.cmd('SOUT', '0')
def voltage(self, voltage=None):
if voltage is None:
resp = self.cmd('GETD')
return self._fmt_to_float(resp[:4], 2)
else:
self.cmd('VOLT', self._float_to_fmt(voltage, 1, 3))
def current(self, current=None):
if current is None:
resp = self.cmd('GETD')
return self._fmt_to_float(resp[4:-1], 3)
else:
self.cmd('CURR', self._float_to_fmt(current, 2, 3))
def reading(self):
resp = self.cmd('GETD')
return self._fmt_to_float(resp[:4], 2), self._fmt_to_float(resp[4:-1], 3), bool(int(resp[-1]))
def setpoint(self, voltage=None, current=None):
digits = 3
if voltage is None and current is None:
resp = self.cmd('GETS')
return self._fmt_to_float(resp[:digits], 1), self._fmt_to_float(resp[digits:], 2)
else:
if voltage is not None:
self.cmd('VOLT', self._float_to_fmt(voltage, 1, digits))
if current is not None:
self.cmd('CURR', self._float_to_fmt(current, 2, digits))
def maximum(self):
digits = 3
resp = self.cmd('GMAX')
return self._fmt_to_float(resp[:digits], 1), self._fmt_to_float(resp[digits:], 2)
def voltage_limit(self, voltage=None):
if voltage is None:
resp = self.cmd('GOVP')
return self._fmt_to_float(resp, 1)
else:
self.cmd('SOVP', self._float_to_fmt(voltage, 1, 3))
def getd(self):
return self.cmd('GETD')
def cmd(self, cmd, value=None):
if self._ser is None:
self.connect()
cmd += self.address
if value is not None:
cmd += value
self._write(cmd)
output = None
while True:
line = self._readline()
if line == self._cmd_rep:
break
elif line == self._cmd_rep_fail:
raise PSCommError(
"No command 'OK' response returned by power supply within {0:.1f} s".format(self.timeout)
)
else:
if output is None:
output = line
else:
raise PSCommError("More than one line output returned by power supply")
return output
def __enter__(self):
self.connect()
return self
def __exit__(self, type, value, traceback):
self.close()
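# Illustrative usage sketch (not part of the original module; device path and
# setpoints are made up, and simulated=True relies on the bundled simulator):
#
#     with PowerSupply('/dev/ttyUSB0', simulated=True) as ps:
#         ps.remote()                             # switch the supply to remote control
#         ps.setpoint(voltage=12.0, current=0.5)  # program 12 V / 0.5 A
#         ps.on()
#         volts, amps, cc_mode = ps.reading()     # read back the actual output
#         ps.off()
#         ps.local()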
| python |
#!/home/sunnymarkliu/software/miniconda2/bin/python
# _*_ coding: utf-8 _*_
"""
VGG net implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits
VGG net Paper: https://arxiv.org/pdf/1409.1556.pdf
Mnist Dataset: http://yann.lecun.com/exdb/mnist/
@author: MarkLiu
@time : 17-3-4 3:22 PM
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
class Vgg16(object):
"""
VggNet-16
"""
def __init__(self, num_classes, activation, skip_layer, weights_path='DEFAULT'):
self.NUM_CLASSES = num_classes
self.ACTIVATION = activation
        # layers for which loading of the pre-trained weights is skipped
self.SKIP_LAYER = skip_layer
if weights_path == 'DEFAULT':
self.WEIGHTS_PATH = 'vgg16.npy'
else:
self.WEIGHTS_PATH = weights_path
def conv2d(self, x, filter_height, filter_width, num_filters, stride_y, stride_x,
name, padding='SAME'):
"""
        Convolution layer.
        :param x: [batch, in_height, in_width, in_channels]
        :param num_filters: number of filters; the weight tensor has shape
            [filter_height, filter_width, in_channels, out_channels]
        :param stride_y, stride_x: sliding stride along each dimension; strides[0]=strides[3]=1
"""
# Get number of input channels
input_channels = int(x.get_shape()[-1])
with tf.variable_scope(name) as scope:
# Create tf variables for the weights and biases of the conv layer
weights = tf.get_variable('filter',
shape=[filter_height, filter_width, input_channels, num_filters])
biases = tf.get_variable('biases', shape=[num_filters])
conv = tf.nn.conv2d(x, weights,
strides=[1, stride_y, stride_x, 1],
padding=padding)
conv_bias = tf.nn.bias_add(conv, biases)
# Apply activation function
relu = self.ACTIVATION(conv_bias, name=scope.name)
return relu
def max_pool(self, x, filter_height, filter_width, stride_y, stride_x, name, padding='SAME'):
"""
        Pooling layer; with stride = ksize and padding='SAME' the output tensor is halved in size.
:param x: [batch, height, width, channels]
:param filter_height, filter_width: [1, height, width, 1]
:param stride_y, stride_x: [1, stride, stride, 1]
"""
return tf.nn.max_pool(x, ksize=[1, filter_height, filter_width, 1],
strides=[1, stride_y, stride_x, 1],
padding=padding, name=name)
def fully_connected(self, x, num_out, name, activation=True):
"""
        Fully connected layer; num_out specifies the number of output neurons.
"""
with tf.variable_scope(name) as scope:
shape = x.get_shape().as_list()
num_in = 1
for d in shape[1:]:
num_in *= d
x = tf.reshape(x, [-1, num_in])
weights = tf.get_variable('weights', shape=[num_in, num_out], trainable=True)
biases = tf.get_variable('biases', [num_out], trainable=True)
fc = tf.nn.xw_plus_b(x, weights, biases, name=scope.name)
if activation:
fc = self.ACTIVATION(fc)
return fc
def dropout(self, x, keep_prob):
"""
dropout layer
"""
return tf.nn.dropout(x, keep_prob)
def build_model(self):
"""
        Build the model.
"""
# input features
self.x = tf.placeholder(tf.float32, shape=[None, 224, 224, 3], name='input_layer')
self.y = tf.placeholder(tf.float32, [None, self.NUM_CLASSES], name='output_layer')
# learning_rate placeholder
self.learning_rate = tf.placeholder(tf.float32, name='learning_rate')
# dropout layer: keep probability, vgg default value:0.5
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# build model
# conv1: conv1_1 + conv1_2 + pool1
conv1_1 = self.conv2d(self.x, 3, 3, 64, 1, 1, padding='SAME', name='conv1_1')
conv1_2 = self.conv2d(conv1_1, 3, 3, 64, 1, 1, padding='SAME', name='conv1_2')
pool1 = self.max_pool(conv1_2, 3, 3, 2, 2, padding='SAME', name='pool1')
# conv2: conv2_1 + conv2_2 + pool2
conv2_1 = self.conv2d(pool1, 3, 3, 128, 1, 1, padding='SAME', name='conv2_1')
conv2_2 = self.conv2d(conv2_1, 3, 3, 128, 1, 1, padding='SAME', name='conv2_2')
pool2 = self.max_pool(conv2_2, 3, 3, 2, 2, padding='SAME', name='pool2')
# conv3: conv3_1 + conv3_2 + conv3_3 + pool3
conv3_1 = self.conv2d(pool2, 3, 3, 256, 1, 1, padding='SAME', name='conv3_1')
conv3_2 = self.conv2d(conv3_1, 3, 3, 256, 1, 1, padding='SAME', name='conv3_2')
conv3_3 = self.conv2d(conv3_2, 3, 3, 256, 1, 1, padding='SAME', name='conv3_3')
pool3 = self.max_pool(conv3_3, 3, 3, 2, 2, padding='SAME', name='pool3')
# conv4: conv4_1 + conv4_2 + conv4_3 + pool4
conv4_1 = self.conv2d(pool3, 3, 3, 512, 1, 1, padding='SAME', name='conv4_1')
conv4_2 = self.conv2d(conv4_1, 3, 3, 512, 1, 1, padding='SAME', name='conv4_2')
conv4_3 = self.conv2d(conv4_2, 3, 3, 512, 1, 1, padding='SAME', name='conv4_3')
pool4 = self.max_pool(conv4_3, 3, 3, 2, 2, padding='SAME', name='pool4')
# conv5: conv5_1 + conv5_2 + conv5_3 + pool5
conv5_1 = self.conv2d(pool4, 3, 3, 512, 1, 1, padding='SAME', name='conv5_1')
conv5_2 = self.conv2d(conv5_1, 3, 3, 512, 1, 1, padding='SAME', name='conv5_2')
conv5_3 = self.conv2d(conv5_2, 3, 3, 512, 1, 1, padding='SAME', name='conv5_3')
pool5 = self.max_pool(conv5_3, 3, 3, 2, 2, padding='SAME', name='pool5')
# fc6
fc6 = self.fully_connected(pool5, 4096, name='fc6')
dropout6 = self.dropout(fc6, self.keep_prob)
# fc7
fc7 = self.fully_connected(dropout6, 4096, name='fc7')
dropout7 = self.dropout(fc7, self.keep_prob)
# fc8
read_out_digits = self.fully_connected(dropout7, self.NUM_CLASSES, activation=False, name='fc8')
self.read_out_logits = tf.nn.softmax(read_out_digits, name="prob")
def init_train_test_op(self):
# loss function
self.loss_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.y,
logits=self.read_out_logits))
# training op
self.training_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss_function)
self.predict_op = tf.arg_max(self.read_out_logits, 1)
# predict
predict_matches = tf.equal(tf.arg_max(self.y, dimension=1),
tf.arg_max(self.read_out_logits, 1))
# accuracy metric
self.accuracy = tf.reduce_mean(tf.cast(predict_matches, tf.float32))
def classify(self, features_x):
"""
        Run classification prediction.
"""
feed_dict = {self.x: features_x, self.keep_prob: 1.0}
predict_y, prob = self.sess.run([self.predict_op, self.read_out_logits], feed_dict=feed_dict)
return predict_y, prob
def train(self, x, y, learning_rate, keep_prob=0.5):
"""
        Run one training step.
"""
feed_dict = {
self.x: x,
self.y: y,
self.keep_prob: keep_prob,
self.learning_rate: learning_rate
}
_, train_loss = self.sess.run([self.training_op, self.loss_function], feed_dict=feed_dict)
train_accuracy = self.get_accuracy(x, y)
return train_loss, train_accuracy
def get_accuracy(self, x, y):
"""
        Compute the accuracy on the given test data.
"""
feed_dict = {
self.x: x,
self.y: y,
self.keep_prob: 1.0
}
accuracy = self.sess.run(self.accuracy, feed_dict=feed_dict)
return accuracy
def init(self):
self.build_model()
self.init_train_test_op()
self.sess = tf.Session()
init_op = tf.global_variables_initializer()
self.sess.run(init_op)
def load_initial_weights(self):
"""
As the weights from https://mega.nz/#!YU1FWJrA!O1ywiCS2IiOlUCtCpI6HTJOMrneN-Qdv3ywQP5poecM come
as a dict of lists (e.g. weights['conv1_1'] is a list) and not as dict of
dicts (e.g. weights['conv1'] is a dict with keys 'weights' & 'biases') we
need a special load function
"""
print('Load the pretrained weights into the non-trainable layer...')
# Load the weights into memory
weights_dict = np.load(self.WEIGHTS_PATH, encoding='bytes').item()
# Loop over all layer names stored in the weights dict
for op_name in weights_dict:
# Check if the layer is one of the layers that should be reinitialized
if op_name not in self.SKIP_LAYER:
with tf.variable_scope(op_name, reuse=True):
# Loop over list of weights/biases and assign them to their corresponding tf variable
for data in weights_dict[op_name]:
# Biases
if len(data.shape) == 1:
print('load bias' + op_name)
var = tf.get_variable('biases', trainable=False)
self.sess.run(var.assign(data))
# full connected layer weights
elif len(data.shape) == 2:
print('load Weights' + op_name)
var = tf.get_variable('weights', trainable=False)
self.sess.run(var.assign(data))
# cnn layer filters
else:
print('load filter' + op_name)
var = tf.get_variable('filter', trainable=False)
self.sess.run(var.assign(data))
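# Illustrative usage sketch (not part of the original module; tf.nn.relu, the
# skip list and the batch tensors are assumptions):
#
#     vgg = Vgg16(num_classes=10, activation=tf.nn.relu, skip_layer=['fc8'])
#     vgg.init()                    # builds the graph and starts a session
#     vgg.load_initial_weights()    # loads vgg16.npy into the non-skipped layers
#     # loss, acc = vgg.train(batch_x, batch_y, learning_rate=1e-4)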
| python |
from __future__ import absolute_import
HORIZON_CONFIG = {
# Allow for ordering dashboards; list or tuple if provided.
'dashboards': ["module", "portal"],
# Name of a default dashboard; defaults to first alphabetically if None
'default_dashboard': "portal",
# Default redirect url for users' home
'user_home': "",
# URL for additional help with this site.
'help_url': None,
# Exception configuration.
'exceptions': {'unauthorized': [],
'not_found': [],
'recoverable': []},
# Password configuration.
'password_validator': {'regex': '.*',
'help_text': ("Password is not accepted")},
'password_autocomplete': 'on',
# AJAX settings for JavaScript
'ajax_queue_limit': 10,
'ajax_poll_interval': 2500,
'auto_fade_alerts': {
'delay': 3000,
'fade_duration': 1500,
'types': ['alert-success', 'alert-info']
},
'angular_modules': [],
'js_files': [],
'js_spec_files': [],
'modal_backdrop': 'static'
}
| python |
#Team Zephyr
#necessary libraries to be imported
import nmap
import netifaces
from nmap import PortScanner
import socket
import multiprocessing
import subprocess
import os
import threading
import time
import re
import pdb
import numpy
HOST_IP = []  # contains the IP addresses of the devices connected to the network.
HOST_MAC = []  # contains the MAC addresses of the devices connected to the network.
PORTS = []  # contains the open ports of the devices connected to the network.
# Matrix to stuff ports, MAC, and other values into
A = numpy.matrix(["port", "state", "name", "product", "version", "extrainfo", "cpe", "ip"])
drone_ip = '0.0.0.0' #contains the ip address of our system
#get router ip address
def get_router_ip():
gws = netifaces.gateways()
router_ip = list(gws['default'].values())[0][0]
print("Router IP: " + router_ip)
'''
scan the local network for connected devices
INPUT: null
OUTPUT: fills out HOST_IP and HOST_MAC
'''
def search_network():
stream = os.popen('arp-scan -l')
output = stream.read()
for line in output.split('\n'):
ip = re.findall(r'[0-9]+(?:\.[0-9]+){3}', line)
mac = re.findall(r'(?:[0-9a-fA-F]:?){12}', line)
if ip:
HOST_IP.append(ip[0])
if mac:
HOST_MAC.append(mac[0])
'''
get the IP address of the current system
INPUT: null
OUTPUT: returns the IP address of the system
'''
def get_my_ip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
s.close()
return ip
'''
does a regular nmap scan to gather information about the devices on the network
INPUT: ip address
OUTPUT: fills the matrix 'A' with information about the open ports found
'''
def nmap_scan(tgt_host):
nmScan = nmap.PortScanner()
nmScan.scan(tgt_host, '21-443')
# run a loop to print all the found result about the ports
for host in nmScan.all_hosts():
print('[+] Host IP : %s\n[+] Host Name : %s' % (host, nmScan[host].hostname()))
print('[+] State : %s' % nmScan[host].state())
for proto in nmScan[host].all_protocols():
print('----------')
print('[+] Protocol : %s' % proto)
lport = nmScan[host][proto].keys()
lport=sorted(lport)
for port in lport:
print ('[+] Port : %s\tState : %s' % (port, nmScan[host][proto][port]['state']))
print ('[+] Name : %s\tProduct : %s\tVersion : %s' % (nmScan[host][proto][port]['name'], nmScan[host][proto][port]['product'], nmScan[host][proto][port]['version']))
print ('[+] Additional info : %s\tCommon Platform Enumeration : %s' % (nmScan[host][proto][port]['extrainfo'], nmScan[host][proto][port]['cpe']))
global A
#if any of the values are null, fill the element with "null"
if nmScan[host][proto][port]['state'] == "":
nmScan[host][proto][port]['state'] = "null"
if nmScan[host][proto][port]['name'] == "":
nmScan[host][proto][port]['name'] = "null"
if nmScan[host][proto][port]['product'] == "":
nmScan[host][proto][port]['product'] = "null"
if nmScan[host][proto][port]['version'] == "":
nmScan[host][proto][port]['version'] = "null"
if nmScan[host][proto][port]['extrainfo'] == "":
nmScan[host][proto][port]['extrainfo'] = "null"
if nmScan[host][proto][port]['cpe'] == "":
nmScan[host][proto][port]['cpe'] = "null"
B = numpy.matrix([port, nmScan[host][proto][port]['state'], nmScan[host][proto][port]['name'], nmScan[host][proto][port]['product'], nmScan[host][proto][port]['version'], nmScan[host][proto][port]['extrainfo'], nmScan[host][proto][port]['cpe'], str(tgt_host)])
A = numpy.concatenate((A,B))
'''
prints out the address resolution (ARP) table of the devices on the network
INPUT: null
OUTPUT: stdout printing the ARP table
'''
def display_arp():
print('ARP index:')
print('IP address\t|\tMac Address')
print('--------------------------------------------')
for i in range(0, len(HOST_IP)):
print(HOST_IP[i] + '\t|\t' + HOST_MAC[i])
print('--------------------------------------------')
print('\n\n')
'''
Writes the gathered information to an output file
INPUT: null
OUTPUT: writes the contents of matrix A to the file "targets.txt"
'''
def write_report():
with open('targets.txt','w+') as fp:
for line in A:
numpy.savetxt(fp,line,fmt="%s ,")
fp.close()
return
'''
Main function (program begins here)
'''
if __name__ == '__main__':
drone_ip = get_my_ip()
search_network()
get_router_ip()
print("Drone IP: " + drone_ip)
display_arp()
for i in HOST_IP:
print('\n\n\n[*] Scanning ip address: ' + i)
nmap_scan(i)
print("Matrix containing information in file: \n")
print(A)
write_report()
| python |
#!/usr/bin/python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test the lint module."""
import collections
import os
import sys
sys.path.insert(0, os.path.abspath('%s/../../..' % os.path.dirname(__file__)))
from chromite.lib import cros_test_lib
import lint
class TestNode(object):
"""Object good enough to stand in for lint funcs"""
Args = collections.namedtuple('Args', ('args', 'vararg', 'kwarg'))
Arg = collections.namedtuple('Arg', ('name',))
def __init__(self, doc='', fromlineno=0, path='foo.py', args=(), vararg='',
kwarg=''):
self.doc = doc
self.lines = doc.split('\n')
self.fromlineno = fromlineno
self.file = path
self.args = self.Args(args=[self.Arg(name=x) for x in args],
vararg=vararg, kwarg=kwarg)
def argnames(self):
return self.args
class DocStringCheckerTest(cros_test_lib.TestCase):
"""Tests for DocStringChecker module"""
GOOD_FUNC_DOCSTRINGS = (
'Some string',
"""Short summary
Body of text.
""",
"""line o text
Body and comments on
more than one line.
Args:
moo: cow
Returns:
some value
Raises:
something else
""",
"""Short summary.
Args:
fat: cat
Yields:
a spoon
""",
)
BAD_FUNC_DOCSTRINGS = (
"""
bad first line
""",
""" whitespace is wrong""",
"""whitespace is wrong """,
"""Should be no trailing blank lines
Returns:
a value
"""
"""ok line
cuddled end""",
"""we want Args/Returns not Arguments/Return
Arguments:
Return:
""",
"""section order is wrong here
Raises:
Returns:
""",
"""sections lack whitespace between them
Args:
foo: bar
Returns:
yeah
""",
"""yields is misspelled
Yield:
a car
""",
"""Section name has bad spacing
Args:\x20\x20\x20
key: here
""",
"""too many blank lines
Returns:
None
""",
"""wrongly uses javadoc
@returns None
"""
)
# The current linter isn't good enough yet to detect these.
TODO_BAD_FUNC_DOCSTRINGS = (
"""The returns section isn't a proper section
Args:
bloop: de
returns something
""",
"""the indentation is incorrect
Args:
some: day
""",
)
def add_message(self, msg_id, node=None, line=None, args=None):
"""Capture lint checks"""
# We include node.doc here explicitly so the pretty assert message
    # includes it in the output automatically.
self.results.append((msg_id, node.doc, line, args))
def setUp(self):
self.results = []
self.checker = lint.DocStringChecker()
self.checker.add_message = self.add_message
def testGood_visit_function(self):
"""Allow known good docstrings"""
for dc in self.GOOD_FUNC_DOCSTRINGS:
self.results = []
node = TestNode(doc=dc)
self.checker.visit_function(node)
self.assertEqual(self.results, [],
msg='docstring was not accepted:\n"""%s"""' % dc)
def testBad_visit_function(self):
"""Reject known bad docstrings"""
for dc in self.BAD_FUNC_DOCSTRINGS:
self.results = []
node = TestNode(doc=dc)
self.checker.visit_function(node)
self.assertNotEqual(self.results, [],
msg='docstring was not rejected:\n"""%s"""' % dc)
def testSmoke_visit_module(self):
"""Smoke test for modules"""
self.checker.visit_module(TestNode(doc='foo'))
self.assertEqual(self.results, [])
self.checker.visit_module(TestNode(doc='', path='/foo/__init__.py'))
self.assertEqual(self.results, [])
def testSmoke_visit_class(self):
"""Smoke test for classes"""
self.checker.visit_class(TestNode(doc='bar'))
def testGood_check_first_line(self):
"""Verify _check_first_line accepts good inputs"""
# pylint: disable=W0212
docstrings = (
'Some string',
)
for dc in docstrings:
self.results = []
node = TestNode(doc=dc)
self.checker._check_first_line(node, node.lines)
self.assertEqual(self.results, [],
msg='docstring was not accepted:\n"""%s"""' % dc)
def testBad_check_first_line(self):
"""Verify _check_first_line rejects bad inputs"""
# pylint: disable=W0212
docstrings = (
'\nSome string\n',
)
for dc in docstrings:
self.results = []
node = TestNode(doc=dc)
self.checker._check_first_line(node, node.lines)
self.assertEqual(len(self.results), 1)
def testGoodFuncVarKwArg(self):
"""Check valid inputs for *args and **kwargs"""
# pylint: disable=W0212
for vararg in (None, 'args', '_args'):
for kwarg in (None, 'kwargs', '_kwargs'):
self.results = []
node = TestNode(vararg=vararg, kwarg=kwarg)
self.checker._check_func_signature(node)
self.assertEqual(len(self.results), 0)
def testMisnamedFuncVarKwArg(self):
"""Reject anything but *args and **kwargs"""
# pylint: disable=W0212
for vararg in ('arg', 'params', 'kwargs', '_moo'):
self.results = []
node = TestNode(vararg=vararg)
self.checker._check_func_signature(node)
self.assertEqual(len(self.results), 1)
for kwarg in ('kwds', '_kwds', 'args', '_moo'):
self.results = []
node = TestNode(kwarg=kwarg)
self.checker._check_func_signature(node)
self.assertEqual(len(self.results), 1)
def testGoodFuncArgs(self):
"""Verify normal args in Args are allowed"""
# pylint: disable=W0212
datasets = (
("""args are correct, and cls is ignored
Args:
moo: cow
""",
('cls', 'moo',), None, None,
),
("""args are correct, and self is ignored
Args:
moo: cow
*args: here
""",
('self', 'moo',), 'args', 'kwargs',
),
("""args are allowed to wrap
Args:
moo:
a big fat cow
that takes many lines
to describe its fatness
""",
('moo',), None, 'kwargs',
),
)
for dc, args, vararg, kwarg in datasets:
self.results = []
node = TestNode(doc=dc, args=args, vararg=vararg, kwarg=kwarg)
self.checker._check_all_args_in_doc(node, node.lines)
self.assertEqual(len(self.results), 0)
def testBadFuncArgs(self):
"""Verify bad/missing args in Args are caught"""
# pylint: disable=W0212
datasets = (
("""missing 'bar'
Args:
moo: cow
""",
('moo', 'bar',),
),
("""missing 'cow' but has 'bloop'
Args:
moo: cow
""",
('bloop',),
),
("""too much space after colon
Args:
moo: cow
""",
('moo',),
),
("""not enough space after colon
Args:
moo:cow
""",
('moo',),
),
)
for dc, args in datasets:
self.results = []
node = TestNode(doc=dc, args=args)
self.checker._check_all_args_in_doc(node, node.lines)
self.assertEqual(len(self.results), 1)
if __name__ == '__main__':
cros_test_lib.main()
| python |
from yunionclient.common import base
class Federatedrolebinding(base.ResourceBase):
pass
class FederatedrolebindingManager(base.StandaloneManager):
resource_class = Federatedrolebinding
keyword = 'federatedrolebinding'
keyword_plural = 'federatedrolebindings'
_columns = ["Federatednamespace_Id"]
| python |
"""Main module."""
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import pandas as pd
from sentimentbot.feeds import FinvizNewsFeed
class SentimentAnalyzer(object):
""" wrapper for the sentiment analyzer """
_analyzer = SentimentIntensityAnalyzer()
def __init__(self, ticker):
self._ticker = ticker
self._newsfeed = FinvizNewsFeed(ticker)
self._data = self._newsfeed.read()
def _analyze_rows(self, data):
sentiment = data["message_text"].apply(self._analyzer.polarity_scores)
return pd.DataFrame(sentiment.tolist())
def analyze(self):
sentiment_data = self._data.pipe(self._analyze_rows).rename(
columns={"neg": "negative", "neu": "neutral", "pos": "positive"}
)
assert (
sentiment_data.shape[0] == self._data.shape[0]
), "Mismatch in rows after analyzing."
data = self._data.join(sentiment_data)
return data
| python |
from __future__ import absolute_import
import os, sys
import imp
class docs(object):
def __init__(self, show=True, update=False):
"""
Class for viewing and building documentation
Parameters
----------
show : bool
If True, show docs after rebuilding (default: True)
update : bool
            If True, rebuild documentation to reflect code changes (default: False)
"""
self.build_path = '/'.join(imp.find_module('sct_toolkit')[1].split('/')[:-1])+'/docs'
self.source_path = self.build_path+'/_build/html/index.html'
if update:
self._update_docs()
if show:
self._show_docs()
def _show_docs(self):
""" Launch documentation in web browser """
try:
if sys.platform == 'darwin':
os.system('open {}'.format(self.source_path))
else:
                os.system('xdg-open {}'.format(self.source_path))
except IOError:
raise IOError("documentation file '{}' could not be opened".format(self.source_path))
def _update_docs(self):
""" Rebuild documentation """
os.system('make -C {} html'.format(self.build_path))
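# Illustrative usage (a sketch, not part of the original module; the import
# path below is an assumption): instantiating the class rebuilds and/or opens
# the docs, with paths resolved from the installed sct_toolkit package.
#   from sct_toolkit.docs import docs   # hypothetical import path
#   docs(show=True, update=True)        # rebuild the Sphinx HTML, then open index.html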
| python |
import time
from datetime import timedelta, datetime, timezone
from decimal import Decimal, localcontext, DefaultContext
import aiohttp
import asyncio
import signal
from aiokraken.model.asset import Asset
from aiokraken import markets, balance, ohlc, OHLC
from aiokraken.utils import get_kraken_logger, get_nonce
from aiokraken.rest.api import Server, API
from aiokraken.rest.client import RestClient
from aiokraken.model.timeframe import KTimeFrameModel
LOGGER = get_kraken_logger(__name__)
"""
A simple script. Duties:
- connect and retrieve market data
- connect and retrieve user/account data
- analyze current held assets (and their previous cost from trades history).
- interactively propose new trades that might be interesting (given some configuration as input)
MVP: cost of assets, propose orders to recover the cost + fees, and some profit (in the time elapsed between 2 runs)
This is a ONE-shot script: after one pass, it will end.
HOWEVER the proposed action shall be justified, with enough detail for a user to decide,
possibly including visual graph data...
"""
@asyncio.coroutine
def ask_exit(sig_name):
print("got signal %s: exit" % sig_name)
yield from asyncio.sleep(1.0)
asyncio.get_event_loop().stop()
# Ref for coroutine execution flow...
# https://stackoverflow.com/questions/30380110/mutually-recursive-coroutines-with-asyncio
def display(ohlc: OHLC):
return ohlc.show(block=False)
async def analysisbot(assets_allowed, assets_forbidden, markets_allowed, markets_forbidden,
minimize, maximize, lastrun, loop):
from aiokraken.config import load_api_keyfile
keystruct = load_api_keyfile()
# public
pub_client = RestClient(server=Server())
# TODO : use interface client (REST + WS) when ready
priv_client = RestClient(server=Server(
key=keystruct.get('key'),
secret=keystruct.get('secret')
))
mkts = await markets(restclient = priv_client)
    # Note : markets now also carries trades and orders, so we need to use the private client...
mkts.filter(whitelist=markets_allowed, blacklist=markets_forbidden)
blnc = await balance(restclient = priv_client)
blnc.filter(whitelist=assets_allowed, blacklist=assets_forbidden)
# normalize list of assets
minimize = [a.restname for _,a in blnc.assets.items() if a.restname in minimize or a.altname in minimize]
maximize = [a.restname for _,a in blnc.assets.items() if a.restname in maximize or a.altname in maximize]
try:
print(blnc)
# get tradable markets without leverage # Note: this is potentially for very long term -> no leverage
tradables = {t: m for t, m in mkts.details.items() if m.base in blnc}
print(tradables)
# picking appropriate timeframe...
now = datetime.now(tz=timezone.utc)
elapsed_time = now - lastrun
tf = KTimeFrameModel.one_minute
for t in KTimeFrameModel:
# picking a time frame detailed enough, but that gives us double time in one ohlc request
if t.to_timedelta() < elapsed_time < t.to_timedelta() * 360:
tf = t
break
# TODO : context manager for timeframe ?
for m, data in {m: d for m, d in mkts.items() if m in tradables}.items():
if data.pair.base not in minimize + maximize and data.pair.quote not in minimize + maximize:
tradables.pop(m)
continue # skip this one, not sure what to do with it...
            # Note : we might need it for conversion, but then we should load it lazily...
mdata = await data(tf) # update at specific timeframe to find interesting markets
if (mdata.tf_ohlc[tf].high == mdata.tf_ohlc[tf].low):
# nothing happened there, drop it
print(f"{m} looks flat. Dropping it.")
tradables.pop(m)
# TODO : check open orders to see if we need to make any decision...
# looping on the tradables left (we already have all relevant ohlc)
for m, mdata in {m: d for m, d in mkts.items() if m in tradables}.items():
# TODO : maybe check trend via open/close on the whole ohlc ?
pivot = mdata.tf_ohlc[tf].pivot(before=elapsed_time)
# TODO : maybe figure out best timeframe to compute resistance/ supports based on ohlc ???
print(f"Resistances / Supports for {m}: {pivot}")
# Ref : https://tradingstrategyguides.com/support-and-resistance-strategy/
# select markets based on pivot data:
if pivot.R1 - pivot.S1 < pivot.pivot * 0.0025: # check if the interval is bigger than fees
print(f"{m} Support Resistance interval data too flat to cover fees. Dropping it.")
continue
else:
# TODO : maybe lazy update of data only when required ? how to keep managing async control ?
# Think multiple agents, one per strategy... ( can access one or more markets... )
# NB: they might use the (immutable or time-updated only -> deterministic) data,
# even if requested by another...
ohlc = mdata.tf_ohlc[tf].ema(name="EMA_12", length=12).ema(name="EMA_26", length=26)
# TODO : simplify accessor...
# get last EMA value
print(f" Last EMAs for {m}: {ohlc.indicators['ema'].model.timedataframe.iloc[-1]}")
# how does it looks ?
plt = display(ohlc)
if mdata.pair.quote in minimize or mdata.pair.base in maximize:
# maybe try to buy
last_ema = ohlc.indicators["ema"].model.timedataframe.iloc[-1]
# check trend
if last_ema["EMA_12"] > last_ema["EMA_26"]: # TODO : some minimal different required ?
# trend up -> good to buy
print(f"==> {m} is trending up...")
# calculate good buy limit price
print(f"==> {pivot.S1} seems like a good limit price to buy...")
# TODO : compare with asset average cost
if mdata.pair.quote in blnc and blnc[mdata.pair.quote] > 0: # REMINDER, we want to minimize our asset in this case
# compute average cost basis
consc = await consolidated_tradecost(asset=blnc.assets[mdata.pair.quote],
amount=blnc[mdata.pair.quote], target_asset=blnc.assets[mdata.pair.base], markets=mkts, tf=tf)
print(f" This is currently equivalent to {consc}")
if pivot.S1 < consc.get(mdata.pair.base, Decimal()): # TODO : integrate fees in this...
# we buy cheaper, do it!
print(" We can buy cheaper than it did cost, lets do it !")
input("fake (y/n)")
else:
# errrhhhh, are you sure ??
print(" errhh we re gonna loose money here, are you sure ?")
input("fake (y/n)")
elif mdata.pair.base in blnc:
consc = await consolidated_tradecost(asset=blnc.assets[mdata.pair.base],
amount=blnc[mdata.pair.base], target_asset=blnc.assets[mdata.pair.quote], markets=mkts, tf=tf)
print(f" This is currently equivalent to {consc}")
if pivot.S1 < consc.get(mdata.pair.quote, Decimal()):
# we buy cheaper, do it!
print(" We can buy cheaper, lets do it !")
input("fake (y/n)")
else:
# errrhhhh, are you sure ??
print(" errhh we re gonna loose money here, are you sure ?")
input("fake (y/n)")
else:
print(f"Cant buy anything, we dont hold either {mdata.pair.base} nor {mdata.pair.quote} !")
break
# we are still in this loop: we have a cost basis
                elif mdata.pair.base in minimize or mdata.pair.quote in maximize:  # maybe try to sell (mirror of the buy branch above)
pass
# TMP skip until we get proper structure
#
# # how does it looks ?
# await ohlc.ashow()
#
# # try to sell
# last_ema = ohlc.indicators["ema"].model.timedataframe.iloc[-1]
# if last_ema["EMA_12"] < last_ema["EMA_26"]:
# # trend up -> good to sell
# print(f"==> {m} is trending down...")
# # calculate good limit price
# print(f"==> {pivot.S1} seems like a good limit price...")
# # TODO : compare with asset average cost
plt.close("all") # Close currently opened plots
except Exception as e:
LOGGER.error(f"Exception caught : {e}. Terminating...", exc_info=True)
raise
# TODO : backtest on previous day before implementing on current day... => need candles from Day -2
# Goal : choose markets that are likely to be profitable (given fee calculations).
async def consolidated_tradecost(asset: Asset, amount: Decimal, target_asset:Asset, markets, tf):
# compute average cost basis
    consc = dict()
c = markets.tradecost(asset=asset, amount=amount)
print(f"{asset}: {amount} cost from trades is {c}")
consc.setdefault(target_asset.restname, c.get(target_asset.restname, Decimal()))
# consolidate in the proper asset
# HOWTO ? might be overly complicated...
for n, a in c.items():
# TODO : better way to look into markets to retrieve price
if n != target_asset and target_asset.restname + n in markets.details.keys():
if tf not in markets.get(target_asset.restname + n).tf_ohlc:
await markets.get(target_asset.restname + n)(tf) # TODO : nicer interface for marketdata...
            nprice = markets.get(target_asset.restname + n).tf_ohlc[tf].close  # index by the timeframe requested above
# convert
consc[n] = consc.get(target_asset.restname, Decimal()) + c[n] / nprice
# TODO : units (pint) pleaaaaase...
else: # cannot convert this, keep it intact to not get a wrong cost
consc.update({n: a})
return consc
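# Illustrative shape of the returned mapping (hypothetical values, Kraken-style
# asset rest names): the cost basis is keyed by asset name as Decimal amounts,
# with legs that could not be converted left in their original asset.
#   {'ZEUR': Decimal('12.30'), 'XXBT': Decimal('0.0512')}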
if __name__ == '__main__':
from configparser import ConfigParser
config = ConfigParser()
config.read("analysis.ini")
loop = asyncio.get_event_loop()
for signame in ('SIGINT', 'SIGTERM'):
loop.add_signal_handler(
getattr(signal, signame),
lambda: asyncio.ensure_future(ask_exit(signame))
)
assets_ok = set(config["assets"].get('whitelist', "").split())
assets_block = set(config["assets"].get('blacklist',"").split())
assets_ok = assets_ok - assets_block
# TODO : wildcard ?
markets_ok = set(config["markets"].get('whitelist',"").split())
markets_block = set(config["markets"].get('blacklist',"").split())
markets_ok = markets_ok - markets_block
# TODO : wildcard ?
loop.run_until_complete(analysisbot(
assets_allowed=[a for a in assets_ok],
assets_forbidden=[a for a in assets_block],
markets_allowed=[m for m in markets_ok],
markets_forbidden=[m for m in markets_block],
minimize=config["analysis"]["minimize"].split(),
maximize=config["analysis"]["maximize"].split(),
lastrun=datetime.fromisoformat(config["analysis"].get("lastrun",
(datetime.now(tz=timezone.utc) - timedelta(days=1)).isoformat())),
loop=loop
))
if "lastrun" not in config.sections():
config.add_section('lastrun')
config.set('lastrun', 'datetime', datetime.now(tz=timezone.utc).isoformat())
# lets create that config file...
cfgfile = open("analysis.ini", 'w')
# reminder : comments will be gone !
config.write(cfgfile)
cfgfile.close()
| python |
from django.http.response import JsonResponse
from core.apps.basket.basket import Basket
from .models import Order, OrderItem
def add(request):
basket = Basket(request)
if request.POST.get('action') == 'post':
order_key = request.POST.get('order_key')
user_id = request.user.id
basket_total = basket.get_total_price()
# Check if order exists
if Order.objects.filter(order_key=order_key).exists():
pass
else:
order = Order.objects.create(
user_id=user_id, full_name='name', address1='add1',
address2='add2', total_paid=basket_total, order_key=order_key
)
order_id = order.pk
for item in basket:
OrderItem.objects.create(
order_id=order_id, product=item['product'],
price=item['price'], quantity=item['qty']
)
response = JsonResponse({'success': 'Return something'})
return response
def payment_confirmation(data):
Order.objects.filter(order_key=data).update(billing_status=True)
def user_orders(request):
user_id = request.user.id
orders = Order.objects.filter(user_id=user_id).filter(billing_status=True)
return orders
| python |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
import heapq
from typing import List
class Solution:
def nextLargerNodes(self, head: ListNode) -> List[int]:
heap, res, j = [], [], 0
while head:
res.append(0)
while heap and heap[0][0] < head.val:
val, i = heapq.heappop(heap)
res[i] = head.val
heapq.heappush(heap, (head.val, j))
j += 1
head = head.next
return res
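# Illustrative example (assumes the commented-out ListNode definition above):
# for the list 2 -> 1 -> 5 the heap sweep assigns each position the first
# strictly larger value that appears later, or 0 if none exists.
#   head = ListNode(2); head.next = ListNode(1); head.next.next = ListNode(5)
#   Solution().nextLargerNodes(head)  # -> [5, 5, 0]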
| python |
import psutil
import schedule
import time
from userClass import *
class LeagueScheduler:
#Setters
def set_processName(self, processName):
self.__processName = processName
def __set_inGame(self, inGame):
self.__inGame = inGame
#Getters
def get_processName(self):
return self.__processName
def get_inGame(self):
return self.__inGame
def updateUser(self, user):
self.__user = user
def isProcessRunning(self):
if (self.get_processName() in (p.name() for p in psutil.process_iter())):
return True
return False
def checkProcess(self):
        #finds the process once and doesn't run the participant check again until the next game
if(self.isProcessRunning() and self.get_inGame()):
pass
elif(self.isProcessRunning()):
self.__set_inGame(True)
participants = self.__user.getParticipants()
for summoner in participants:
self.__user.checkParticipant(summoner)
self.__user.pushToJSON(participants)
else:
self.__set_inGame(False)
def __init__(self,userClass):
self.set_processName("League of Legends.exe")
self.updateUser(userClass)
self.__set_inGame(False)
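# Illustrative wiring (a sketch, not in the original file): the module imports
# `schedule` and `time`, so checkProcess is presumably meant to be polled
# periodically along these lines (the user object and interval are assumptions).
#   watcher = LeagueScheduler(user)
#   schedule.every(30).seconds.do(watcher.checkProcess)
#   while True:
#       schedule.run_pending()
#       time.sleep(1)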
| python |
"""Backend agnostic array operations.
"""
import itertools
import numpy
from autoray import do, reshape, transpose, dag, infer_backend, get_dtype_name
from ..core import njit, qarray
from ..utils import compose
from ..linalg.base_linalg import norm_fro_dense
def asarray(array):
"""Maybe convert data for a tensor to use.
"""
should_convert_to_numpy = (
isinstance(array, (numpy.matrix, qarray)) or
not hasattr(array, 'shape'))
if should_convert_to_numpy:
return numpy.asarray(array)
return array
def ndim(array):
try:
return array.ndim
except AttributeError:
return len(array.shape)
# ------------- miscellaneous other backend agnostic functions -------------- #
def iscomplex(x):
if infer_backend(x) == 'builtins':
return isinstance(x, complex)
return 'complex' in get_dtype_name(x)
def norm_fro(x):
if isinstance(x, numpy.ndarray):
return norm_fro_dense(x.reshape(-1))
try:
return do('linalg.norm', reshape(x, [-1]), 2)
except AttributeError:
return do('sum', do('multiply', do('conj', x), x)) ** 0.5
def sensibly_scale(x):
"""Take an array and scale it *very* roughly such that random tensor
networks consisting of such arrays do not have gigantic norms.
"""
return x / norm_fro(x)**(1.5 / ndim(x))
def _unitize_qr(x):
"""Perform isometrization using the QR decomposition.
"""
fat = x.shape[0] < x.shape[1]
if fat:
x = transpose(x)
Q = do('linalg.qr', x)[0]
if fat:
Q = transpose(Q)
return Q
def _unitize_svd(x):
fat = x.shape[0] < x.shape[1]
if fat:
x = transpose(x)
Q = do('linalg.svd', x)[0]
if fat:
Q = transpose(Q)
return Q
def _unitize_exp(x):
r"""Perform isometrization using the using anti-symmetric matrix
exponentiation.
.. math::
U_A = \exp{A - A^\dagger}
If ``x`` is rectangular it is completed with zeros first.
"""
m, n = x.shape
d = max(m, n)
x = do('pad', x, [[0, d - m], [0, d - n]], 'constant', constant_values=0.0)
expx = do('linalg.expm', x - dag(x))
return expx[:m, :n]
def _unitize_modified_gram_schmidt(A):
"""Perform isometrization explicitly using the modified Gram Schmidt
procedure.
"""
m, n = A.shape
thin = m > n
if thin:
A = do('transpose', A)
Q = []
for j in range(0, min(m, n)):
q = A[j, :]
for i in range(0, j):
rij = do('tensordot', do('conj', Q[i]), q, 1)
q = q - rij * Q[i]
Q.append(q / do('linalg.norm', q, 2))
Q = do('stack', Q, axis=0, like=A)
if thin:
Q = do('transpose', Q)
return Q
_UNITIZE_METHODS = {
'qr': _unitize_qr,
'svd': _unitize_svd,
'exp': _unitize_exp,
'mgs': _unitize_modified_gram_schmidt,
}
def unitize(x, method='qr'):
"""Generate a isometric (or unitary if square) matrix from array ``x``.
Parameters
----------
x : array
The matrix to generate the isometry from.
method : {'qr', 'exp', 'mgs'}, optional
The method used to generate the isometry. Note ``'qr'`` is the fastest
and most robust but, for example, some libraries cannot back-propagate
through it.
"""
return _UNITIZE_METHODS[method](x)
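# Illustrative check (commented sketch; assumes a plain numpy input, which
# autoray's ``do`` dispatches natively): an isometry built from a tall random
# matrix satisfies Q^H Q = I on the smaller dimension.
#   x = numpy.random.randn(4, 3)
#   Q = unitize(x, method='qr')
#   numpy.allclose(Q.conj().T @ Q, numpy.eye(3))  # True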
@njit
def _numba_find_diag_axes(x, atol=1e-12): # pragma: no cover
"""Numba-compiled array diagonal axis finder.
Parameters
----------
x : numpy.ndarray
The array to search for diagonal axes.
atol : float
The tolerance with which to compare to zero.
Returns
-------
diag_axes : set[tuple[int]]
The set of pairs of axes which are diagonal.
"""
# create the set of pairs of matching size axes
diag_axes = set()
for d1 in range(x.ndim - 1):
for d2 in range(d1 + 1, x.ndim):
if x.shape[d1] == x.shape[d2]:
diag_axes.add((d1, d2))
# enumerate through every array entry, eagerly invalidating axis pairs
for index, val in numpy.ndenumerate(x):
for d1, d2 in diag_axes:
if (index[d1] != index[d2]) and (abs(val) > atol):
diag_axes.remove((d1, d2))
# all pairs invalid, nothing left to do
if len(diag_axes) == 0:
break
return diag_axes
def find_diag_axes(x, atol=1e-12):
"""Try and find a pair of axes of ``x`` in which it is diagonal.
Parameters
----------
x : array-like
The array to search.
atol : float, optional
Tolerance with which to compare to zero.
Returns
-------
tuple[int] or None
The two axes if found else None.
Examples
--------
>>> x = np.array([[[1, 0], [0, 2]],
... [[3, 0], [0, 4]]])
>>> find_diag_axes(x)
(1, 2)
Which means we can reduce ``x`` without loss of information to:
>>> np.einsum('abb->ab', x)
array([[1, 2],
[3, 4]])
"""
shape = x.shape
if len(shape) < 2:
return None
backend = infer_backend(x)
# use numba-accelerated version for numpy arrays
if backend == 'numpy':
diag_axes = _numba_find_diag_axes(x, atol=atol)
if diag_axes:
            # make it deterministic
return min(diag_axes)
return None
indxrs = do('indices', shape, like=backend)
for i, j in itertools.combinations(range(len(shape)), 2):
if shape[i] != shape[j]:
continue
if do('allclose', x[indxrs[i] != indxrs[j]], 0.0,
atol=atol, like=backend):
return (i, j)
return None
@njit
def _numba_find_antidiag_axes(x, atol=1e-12): # pragma: no cover
"""Numba-compiled array antidiagonal axis finder.
Parameters
----------
x : numpy.ndarray
The array to search for anti-diagonal axes.
atol : float
The tolerance with which to compare to zero.
Returns
-------
antidiag_axes : set[tuple[int]]
The set of pairs of axes which are anti-diagonal.
"""
# create the set of pairs of matching size axes
antidiag_axes = set()
for i in range(x.ndim - 1):
for j in range(i + 1, x.ndim):
if x.shape[i] == x.shape[j]:
antidiag_axes.add((i, j))
# enumerate through every array entry, eagerly invalidating axis pairs
for index, val in numpy.ndenumerate(x):
for i, j in antidiag_axes:
d = x.shape[i]
if (index[i] != d - 1 - index[j]) and (abs(val) > atol):
antidiag_axes.remove((i, j))
# all pairs invalid, nothing left to do
if len(antidiag_axes) == 0:
break
return antidiag_axes
def find_antidiag_axes(x, atol=1e-12):
"""Try and find a pair of axes of ``x`` in which it is anti-diagonal.
Parameters
----------
x : array-like
The array to search.
atol : float, optional
Tolerance with which to compare to zero.
Returns
-------
tuple[int] or None
The two axes if found else None.
Examples
--------
>>> x = np.array([[[0, 1], [0, 2]],
... [[3, 0], [4, 0]]])
>>> find_antidiag_axes(x)
(0, 2)
Which means we can reduce ``x`` without loss of information to:
>>> np.einsum('aba->ab', x[::-1, :, :])
array([[3, 4],
[1, 2]])
as long as we flip the order of dimensions on other tensors corresponding
    to the same index.
"""
shape = x.shape
if len(shape) < 2:
return None
backend = infer_backend(x)
# use numba-accelerated version for numpy arrays
if backend == 'numpy':
antidiag_axes = _numba_find_antidiag_axes(x, atol=atol)
if antidiag_axes:
            # make it deterministic
return min(antidiag_axes)
return None
indxrs = do('indices', shape, like=backend)
for i, j in itertools.combinations(range(len(shape)), 2):
di, dj = shape[i], shape[j]
if di != dj:
continue
if do('allclose', x[indxrs[i] != dj - 1 - indxrs[j]], 0.0,
atol=atol, like=backend):
return (i, j)
return None
@njit
def _numba_find_columns(x, atol=1e-12): # pragma: no cover
"""Numba-compiled single non-zero column axis finder.
Parameters
----------
x : array
The array to search.
atol : float, optional
Absolute tolerance to compare to zero with.
Returns
-------
set[tuple[int]]
Set of pairs (axis, index) defining lone non-zero columns.
"""
# possible pairings of axis + index
column_pairs = set()
for ax, d in enumerate(x.shape):
for i in range(d):
column_pairs.add((ax, i))
# enumerate over all array entries, invalidating potential column pairs
for index, val in numpy.ndenumerate(x):
if abs(val) > atol:
for ax, i in enumerate(index):
for pax, pi in column_pairs:
if ax == pax and pi != i:
column_pairs.remove((pax, pi))
# all potential pairs invalidated
if not len(column_pairs):
break
return column_pairs
def find_columns(x, atol=1e-12):
"""Try and find columns of axes which are zero apart from a single index.
Parameters
----------
x : array-like
The array to search.
atol : float, optional
Tolerance with which to compare to zero.
Returns
-------
tuple[int] or None
If found, the first integer is which axis, and the second is which
column of that axis, else None.
Examples
--------
>>> x = np.array([[[0, 1], [0, 2]],
... [[0, 3], [0, 4]]])
>>> find_columns(x)
(2, 1)
Which means we can happily slice ``x`` without loss of information to:
>>> x[:, :, 1]
array([[1, 2],
[3, 4]])
"""
shape = x.shape
if len(shape) < 1:
return None
backend = infer_backend(x)
# use numba-accelerated version for numpy arrays
if backend == 'numpy':
columns_pairs = _numba_find_columns(x, atol)
if columns_pairs:
return min(columns_pairs)
return None
indxrs = do('indices', shape, like=backend)
for i in range(len(shape)):
for j in range(shape[i]):
if do('allclose', x[indxrs[i] != j], 0.0, atol=atol, like=backend):
return (i, j)
return None
class PArray:
"""Simple array-like object that lazily generates the actual array by
calling a function with a set of parameters.
Parameters
----------
fn : callable
The function that generates the tensor data from ``params``.
params : sequence of numbers
The initial parameters supplied to the generating function like
``fn(params)``.
See Also
--------
PTensor
"""
def __init__(self, fn, params, shape=None):
self.fn = fn
self.params = params
self._shape = shape
self._shape_fn_id = id(fn)
def copy(self):
new = PArray(self.fn, self.params, self.shape)
new._data = self._data # for efficiency
return new
@property
def fn(self):
return self._fn
@fn.setter
def fn(self, x):
self._fn = x
self._data = None
@property
def params(self):
return self._params
@params.setter
def params(self, x):
self._params = asarray(x)
self._data = None
@property
def data(self):
if self._data is None:
self._data = self._fn(self._params)
return self._data
@property
def shape(self):
# if we haven't calculated shape or have updated function, get shape
_shape_fn_id = id(self.fn)
if (self._shape is None) or (self._shape_fn_id != _shape_fn_id):
self._shape = self.data.shape
self._shape_fn_id = _shape_fn_id
return self._shape
@property
def ndim(self):
return len(self.shape)
def add_function(self, g):
"""Chain the new function ``g`` on top of current function ``f`` like
``g(f(params))``.
"""
f = self.fn
self.fn = compose(g, f)
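# Illustrative usage (a sketch, not part of the original module): the array is
# only built when ``.data`` or ``.shape`` is first accessed, and assigning new
# parameters invalidates the cached data.
#   pa = PArray(lambda p: numpy.outer(p, p), numpy.array([1.0, 2.0, 3.0]))
#   pa.shape                                  # (3, 3) -- triggers the first evaluation
#   pa.params = numpy.array([1.0, 0.0, 1.0])  # clears the cache
#   pa.data                                   # lazily re-evaluated with the new parameters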
| python |
"""
The wntr.network package contains methods to define a water network model,
network controls, and graph representation of the network.
"""
from wntr.network.model import WaterNetworkModel, Node, Link, Junction, Reservoir, Tank, Pipe, Pump, Energy, Valve, Curve, LinkStatus, WaterNetworkOptions, LinkType, NodeType
from wntr.network.controls import ControlLogger, ControlAction, TimeControl, ConditionalControl, _CheckValveHeadControl, _MultiConditionalControl, _PRVControl
from wntr.network.graph import WntrMultiDiGraph
| python |
import cv2
img = cv2.imread("dog.jpg")
cv2.imshow("dog", img)
cv2.waitKey()
cv2.destroyAllWindows()
| python |
#-*- coding: utf-8 -*-
# https://github.com/Kodi-vStream/venom-xbmc-addons
# test: film stream vk, 1st page: "Dark Highlands" & "Tous ces enfants m'appartiennent"
from resources.hosters.hoster import iHoster
from resources.lib.handler.requestHandler import cRequestHandler
from resources.lib.parser import cParser
import re
UA = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:72.0) Gecko/20100101 Firefox/72.0'
class cHoster(iHoster):
def __init__(self):
self.__sDisplayName = 'Netu'
self.__sFileName = self.__sDisplayName
def getDisplayName(self):
return self.__sDisplayName
def setDisplayName(self, sDisplayName):
self.__sDisplayName = sDisplayName + ' [COLOR skyblue]' + self.__sDisplayName + '[/COLOR]'
def setFileName(self, sFileName):
self.__sFileName = sFileName
def getFileName(self):
return self.__sFileName
def setUrl(self, sUrl):
self.__sUrl = sUrl.replace('https', 'http')
self.__sUrl = self.__sUrl.replace('http://netu.tv/', 'http://hqq.tv/')
self.__sUrl = self.__sUrl.replace('http://waaw.tv/', 'http://hqq.tv/')
self.__sUrl = self.__sUrl.replace('http://vizplay.icu/', 'http://hqq.tv/')
self.__sUrl = self.__sUrl.replace('http://hqq.tv/player/hash.php?hash=', 'http://hqq.tv/player/embed_player.php?vid=')
self.__sUrl = self.__sUrl.replace('http://hqq.tv/watch_video.php?v=', 'http://hqq.tv/player/embed_player.php?vid=')
def __getIdFromUrl(self):
sPattern = 'https*:\/\/hqq\.(?:tv|player|watch)\/player\/embed_player\.php\?vid=([0-9A-Za-z]+)'
oParser = cParser()
aResult = oParser.parse(self.__sUrl, sPattern)
if (aResult[0] == True):
return aResult[1][0]
return ''
def getPluginIdentifier(self):
return 'netu'
def isDownloadable(self):
return False
def getMediaLink(self):
return self.__getMediaLinkForGuest()
def GetHost(self,sUrl):
oParser = cParser()
sPattern = 'https*:\/\/(.+?)\/'
aResult = oParser.parse(sUrl, sPattern)
if aResult[0]:
return aResult[1][0]
return ''
def __getMediaLinkForGuest(self):
api_call = ''
ids = self.__getIdFromUrl()
self.__sUrl = 'http://hqq.tv/player/embed_player.php?vid=' + ids + '&autoplay=no'
oRequestHandler = cRequestHandler(self.__sUrl)
oRequestHandler.addHeaderEntry('User-Agent', UA)
html = oRequestHandler.request()
vid = re.search("videokeyorig *= *\'(.+?)\'", html, re.DOTALL).group(1)
url = "time=1&ver=0&secure=0&adb=0%2F&v={}&token=>=&embed_from=0&wasmcheck=1".format(vid)
oRequestHandler = cRequestHandler('https://hqq.tv/player/get_md5.php?' + url)
oRequestHandler.addHeaderEntry('User-Agent', UA)
oRequestHandler.addHeaderEntry('Accept', '*/*')
oRequestHandler.addHeaderEntry('Accept-Language', 'fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3')
oRequestHandler.addHeaderEntry('x-requested-with', 'XMLHttpRequest')
oRequestHandler.addHeaderEntry('Referer', self.__sUrl)
#ok
oRequestHandler.request()
api_call = oRequestHandler.getRealUrl()
if (api_call):
return True, api_call + '.mp4.m3u8' + '|User-Agent=' + UA
return False, False
| python |
__author__ = 'Richard Lincoln, [email protected]'
""" This example demonstrates how to use the discrete Roth-Erev reinforcement
learning algorithms to learn the n-armed bandit task. """
import pylab
import scipy
from pybrain.rl.agents import LearningAgent
from pybrain.rl.explorers import BoltzmannExplorer #@UnusedImport
from pybrain.rl.experiments import Experiment
from pyreto.bandit import BanditEnvironment, BanditTask
from pyreto.roth_erev import RothErev, PropensityTable #@UnusedImport
from pyreto.roth_erev import VariantRothErev #@UnusedImport
payouts = scipy.array([[200.0, 300.0, 100.0], # Expected value: 210
[900.0, 400.0, 600.0], # Expected value: 510
[700.0, 600.0, 550.0], # Expected value: 595
[150.0, 50.0, 1000.0], # Expected value: 147.5
[700.0, 800.0, 900.0]]) # Expected value: 790
distrib = scipy.array([[0.7, 0.2, 0.1],
[0.1, 0.6, 0.3],
[0.4, 0.2, 0.3],
[0.5, 0.45, 0.05],
[0.3, 0.5, 0.2]])
env = BanditEnvironment(payouts, distrib)
task = BanditTask(env)
table = PropensityTable(payouts.shape[0])
table.initialize(500.0)
#learner = RothErev(experimentation=0.55, recency=0.3)
learner = VariantRothErev(experimentation=0.65, recency=0.3)
learner.explorer = BoltzmannExplorer(tau=100.0, decay=0.9995)
agent = LearningAgent(table, learner)
experiment = Experiment(task, agent)
epis = int(1e1)
batch = 2
avgRewards = scipy.zeros(epis)
allActions = scipy.zeros(epis * batch)
c = 0
for i in range(epis):
experiment.doInteractions(batch)
avgRewards[i] = scipy.mean(agent.history["reward"])
allActions[c:c + batch] = agent.history["action"].flatten() + 1
agent.learn()
agent.reset()
c += batch
pylab.figure(figsize=(16, 6))
#pylab.plot(avgRewards)
pylab.plot(allActions)
pylab.show()
| python |
import abc
from enum import Enum as EnumCLS
from typing import Any, List, Optional, Tuple, Type
import pendulum
from starlette.requests import Request
from mongoengine import Document
from mongoengine import QuerySet
from fastapi_admin import constants
from fastapi_admin.widgets.inputs import Input
class Filter(Input):
def __init__(self, name: str, label: str, placeholder: str = "", null: bool = True, **context):
"""
Parent class for all filters
:param name: model field name
:param label:
"""
super().__init__(name=name, label=label, placeholder=placeholder, null=null, **context)
async def get_queryset(self, request: Request, value: Any, qs: QuerySet):
value = await self.parse_value(request, value)
filters = {self.context.get("name"): value}
return qs.filter(**filters)
class Search(Filter):
template = "widgets/filters/search.html"
def __init__(
self,
name: str,
label: str,
search_mode: str = "equal",
placeholder: str = "",
null: bool = True,
):
"""
Search for keyword
:param name:
:param label:
:param search_mode: equal,contains,icontains,startswith,istartswith,endswith,iendswith,iexact,search
"""
if search_mode == "equal":
super().__init__(name, label, placeholder, null)
else:
super().__init__(name + "__" + search_mode, label, placeholder)
self.context.update(search_mode=search_mode)
class Datetime(Filter):
template = "widgets/filters/datetime.html"
def __init__(
self,
name: str,
label: str,
format_: str = constants.DATETIME_FORMAT_MOMENT,
null: bool = True,
placeholder: str = "",
):
"""
Datetime filter
:param name:
:param label:
:param format_: the format of moment.js
"""
super().__init__(
name + "__range", label, null=null, format=format_, placeholder=placeholder
)
async def parse_value(self, request: Request, value: Optional[str]):
if value:
ranges = value.split(" - ")
return pendulum.parse(ranges[0]), pendulum.parse(ranges[1])
async def render(self, request: Request, value: Tuple[pendulum.DateTime, pendulum.DateTime]):
format_ = self.context.get("format")
if value is not None:
value = value[0].format(format_) + " - " + value[1].format(format_)
return await super().render(request, value)
class Date(Datetime):
def __init__(
self,
name: str,
label: str,
format_: str = constants.DATE_FORMAT_MOMENT,
null: bool = True,
placeholder: str = "",
):
super().__init__(
name=name, label=label, format_=format_, null=null, placeholder=placeholder
)
self.context.update(date=True)
class Select(Filter):
template = "widgets/filters/select.html"
def __init__(self, name: str, label: str, null: bool = True):
super().__init__(name, label, null=null)
@abc.abstractmethod
async def get_options(self):
"""
return list of tuple with display and value
[("on",1),("off",2)]
:return: list of tuple with display and value
"""
async def render(self, request: Request, value: Any):
options = await self.get_options()
self.context.update(options=options)
return await super(Select, self).render(request, value)
class Enum(Select):
def __init__(
self,
enum: Type[EnumCLS],
name: str,
label: str,
enum_type: Type = int,
null: bool = True,
):
super().__init__(name=name, label=label, null=null)
self.enum = enum
self.enum_type = enum_type
async def parse_value(self, request: Request, value: Any):
return self.enum(self.enum_type(value))
async def get_options(self):
options = [(v.name, v.value) for v in self.enum]
if self.context.get("null"):
options = [("", "")] + options
return options
class ForeignKey(Select):
def __init__(self, model: Type[Document], name: str, label: str, null: bool = True):
super().__init__(name=name, label=label, null=null)
self.model = model
async def get_options(self):
ret = await self.get_models()
options = [
(
str(x),
x.pk,
)
for x in ret
]
if self.context.get("null"):
options = [("", "")] + options
return options
async def get_models(self):
return await self.model.all()
async def render(self, request: Request, value: Any):
if value is not None:
value = int(value)
return await super().render(request, value)
class DistinctColumn(Select):
def __init__(self, model: Type[Document], name: str, label: str, null: bool = True):
super().__init__(name=name, label=label, null=null)
self.model = model
self.name = name
async def get_options(self):
ret = await self.get_values()
options = [
(
str(x[0]),
str(x[0]),
)
for x in ret
]
if self.context.get("null"):
options = [("", "")] + options
return options
async def get_values(self):
return await self.model.all().distinct().values_list(self.name)
class Boolean(Select):
async def get_options(self) -> List[Tuple[str, str]]:
"""Return list of possible values to select from."""
options = [
("TRUE", "true"),
("FALSE", "false"),
]
if self.context.get("null"):
options.insert(0, ("", ""))
return options
async def get_queryset(self, request: Request, value: str, qs: QuerySet) -> QuerySet:
"""Return filtered queryset."""
filters = {self.context.get("name"): (value == "true")}
return qs.filter(**filters)
| python |
from itertools import chain
from functools import lru_cache
import abc
import collections
from schema import Schema
from experta.pattern import Bindable
from experta.utils import freeze, unfreeze
from experta.conditionalelement import OperableCE
from experta.conditionalelement import ConditionalElement
class BaseField(metaclass=abc.ABCMeta):
@abc.abstractmethod
def validate(self, data):
"""Raise an exception on invalid data."""
pass
class Field(BaseField):
NODEFAULT = object()
def __init__(self, schema_definition, mandatory=False, default=NODEFAULT):
self.validator = Schema(schema_definition)
self.mandatory = mandatory
self.default = default
def validate(self, data):
self.validator.validate(unfreeze(data))
class Validable(type):
def __new__(mcl, name, bases, nmspc):
# Register fields
newnamespace = {"__fields__": dict()}
for base in bases:
if isinstance(base, Validable):
for key, value in base.__fields__.items():
if key.startswith('_') and key[1:].isdigit():
key = int(key[1:])
newnamespace["__fields__"][key] = value
for key, value in nmspc.items():
if key.startswith('_') and key[1:].isdigit():
key = int(key[1:])
if isinstance(value, BaseField):
newnamespace["__fields__"][key] = value
else:
newnamespace[key] = value
return super(Validable, mcl).__new__(mcl, name, bases, newnamespace)
class Fact(OperableCE, Bindable, dict, metaclass=Validable):
"""Base Fact class"""
def __init__(self, *args, **kwargs):
self.update(dict(chain(enumerate(args), kwargs.items())))
self.__defaults = dict()
def __missing__(self, key):
if key not in self.__fields__:
raise KeyError(key)
else:
default = self.__fields__[key].default
if default is Field.NODEFAULT:
raise KeyError(key)
elif key in self.__defaults:
return self.__defaults[key]
elif isinstance(default, collections.abc.Callable):
return self.__defaults.setdefault(key, default())
else:
return self.__defaults.setdefault(key, default)
def __setitem__(self, key, value):
if self.__factid__ is None:
super().__setitem__(key, freeze(value))
else:
raise RuntimeError("A fact can't be modified after declaration.")
def validate(self):
for name, field in self.__fields__.items():
if name in self:
try:
field.validate(self[name])
except Exception as exc:
raise ValueError(
"Invalid value on field %r for fact %r"
% (name, self))
elif field.mandatory:
raise ValueError(
"Mandatory field %r is not defined for fact %r"
% (name, self))
else:
pass
def update(self, mapping):
for k, v in mapping.items():
self[k] = v
def as_dict(self):
"""Return a dictionary containing this `Fact` data."""
return {k: unfreeze(v)
for k, v in self.items()
if not self.is_special(k)}
def copy(self):
"""Return a copy of this `Fact`."""
content = [(k, v) for k, v in self.items()]
intidx = [(k, v) for k, v in content if isinstance(k, int)]
args = [v for k, v in sorted(intidx)]
kwargs = {k: v
for k, v in content
if not isinstance(k, int) and not self.is_special(k)}
return self.__class__(*args, **kwargs)
def has_field_constraints(self):
return any(isinstance(v, ConditionalElement) for v in self.values())
def has_nested_accessor(self):
return any(("__" in str(k).strip('__') for k in self.keys()))
@staticmethod
def is_special(key):
return (isinstance(key, str)
and key.startswith('__')
and key.endswith('__'))
@property
def __bind__(self):
return self.get('__bind__', None)
@__bind__.setter
def __bind__(self, value):
super().__setitem__('__bind__', value)
@property
def __factid__(self):
return self.get('__factid__', None)
@__factid__.setter
def __factid__(self, value):
super().__setitem__('__factid__', value)
@classmethod
def from_iter(cls, pairs):
obj = cls()
obj.update(dict(pairs))
return obj
def __str__(self): # pragma: no cover
if self.__factid__ is None:
return "<Undeclared Fact> %r" % self
else:
return "<f-%d>" % self.__factid__
def __repr__(self): # pragma: no cover
return "{}({})".format(
self.__class__.__name__,
", ".join(
(repr(v) if isinstance(k, int) else "{}={!r}".format(k, v)
for k, v in self.items()
if not self.is_special(k))))
def __hash__(self):
try:
return self._hash
except AttributeError:
self._hash = hash(frozenset(self.items()))
return self._hash
def __eq__(self, other):
return (self.__class__ == other.__class__
and super().__eq__(other))
class InitialFact(Fact):
"""
InitialFact
"""
pass
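# Illustrative usage (a sketch, not part of the original module): fields
# declared on a Fact subclass are collected by the Validable metaclass into
# ``__fields__`` and enforced by ``validate()``.
#   class User(Fact):
#       name = Field(str, mandatory=True)
#       age = Field(int, default=0)
#   User(name="Ada").validate()   # passes; ``age`` falls back to its default 0
#   User(age=30).validate()       # raises ValueError: mandatory ``name`` is missing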
| python |
from tensorflow.keras.models import Sequential
import tensorflow.keras.layers as layers
import numpy as np
from os.path import join
import os
from invoke.context import Context
import unittest
import templates
import ennclave_inference as ennclave
import config as cfg
def common(backend: str):
target_dir = join(cfg.get_ennclave_home(), 'backend', 'generated')
preamble_backend = backend
if backend == 'sgx':
preamble_backend = 'sgx_enclave'
with open(join(target_dir, f'{backend}_forward.cpp'), 'w+') as forward_file:
forward_file.write(templates.preamble.render(backend=preamble_backend))
forward_file.write(
f"print_out(\"Hello, this is backend {backend}\\n\");")
forward_file.write(templates.postamble)
with open(join(target_dir, 'parameters.bin'), 'w') as parameter_file:
pass
with open(join(target_dir, 'sgx_config.xml'), 'w') as config_file:
config_file.write("""
<EnclaveConfiguration>
<ProdID>0</ProdID>
<ISVSVN>0</ISVSVN>
<StackMaxSize>0x40000</StackMaxSize>
<HeapInitSize>0x7e00000</HeapInitSize>
<HeapMaxSize>0x7e00000</HeapMaxSize>
<TCSNum>10</TCSNum>
<TCSPolicy>1</TCSPolicy>
<!-- Recommend changing 'DisableDebug' to 1 to make the sgx undebuggable for sgx release -->
<DisableDebug>0</DisableDebug>
<MiscSelect>0</MiscSelect>
<MiscMask>0xFFFFFFFF</MiscMask>
</EnclaveConfiguration>""")
context = Context()
with context.cd(cfg.get_ennclave_home()):
context.run('mkdir -p build')
with context.cd('build'):
# context.run('cmake ..')
context.run(f'make backend_{backend}')
if backend == 'native':
ennclave.native_forward(b'', 0, 0)
else:
ennclave.sgx_forward(b'', 0, 0)
# noinspection PyMethodMayBeStatic
class BasicTests(unittest.TestCase):
def test_native(self):
common('native')
@unittest.skipIf(os.environ.get('SGX_SDK') is None, "SGX is not available")
def test_sgx(self):
common('sgx')
| python |
import numpy as np
import os
import time
from . import util
from tensorboardX import SummaryWriter
import torch
class TBVisualizer:
def __init__(self, opt):
self._opt = opt
self._save_path = os.path.join(opt.checkpoints_dir, opt.name)
self._log_path = os.path.join(self._save_path, 'loss_log2.txt')
self._tb_path = os.path.join(self._save_path, 'summary.json')
self._writer = SummaryWriter(self._save_path)
with open(self._log_path, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
def __del__(self):
self._writer.close()
def display_current_results(self, visuals, it, is_train, save_visuals=True):
for label, image_numpy in visuals.items():
sum_name = '{}/{}'.format('Train' if is_train else 'Test', label)
# self._writer.add_image(sum_name, image_numpy, it)
I=torch.from_numpy(image_numpy).permute(2,0,1)
self._writer.add_image(sum_name, I/255, it)
if save_visuals:
util.save_image(image_numpy,
os.path.join(self._opt.checkpoints_dir, self._opt.name,
'event_imgs', sum_name, '%08d.png' % it))
self._writer.export_scalars_to_json(self._tb_path)
def plot_scalars(self, scalars, it, is_train):
for label, scalar in scalars.items():
sum_name = '{}/{}'.format('Train' if is_train else 'Test', label)
self._writer.add_scalar(sum_name, scalar, it)
def print_current_train_errors(self, epoch, i, iters_per_epoch, errors, t, visuals_were_stored):
log_time = time.strftime("[%d/%m/%Y %H:%M:%S]")
visuals_info = "v" if visuals_were_stored else ""
message = '%s (T%s, epoch: %d, it: %d/%d, t/smpl: %.3fs) ' % (log_time, visuals_info, epoch, i, iters_per_epoch, t)
for k, v in errors.items():
message += '%s:%.3f ' % (k, v)
print(message)
with open(self._log_path, "a") as log_file:
log_file.write('%s\n' % message)
def print_current_validate_errors(self, epoch, errors, t):
log_time = time.strftime("[%d/%m/%Y %H:%M:%S]")
message = '%s (V, epoch: %d, time_to_val: %ds) ' % (log_time, epoch, t)
for k, v in errors.items():
message += '%s:%.3f ' % (k, v)
print(message)
with open(self._log_path, "a") as log_file:
log_file.write('%s\n' % message)
def save_images(self, visuals):
for label, image_numpy in visuals.items():
image_name = '%s.png' % label
save_path = os.path.join(self._save_path, "samples", image_name)
util.save_image(image_numpy, save_path)
| python |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .instance_agent_command_source_details import InstanceAgentCommandSourceDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class InstanceAgentCommandSourceViaTextDetails(InstanceAgentCommandSourceDetails):
"""
The source of the command when provided using plain text.
"""
def __init__(self, **kwargs):
"""
Initializes a new InstanceAgentCommandSourceViaTextDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.compute_instance_agent.models.InstanceAgentCommandSourceViaTextDetails.source_type` attribute
of this class is ``TEXT`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param source_type:
The value to assign to the source_type property of this InstanceAgentCommandSourceViaTextDetails.
Allowed values for this property are: "TEXT", "OBJECT_STORAGE_URI", "OBJECT_STORAGE_TUPLE"
:type source_type: str
:param text:
The value to assign to the text property of this InstanceAgentCommandSourceViaTextDetails.
:type text: str
:param text_sha256:
The value to assign to the text_sha256 property of this InstanceAgentCommandSourceViaTextDetails.
:type text_sha256: str
"""
self.swagger_types = {
'source_type': 'str',
'text': 'str',
'text_sha256': 'str'
}
self.attribute_map = {
'source_type': 'sourceType',
'text': 'text',
'text_sha256': 'textSha256'
}
self._source_type = None
self._text = None
self._text_sha256 = None
self._source_type = 'TEXT'
@property
def text(self):
"""
**[Required]** Gets the text of this InstanceAgentCommandSourceViaTextDetails.
The plain text command.
:return: The text of this InstanceAgentCommandSourceViaTextDetails.
:rtype: str
"""
return self._text
@text.setter
def text(self, text):
"""
Sets the text of this InstanceAgentCommandSourceViaTextDetails.
The plain text command.
:param text: The text of this InstanceAgentCommandSourceViaTextDetails.
:type: str
"""
self._text = text
@property
def text_sha256(self):
"""
Gets the text_sha256 of this InstanceAgentCommandSourceViaTextDetails.
SHA-256 checksum value of the text content.
:return: The text_sha256 of this InstanceAgentCommandSourceViaTextDetails.
:rtype: str
"""
return self._text_sha256
@text_sha256.setter
def text_sha256(self, text_sha256):
"""
Sets the text_sha256 of this InstanceAgentCommandSourceViaTextDetails.
SHA-256 checksum value of the text content.
:param text_sha256: The text_sha256 of this InstanceAgentCommandSourceViaTextDetails.
:type: str
"""
self._text_sha256 = text_sha256
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import argparse
import pandas as pd
from funcs import shortpath
def print_inp(inp_file_name):
inp_file_full = pd.read_csv(inp_file_name, sep='\t', header=1, dtype=str)
for j in range(len(inp_file_full)):
inp_file = inp_file_full.loc[[j], :]
# format df for display
with pd.option_context('display.colheader_justify', 'left', 'display.max_rows', None,
'display.max_columns', None, 'display.max_colwidth', -1):
df_display = inp_file.copy()
site_name = os.path.basename(os.path.dirname(df_display.sam_path.values[0]))
df_display.sam_path = df_display.sam_path.map(shortpath)
df_display = df_display.T
df_display.rename(index={'dont_average_replicate_measurements': 'dont_average'},
inplace=True)
print("{:-^80}".format(" "+site_name+" "), end="\n")
print("\n".join([" | {}".format(i)
for i in df_display.to_string(header=False).split("\n")]))
print("{:-^80}".format(""))
def main():
parser = argparse.ArgumentParser(prog="parse_inp.py",
description="""Simple tools for inspecting inp
files""")
parser.add_argument('inp_file', nargs='*')
parser.add_argument('-p', '--print', action='store_true',
help="""print contents of inp file in readable format""")
args = vars(parser.parse_args())
inp_file_list = args.pop('inp_file')
for filename_inp in inp_file_list:
if args['print']:
print_inp(filename_inp)
if __name__ == "__main__":
main()
| python |
format = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
minimal_format = "%(message)s"
def _get_formatter_and_handler(use_minimal_format: bool = False):
logging_dict = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"colored": {
"()": "coloredlogs.ColoredFormatter",
"format": minimal_format if use_minimal_format else format,
"datefmt": "%m-%d %H:%M:%S",
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "colored",
},
},
"loggers": {},
}
return logging_dict
def get_logging_config(django_log_level: str, wkz_log_level: str):
logging_dict = _get_formatter_and_handler()
logging_dict["loggers"] = {
"django": {
"handlers": ["console"],
"level": django_log_level,
},
"wizer": {
"handlers": ["console"],
"level": wkz_log_level,
},
}
return logging_dict
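# Illustrative usage (an assumption about how this module is consumed, e.g. from
# a Django settings file): the returned dict is a standard dictConfig payload.
#   LOGGING = get_logging_config(django_log_level="INFO", wkz_log_level="DEBUG")
#   # Django applies it via logging.config.dictConfig(LOGGING) at startup.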
| python |
import argparse
import json
import os
import shutil
import logging
from weed_annotator.semantic_segmentation import utils
from weed_annotator.semantic_segmentation.train import train
from weed_annotator.semantic_segmentation.inference import inference
from weed_annotator.post_processing.post_process_masks import post_process_masks
from weed_annotator.full_pipeline.mask_proposal_evaluator import MaskProposalsEvaluator
from weed_annotator.image_composition.compose_imgs import compose_images
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='weed_annotator')
parser.add_argument('-c', '--config_folder', default='configs', type=str,
help='Folder with pipeline configs')
args = parser.parse_args()
# create logger
logger = logging.getLogger('weed_annotator_logger')
logger.setLevel(logging.INFO)
fh = logging.StreamHandler()
fh_formatter = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(fh_formatter)
logger.addHandler(fh)
    # Setting seed for reproducibility
utils.set_seeds()
pipeline_config = json.load(open(f"{args.config_folder}/weed_annotator.json"))
# Image Composition
if pipeline_config["image_composition"]["enable"]:
logger.info("Generating image compositions for training.")
img_comp_config = json.load(open(f"{args.config_folder}/image_composition.json"))
compose_images(img_comp_config)
train_folder = f"{img_comp_config['folders']['out_folder']}/{img_comp_config['folders']['ident']}"
else:
train_folder = pipeline_config["image_composition"]["reuse"]
    # Training Semantic Segmentation
train_config = json.load(open(f"{args.config_folder}/seg_config.json"))
if pipeline_config["sem_segmentation"]["enable_train"]:
train_config["data"]["train_data"] = train_folder
logger.info(f"Training semantic segmentation model on: {train_folder}.")
train(train_config)
log_folder = f"{train_config['logging_path']}/{train_config['train_ident']}"
else:
log_folder = pipeline_config["sem_segmentation"]["reuse_model"]
# Inference
input_data = pipeline_config["input_imgs"]
if pipeline_config["sem_segmentation"]["enable_inference"]:
logger.info(f"Generating mask predictions for: {input_data}.")
mp_raw = f"/tmp/{train_config['train_ident']}/mask_proposals/raw"
os.makedirs(mp_raw)
inference(f"{log_folder}/config.json", f"{log_folder}/checkpoints/best.pth", input_data, mp_raw)
else:
mp_raw = pipeline_config["sem_segmentation"]["reuse_masks"]
# Postprocess
if pipeline_config["post_processing"]["enable"]:
logger.info("Post-processing mask predictions.")
mp_pp = pipeline_config["mask_proposals"]
os.makedirs(mp_pp, exist_ok=True)
post_process_masks(f"{input_data}", mp_raw, mp_pp)
else:
mp_pp = pipeline_config["post_processing"]["reuse"]
# Evaluation
if pipeline_config["enable_evaluation"] and os.path.exists(f"{input_data}/annotations.xml"):
logger.info(f"Evaluation of pipeline performance on: {input_data}.")
me = MaskProposalsEvaluator(input_data, train_config["data"]["weed_label"])
result_raw = me.evaluate(mp_raw)
with open(f"{log_folder}/eval_raw.json", 'w') as f:
json.dump(result_raw, f)
result_pp = me.evaluate(mp_pp)
with open(f"{log_folder}/eval_pp.json", 'w') as f:
json.dump(result_pp, f)
# Cleanup
if pipeline_config["sem_segmentation"]["enable_inference"]:
shutil.rmtree(f"{mp_raw}")
| python |
"""
This module contains helper functions.
The main purpose is to remove clutter in the main
file
"""
from __future__ import print_function
import argparse
import sys
import os
import logging
import copy
import subprocess
from operator import attrgetter
from string import Formatter
try:
# Python 3
import _string
except ImportError:
# Python 2
pass
class StyleFormatter(Formatter):
""" Custom formatter that handles nested field of two levels
such as '{mass[element]}'. Don't know how it works
"""
def get_value(self, field_name, args, kwargs):
# Return kwargs[field_name], else return ''
return kwargs.get(field_name, '')
def get_field(self, field_name, args, kwargs):
# To illustrate, the example '{mass[element]}' is used with
# the kwargs {"element":"Pr", "mass":{"Pr":128}}
# Split the field_name into the field and an iterator
# ex. mass <fieldnameiterator object at 0x105308840>
try:
# Python 2.7
first, rest = field_name._formatter_field_name_split()
except:
# Python 3 (Only tested on 3.5)
first, rest = _string.formatter_field_name_split(field_name)
# print("First:", first)
# print("Kwargs:", kwargs)
# obj = kwargs[field_name] or obj = '' if KeyError
# ex. obj = {"Pr":128}
obj = self.get_value(first, args, kwargs)
# Often, "rest" is only one deep
# is_attr is a bool. I think it is true if something.keyword exists
# keyword is just a keyword, like something[keyword] or something.keyword
for is_attr, keyword in rest:
            # This is the juicy stuff. If the keyword is in kwargs, return the
# value in obj
# ex. obj = {"Pr":128}["Pr"] = 128
if keyword in kwargs:
#print(obj)
obj = obj[kwargs.get(keyword)]
# ex. 128
return obj, first
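# Illustrative behaviour of StyleFormatter (commented sketch, based on the
# example threaded through the comments above): a two-level field is resolved
# through the keyword arguments at format time.
#   StyleFormatter().format("{mass[element]}", element="Pr", mass={"Pr": 128})
#   # -> '128'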
def correct(input_argument):
""" Function to check syntax of input arguments given by user """
if input_argument in('n', 'no'):
return 'no'
elif input_argument in('y', 'yes'):
return 'yes'
# if input argument is given incorrectly, function returns 'error'
else:
error_message = " please make sure these input arguments are gives as: \n input = 'no' or input = 'yes' \n input = 'n' or input = 'y' \n input = ['no', 'yes'] or input = ['n', 'y'] \n"
sys.exit(error_message)
def mkdir(directory):
""" Check if directory exists. If not, create it
Parameters: directory: the name of the directory
Returns: None
    Algorithm: Check if the directory exists, if not, create it
"""
if not os.path.exists(directory):
os.makedirs(directory)
def make_iterable(dictionary):
""" Makes every entry in the dictionary iterable and returns the result
Parameters: dictionary: the dict to be made iterable
Output: The iterable dictionary
Algorithm: Make every key in the list iterable and make the results
entries unique"""
new_dict = copy.deepcopy(dictionary)
for key in dictionary:
if not isinstance(dictionary[key], (tuple, list)):
new_dict[key] = [new_dict[key]]
# check if every item in user given list is unique
for key, value in new_dict.items():
try:
# if variable tuple or list => new list with value only once
if len(set(value)) != len(value):
newlist = []
for val in value:
if val not in newlist:
newlist.append(val)
new_dict[key] = newlist
except TypeError:
# if variable == dict => new dict with value only once inside
# user_input[key]
for keys, values in value[0].items():
if len(set(values)) != len(values):
newlist = []
for val in values:
if val not in newlist:
newlist.append(val)
value[0][keys] = newlist
new_dict[key] = value[0]
return new_dict
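# Illustrative example (not part of the original module): scalars become
# single-element lists and duplicate list entries are removed.
# >>> make_iterable({"element": "Pr", "mass": [128, 128, 130]})
# {'element': ['Pr'], 'mass': [128, 130]}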
def which(program):
""" Find path of binary
    Parameters: program: name of binary
    Returns: Path to binary if found, else None
Algorithm: Mimic the UNIX 'which' command
"""
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
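# Illustrative usage (binary name chosen for the example only): locate an
# executable on the PATH.
# >>> which("ls")        # e.g. '/bin/ls' on most Linux systems, or None if absent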
def talys_version(local=False):
""" Get the version of TALYS being used
    Parameters: local: Whether to use a binary talys file in the current
directory or the system-wide talys
Returns: String of the format #.#
    Algorithm: Call the shell command "strings" and grep the result
"""
# Find the path of TALYS
if local:
talys_path = os.path.join(os.getcwd(), "talys")
else:
talys_path = which("talys")
if talys_path is None or "talys" not in talys_path:
raise RuntimeError("Could not find talys.")
# Use the UNIX command 'strings' to extract all strings from
# the binary
talys18string = "pshiftadjust"
talys16string = "fisbaradjust"
talys14string = "deuteronomp"
talys12string = "gamgamadjust"
last_resort_string = "massmodel"
strings = subprocess.check_output(["strings", talys_path]).decode("utf8")
if talys18string in strings:
return "1.8"
elif talys16string in strings:
return "1.6"
elif talys14string in strings:
return "1.4"
elif talys12string in strings:
return "1.2"
elif last_resort_string in strings:
return "1.0"
else:
return "unknown"
class SortingHelpFormatter(argparse.RawTextHelpFormatter):
""" Custom formatter for argparse help """
def add_arguments(self, actions):
actions = sorted(actions, key=attrgetter('option_strings'))
super(SortingHelpFormatter, self).add_arguments(actions)
def get_args():
"""
Manages the argparse module.
Any changes to the arguments from terminal are done here
Parameters: none
Returns: class instance of 'argparse.Namespace'
Algorithm: Add arguments to argparse.ArgumentParser(), fix some arguments
regarding logging, and return the parsed arguments.
"""
parser = argparse.ArgumentParser(description=("Automates the process of "
"creating and running thousands of simulations with TALYS"),
formatter_class=SortingHelpFormatter)
parser.add_argument("-d", "--debug",
help="show debugging information. Overrules log and verbosity",
action="store_true")
parser.add_argument("-l", "--log",
help="set the verbosity for the log file",
choices=["DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL"],
type=str.upper, default="INFO")
parser.add_argument("-v", "--verbosity",
help="set the verbosity level",
choices=["DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL"],
type=str.upper, default="INFO")
parser.add_argument("--lfile",
help="filename of the log file",
type=str, default="talys.log",
metavar='LOG_FILENAME',
dest="log_filename")
parser.add_argument("--efile",
help="filename of the error file",
type=str, default="error.log",
metavar='ERROR_FILENAME',
dest="error_filename")
parser.add_argument("--ifile",
help=("the filename for where the options are stored"
"\nDefault is input.json"),
type=str, default="structure.json",
metavar='INPUT_FILENAME',
dest="input_filename")
parser.add_argument("-p", "--processes",
help=("set the number of processes the script will use."
"\nShould be less than or equal to number of CPU cores."
"\nIf no N is specified, all available cores are used"),
type=int, nargs="?",
metavar='N', const=0)
parser.add_argument("--enable-pausing",
help="enable pausing by running a process that checks for input",
action="store_true",
dest="enable_pausing")
parser.add_argument("--multi",
help=("the name of the level at which multiprocessing will be run."
"\nThis should only be used if _only_ mass and elements vary"),
nargs='+', type=str, default=[])
parser.add_argument("--default-excepthook",
help="use the default excepthook",
action="store_true",
dest="default_excepthook")
parser.add_argument("--disable-filters",
help="do not filter log messages",
action="store_true",
dest="disable_filters")
parser.add_argument("-r", "--resume",
help=("resume from previous checkpoint. If there are"
"\nmore than one TALYS-directory, it will choose"
"\nthe last directory"),
action="store_true")
parser.add_argument("--dummy",
help="for not run TALYS, only create the directories",
action="store_true")
args = parser.parse_args()
# Convert the input strings to the corresponding logging type
args.log = getattr(logging, args.log)
args.verbosity = getattr(logging, args.verbosity)
# --debug overrules --log and --verbosity
if args.debug:
args.log = logging.DEBUG
args.verbosity = logging.DEBUG
return args
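# Illustrative invocation (the driver script name is hypothetical):
#   python run_talys.py --debug -p 4 --ifile structure.json --lfile talys.log
# would force both log and console verbosity to DEBUG and use 4 worker processes.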
class Cd:
""" Simplifies directory mangement """
def __init__(self, newPath):
""" When an object of cd is created, the given path is expanded all the way back to $HOME"""
self.newPath = os.path.expanduser(newPath)
""" In order for an cd object to be used with the with-statement, __enter__ and __exit__ are needed """
def __enter__(self):
""" Changes directory to the one given in __init__ while saving the current when entering
the with-statement """
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
""" Returns to the original path when exiting the with-statement """
os.chdir(self.savedPath)
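# Illustrative usage (path is hypothetical): temporarily work inside another
# directory and return to the original one automatically.
# >>> with Cd("~/talys_runs"):
# ...     print(os.getcwd())       # inside ~/talys_runs
# ...                              # back in the original directory after the block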
def getkey():
    """ Read a single keypress from stdin without waiting for Enter """
    import termios
    TERMIOS = termios
    fd = sys.stdin.fileno()
    old = termios.tcgetattr(fd)
    new = termios.tcgetattr(fd)
    # Disable canonical mode and echo so a character is delivered immediately
    # without being printed back to the terminal
    new[3] = new[3] & ~TERMIOS.ICANON & ~TERMIOS.ECHO
    # Return after a single byte (VMIN=1) with no read timeout (VTIME=0)
    new[6][TERMIOS.VMIN] = 1
    new[6][TERMIOS.VTIME] = 0
    termios.tcsetattr(fd, TERMIOS.TCSANOW, new)
    c = None
    try:
        c = os.read(fd, 1)
    finally:
        # Always restore the original terminal settings
        termios.tcsetattr(fd, TERMIOS.TCSAFLUSH, old)
    return c
| python |
"""
Use to populate:
from crs.populate_crs_table import CrsFromApi
crs_api = CrsFromApi()
crs_api.populate()
"""
import re
import math
from bills.models import Bill
from crs.scrapers.everycrsreport_com import EveryCrsReport
# Bill's types {'sres', 'hjres', 'hconres', 's', 'hres', 'sjres', 'hr', 'sconres'}
BILL_NUMBER_RE = re.compile(r"\W((?:h\.\s?r\.|s\.|h\.conres\.|s\.conres\.|h\.\s?j\.\s?res\.|s\.\s?j\.\s?res\.|"
+ r"h\.\s?res\.|s\.\s?res\.)\s?(?:[1-9]\d{0,3}))", re.I | re.M)
def cleanBillNumber(billnumber):
return billnumber.replace('.', '').replace(' ', '').lower()
def get_congress_number_for_year(year: str) -> int:
return math.ceil((int(year) - 1788) / 2)
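# Illustrative check (not part of the original module): reports dated in 2019
# map to the 116th Congress, since ceil((2019 - 1788) / 2) == 116.
# >>> get_congress_number_for_year("2019")
# 116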
class CrsFromApi:
matched_count = 0
extracted_count = 0
def process_bills_for_report(self, bill_numbers, report, source='title'):
congress_number = get_congress_number_for_year(report.date[:4])
# construct IDs and remove duplicates
bill_ids = set()
for bill_number in bill_numbers:
bill_id = f'{congress_number}{bill_number}'.replace(' ', '')\
.replace('\n', '').lower()
bill_ids.add(bill_id)
# Add prior year if report was in January or February
if int(report.date[5:7]) < 3:
bill_id = f'{congress_number-1}{bill_number}'.replace(' ', '')\
.replace('\n', '').lower()
bill_ids.add(bill_id)
self.extracted_count += len(bill_ids)
for bill_id in bill_ids:
try:
bill = Bill.objects.get(bill_congress_type_number=bill_id)
print(f'{bill_id} was matched, use existing bill.')
self.matched_count += 1
except Bill.DoesNotExist:
print(f'{bill_id} does not have a match in Bills.')
                # Do not create a bill if it is not found in the db
continue
bill.save()
report.bills.add(bill)
def populate(self):
reports_count = 0
api = EveryCrsReport()
for report in api.scrape():
reports_count += 1
print(report)
# ignore years before 2010
try:
reportyear = int(report.date[:4])
except ValueError:
continue
if reportyear < 2010:
continue
report.save()
            # Materialise the matches so the truthiness checks below are
            # meaningful (a bare map object is always truthy, even when empty)
            bill_numbers = list(map(cleanBillNumber, BILL_NUMBER_RE.findall(report.title)))
            if bill_numbers:
                self.process_bills_for_report(bill_numbers, report, source='title')
            if report.report_content_raw:
                bill_numbers = list(map(cleanBillNumber, BILL_NUMBER_RE.findall(report.report_content_raw)))
                if bill_numbers:
                    self.process_bills_for_report(bill_numbers, report, source='text')
            report.save()  # save after all bills have been added
print(f'{reports_count} reports processed')
print(f'{self.extracted_count} bill numbers extracted')
print(f'{self.matched_count} bills matched') | python |
import os
import numpy as np
import pandas as pd
from trackml.dataset import load_event
from trackml.score import score_event
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import DBSCAN
class Clusterer(object):
def __init__(self, eps):
self.eps = eps
    def _preprocess(self, hits):
        # Normalise the hit coordinates before clustering: (x2, y2) project each
        # hit onto the unit sphere, and z2 scales z by the transverse
        # (cylindrical) radius; the features are then standardised.
        x = hits.x.values
        y = hits.y.values
        z = hits.z.values
        r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
        hits['x2'] = x / r
        hits['y2'] = y / r
        r = np.sqrt(x ** 2 + y ** 2)
        hits['z2'] = z / r
        ss = StandardScaler()
        X = ss.fit_transform(hits[['x2', 'y2', 'z2']].values)
        return X
def predict(self, hits):
X = self._preprocess(hits)
cl = DBSCAN(eps=self.eps, min_samples=3, algorithm='kd_tree')
labels = cl.fit_predict(X)
return labels
def create_one_event_submission(event_id, hits, labels):
sub_data = np.column_stack(([event_id]*len(hits), hits.hit_id.values, labels))
submission = pd.DataFrame(data=sub_data, columns=["event_id", "hit_id", "track_id"]).astype(int)
return submission
if __name__ == "__main__":
# training and test data folder paths
path_to_train = "../../data/raw/train_sample/train_100_events"
# chose a single event to work with
event_prefix = "event000001000"
# read data
hits, cells, particles, truth = load_event(os.path.join(path_to_train, event_prefix))
# perform clustering
model = Clusterer(eps=0.006)
labels = model.predict(hits)
print(labels)
submission = create_one_event_submission(0, hits, labels)
score = score_event(truth, submission)
print("Your score: ", score)
| python |
"""
This module encapsulates QCoDeS database: its schema, structure, convenient
and relevant queries, wrapping around :mod:`sqlite3`, etc.
The dependency structure of the sub-modules is the following:
::
.connection .settings
/ | \ |
/ | \ |
/ | V V
| | .query_helpers
| | | |
| V V |
| .db_upgrades |
| / V
| / .queries
v v
.database
"""
| python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# vim: fenc=utf-8
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
#
"""
File name: constants.py
Author: dhilipsiva <[email protected]>
Date created: 2016-11-20
"""
class QuestionType(object):
UNKNOWN = -1
MULTIPLE_CHOICE = 0
CHOICE = 1
BOOLEAN = 2
TEXT = 3
| python |
"""
author:xing xiangrui
test os.system()
"""
import os
os.chdir("mAP/")
#os.system("cd mAP/")
os.system("python main.py -na") | python |
'''
Author : ZHP
Date : 2022-04-12 16:00:40
LastEditors : ZHP
LastEditTime : 2022-04-12 17:01:01
FilePath : /models/PointFormer/similarity.py
Description :
Copyright 2022 ZHP, All Rights Reserved.
2022-04-12 16:00:40
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append("../..")
from models.pointnet.pointNet2_Ops import *
from models.PointFormer.basic_block import K_MLP_Layer
class Affinity(nn.Module):
def __init__(self) -> None:
super().__init__()
pass
def forward(self, src, dst):
pass
def extra_repr(self) -> str:
print_paras = ["sigma", "k", "mu", "epsilon"]
s = ""
for para in print_paras:
if para in self.__dict__:
s += f'{para}={self.__dict__[para]},'
s = s[:-1]
return s.format(**self.__dict__)
class pointnet2(Affinity):
def __init__(self, k=3) -> None:
super().__init__()
self.k = k
    def forward(self, src, dst):
        '''
        Author: ZHP
        description: interpolation weights as used in PointNet++ feature propagation
        param {tensor} src: point cloud with the larger number of points [B, N, 3]
        param {tensor} dst: point cloud with the smaller number of points [B, S, 3]
        return {tensor} score: similarity matrix [B, N, S]
        '''
        B, N, _ = src.shape
        # KNN interpolation
        dists = square_distance(src, dst)  # [B, N, S], squared distances
        dists, idx = dists.sort(dim=-1)  # [B, N, S]
        dists, idx = dists[:, :, :self.k], idx[:, :, :self.k]  # [B, N, k]
        # For each of the N points in src, find its k (default 3) nearest points
        # in dst and keep their distances and indices
        dist_recip = 1.0 / (dists + 1e-8)  # inverse-distance weights w_i; 1e-8 avoids division by zero [B, N, k]
        norm = torch.sum(dist_recip, dim=2, keepdim=True)  # denominator, [B, N, 1]
        weight = dist_recip / norm  # weight_i = w_i / sum(w_i)
        score = torch.zeros(B, N, dst.shape[1]).to(src.device)  # [B, N, S]
        score = score.scatter_(-1, idx, weight)  # [B, N, S]
        # score is non-zero only at the k nearest points of each row; all other
        # entries stay 0
        return score
class euclidean(Affinity):
def __init__(self, mu=2, epsilon=1e-8) -> None:
super().__init__()
self.mu = mu
self.epsilon = epsilon
def forward(self, src, dst):
        '''
        Author: ZHP
        description: weights from the inverse Euclidean distance, 1 / ((||xi - yj||^2)^mu + epsilon)
        param {tensor} src: point cloud with the larger number of points [B, N, 3]
        param {tensor} dst: point cloud with the smaller number of points [B, S, 3]
        return {tensor} score: similarity matrix [B, N, S]
        '''
dists = square_distance(src, dst) # [B, N, S]
dists = torch.pow(dists, exponent=self.mu)
score = 1 / (dists + self.epsilon) # [B, N, S]
score = F.softmax(score, dim=-1)
return score
class cosine_similarity(Affinity):
def __init__(self, epsilon=1e-8) -> None:
super().__init__()
self.epsilon = epsilon
def forward(self, src, dst):
        '''
        Author: ZHP
        description: pairwise cosine similarity between points. Note that
                     F.cosine_similarity only computes the similarity of paired
                     vectors, not a full pairwise matrix, hence this implementation.
        param {tensor} src: point cloud with the larger number of points [B, N, 3]
        param {tensor} dst: point cloud with the smaller number of points [B, S, 3]
        param {int} epsilon: small value that keeps the denominator away from zero
        return {tensor} score: similarity matrix [B, N, S]
        '''
B, N, _ = src.shape
_, S, _ = dst.shape
cdot = torch.matmul(src, dst.transpose(1,-1)) # [B, N, S]
norm_src = torch.norm(src, dim=-1, keepdim=True) # [B, N, 1] ||src||2
norm_dst = torch.norm(dst, dim=-1, keepdim=True) # [B, S, 1] ||dst||2
norm_ = torch.matmul(norm_src, norm_dst.transpose(1,-1)) # [B, N, S]
norm_ = torch.max(norm_, torch.ones_like(norm_) * self.epsilon)
score = cdot / norm_ # [B, N, S]
score = F.softmax(score, dim=-1)
return score
class gaussian_kernel(Affinity):
def __init__(self, sigma=1) -> None:
super().__init__()
self.sigma = sigma
def forward(self, src, dst):
        '''
        Author: ZHP
        description: Gaussian kernel k(x1,x2) = exp(-||x1 - x2||^2 / (2*sigma^2))
                     (note: the implementation below uses the un-squared norm ||x1 - x2|| in the exponent)
        param {tensor} src: point cloud with the larger number of points [B, N, 3]
        param {tensor} dst: point cloud with the smaller number of points [B, S, 3]
        return {tensor} score: similarity matrix [B, N, S]
        '''
gap = src[:,:,None] - dst[:,None] # [B, N, S, 3]
gap = torch.norm(gap, dim=-1) # [B, N, S]
gap = - (gap / (self.sigma ** 2)) * 0.5
score = torch.exp(gap) # [B, N, S]
score = F.softmax(score, dim=-1)
return score
class chebyshev_distance(Affinity):
def __init__(self, epsilon=1e-8) -> None:
super().__init__()
self.epsilon = epsilon
def forward(self, src, dst):
        '''
        Author: ZHP
        description: Chebyshev distance max_i |xi - yi|
        param {tensor} src: point cloud with the larger number of points [B, N, 3]
        param {tensor} dst: point cloud with the smaller number of points [B, S, 3]
        param {int} epsilon: small value that keeps the denominator away from zero
        return {tensor} score: similarity matrix [B, N, S]
        '''
dist = src[:,:,None] - dst[:,None] # [B, N, S, 3]
dist = torch.max(dist, dim=-1)[0] # [B, N, S]
dist = 1.0 / (dist + self.epsilon)
score = F.softmax(dist, dim=-1) # [B, N, S]
return score
class minkowski_distance(Affinity):
def __init__(self, p=1, epsilon=1e-8) -> None:
super().__init__()
self.p = p
self.epsilon = epsilon
def forward(self, src, dst):
        '''
        Author: ZHP
        description: Minkowski distance [sum(|xi - yi|^p)]^(1/p)
        param {tensor} src: point cloud with the larger number of points [B, N, 3]
        param {tensor} dst: point cloud with the smaller number of points [B, S, 3]
        param {int} p: p=1 gives the Manhattan distance, p=2 the Euclidean distance,
                       and p -> infinity the Chebyshev distance
        param {int} epsilon: small value that keeps the denominator away from zero
        return {tensor} score: similarity matrix [B, N, S]
        '''
dist = src[:,:,None] - dst[:,None] # [B, N, S, 3]
dist = torch.pow(dist, self.p)
dist = torch.sum(dist, dim=-1)
dist = torch.pow(dist, 1/self.p)
dist = 1 / (dist + self.epsilon)
score = F.softmax(dist, dim=-1)
return score
class PointUpsampleAttn(nn.Module):
def __init__(self, dim_in, relation=pointnet2(), dim_out=None, dropout=0.):
super().__init__()
if dim_out is None:
self.embed = lambda x : x
else:
self.embed = K_MLP_Layer(3, dim_in, dim_out, True, True, dropout)
        self.relation = relation  # function used to compute the similarity between point sets
def forward(self, q, k, v):
'''
Author: ZHP
description: relation(qi,kj)*vj 1 / ||qi-kj||
param {tensor} q : 原始点云坐标 [B, N, 3]
param {tensor} k : 采样后的点云坐标 [B, S, 3]
param {tensor} v : 采样后的点云特征 [B, S, C]
return {tensor} extract: 上采样后的点云特征 [B, D, N]
'''
score = self.relation(q, k) # [B, N, S]
extract = torch.matmul(score, v) # [B, N, C]
extract = extract.transpose(1,-1)
extract = self.embed(extract) # [B, D, N]
return extract
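# Illustrative usage (shapes only; assumes K_MLP_Layer maps dim_in -> dim_out channels):
# >>> q = torch.randn(2, 1024, 3)    # dense point coordinates
# >>> k = torch.randn(2, 128, 3)     # downsampled point coordinates
# >>> v = torch.randn(2, 128, 64)    # downsampled point features
# >>> up = PointUpsampleAttn(dim_in=64, relation=euclidean(), dim_out=96)
# >>> up(q, k, v).shape
# torch.Size([2, 96, 1024])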
if __name__ == "__main__":
p2 = euclidean()
# src = torch.randn(1, 10, 3, dtype=torch.float)
# dst = torch.randn(1, 10, 3, dtype=torch.float)
# a = p2(src, dst)
# print(a.shape)
print(p2) | python |
import re
import pytest
from perl.translator import translate_string
from perl.utils import re_match, reset_vars
@pytest.fixture
def _globals():
return {"re": re, "__perl__re_match": re_match, "__perl__reset_vars": reset_vars}
def test_match__value_present__returns_true(_globals):
ldict = {"var": "one foo two"}
src = translate_string("var =~ /foo/")
result = eval(src, _globals, ldict)
assert isinstance(result, re.Match)
def test_match__value_not_present__returns_false(_globals):
ldict = {"var": "one two"}
src = translate_string("var =~ /foo/")
result = eval(src, _globals, ldict)
assert result is None
def test_match__value_match__value_set(_globals):
ldict = {"var": "one foo two"}
src = translate_string("var =~ /(foo)/")
result = eval(src, _globals, ldict)
assert isinstance(result, re.Match)
assert "__perl__var__1" in _globals["__builtins__"]
assert _globals["__builtins__"]["__perl__var__1"] == "foo"
| python |
# Authors: Stephane Gaiffas <[email protected]>
# License: BSD 3 clause
"""This modules introduces the Dataset class allowing to store a binned features matrix.
It uses internally a bitarray to save the values of the features in a memory efficient
fashion. It exploits the fact that any columns j of the features matrix X contain
only contiguous non-negative integers {0, 1, 2, ..., max_value_j} obtained through
binning of both categorical and continuous columns.
If a column contains M modalities, it will look for the minimum number of bits required
to save such values, and will stack them into 64 bits words of a contiguous memory
region of a bitarray (a 1D numpy array, using a F-major ordering of the matrix X).
For familiarity with bitwise operations:
https://en.wikipedia.org/wiki/Bitwise_operation
"""
from math import ceil, floor
import numpy as np
from numba import jit, void, uint8, int8, uint16, int16, uint32, int32, uint64, int64
from numba.experimental import jitclass
from .._utils import get_type
# Global jit decorator options
NOPYTHON = True
NOGIL = True
BOUNDSCHECK = False
CACHE = True
_UINT8_MAX = np.iinfo(np.uint8).max
_UINT16_MAX = np.iinfo(np.uint16).max
_UINT32_MAX = np.iinfo(np.uint32).max
_UINT64_MAX = np.iinfo(np.uint64).max
spec_dataset = [
# Number of samples in the dataset
("n_samples", uint64),
# Number of features in the dataset
("n_features", uint64),
# maximum value in each column
("max_values", uint64[::1]),
# Number of bits used for each values of each columns
("n_bits", uint64[::1]),
# bitarray[offsets[j]:offsets[j+1]] is the array of words for the j-th column
("offsets", uint64[::1]),
# n_values_in_words[j] is the number of values saved in a word for column j
("n_values_in_words", uint64[::1]),
# The bitarray containing all values
("bitarray", uint64[::1]),
# The bitmasks used for each column
("bitmasks", uint64[::1]),
]
@jitclass(spec_dataset)
class Dataset(object):
"""This is a class containing the binned features matrix. It uses internally a
bitarray to save the values of the features in a memory efficient fashion. It
exploits the fact that all the columns of the features matrix X contain only
contiguous non-negative integers {0, 1, 2, ..., max_value} obtained through
binning of both categorical and continuous columns.
If a column contains M modalities, it will look for the minimum number of bits
    required to save such values, and will stack them into 64-bit words in a
contiguous memory region of the bitarray (a 1D numpy array, using a F-major
ordering of the matrix X).
For familiarity with bitwise operations:
https://en.wikipedia.org/wiki/Bitwise_operation
Parameters
----------
n_samples : int
        Number of samples (rows) in the dataset
    max_values : ndarray
        Numpy array of shape (n_features,) containing the maximum value (number of
bins + 1) in each column.
Attributes
----------
n_samples : int
        Number of samples (rows) in the dataset
n_features : int
Number of features (columns) in the dataset
max_values : ndarray
Numpy array of shape (n_features,) containing the maximum value (number of
bins + 1) in each column.
n_bits : ndarray
Numpy array of shape (n_features,) such that n_bits[j] is the number of bits
used for the values of the j-th column
offsets : ndarray
Numpy array of shape (n_features + 1,) such that
bitarray[offsets[j]:offsets[j+1]] is the array of words for the j-th column
n_values_in_words : ndarray
Numpy array of shape (n_features,) such that n_values_in_words[j] is the number
of values saved in a single 64-bits word for the values in column j
bitmasks : ndarray
Numpy array of shape (n_features,) such that bitmasks[j] contains the
bitmask using the shift and back-shift operations to retrieve values from the
bitarray
bitarray : ndarray
Numpy array of shape (n_total_words,) containing the values of the dataset,
where n_total_words is the total number of words used (for all columns) to
store the values.
"""
def __init__(self, n_samples, max_values):
self.n_samples = n_samples
self.n_features = max_values.size
self.max_values = max_values
self.n_bits = np.empty(self.n_features, dtype=np.uint64)
self.offsets = np.empty(self.n_features + 1, dtype=np.uint64)
self.n_values_in_words = np.empty(self.n_features, dtype=np.uint64)
self.bitmasks = np.empty(self.n_features, dtype=np.uint64)
# The first offset is 0
offset = 0
self.offsets[0] = offset
for j, max_value in enumerate(max_values):
# Number of bits required to save numbers up to n_modalities
if max_value == 1:
self.n_bits[j] = 1
self.n_values_in_words[j] = 64
self.bitmasks[j] = 1
else:
self.n_bits[j] = ceil(np.log2(max_value + 1))
self.n_values_in_words[j] = floor(64 / self.n_bits[j])
self.bitmasks[j] = (1 << self.n_bits[j]) - 1
n_words = ceil(n_samples / self.n_values_in_words[j])
offset += n_words
self.offsets[j + 1] = offset
self.bitarray = np.empty(offset, dtype=np.uint64)
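# Worked example of the packing parameters above (illustrative): a column whose
# max value is 5 needs n_bits = ceil(log2(6)) = 3 bits per value, so
# n_values_in_words = floor(64 / 3) = 21 values fit in each 64-bit word, and the
# bitmask is (1 << 3) - 1 = 0b111.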
DatasetType = get_type(Dataset)
numba_int_types = [uint8, int8, uint16, int16, uint32, int32, uint64, int64]
# TODO: put back signatures everywhere
@jit(
# [void(uint64[::1], uint64, uint64, col_type[:]) for col_type in numba_int_types],
nopython=NOPYTHON,
nogil=NOGIL,
boundscheck=BOUNDSCHECK,
cache=CACHE,
locals={"i": uint64, "x_ij": uint64, "word": uint64, "pos_in_word": uint64},
)
def _dataset_fill_column(col_bitarray, n_bits, n_values_in_word, col):
"""Private function that fills the values of a column in the dataset.
Parameters
----------
col_bitarray : ndarray
Numpy array of shape (n_words,) containing the values of the column, where
n_words is the number of words used to store its values.
n_bits : int
Number of bits used to store one value from the column
n_values_in_word : int
Number of values from the column saved in a single 64-bits word
col : ndarray
Numpy array of shape (n_samples,) corresponding to the values of a column to
add to the dataset. This function exploits the fact that the values in col
contain only contiguous non-negative integers {0, 1, 2, ..., max_value}
coming from binning of both categorical and continuous columns.
"""
for i, x_ij in enumerate(col):
word = i // n_values_in_word
pos_in_word = i % n_values_in_word
if pos_in_word == 0:
col_bitarray[word] = x_ij
else:
col_bitarray[word] = (col_bitarray[word] << n_bits) | x_ij
# We need to shift the last word according to the position of the last value in
# the word, so that the bits of the values in the last word are on the left
# of it. If pos_in_word = n_values_in_word - 1 it does nothing, since the
# word is full and already left-aligned
col_bitarray[word] = col_bitarray[word] << (
(n_values_in_word - pos_in_word - 1) * n_bits
)
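# Illustrative trace of the packing loop above with n_bits=3, n_values_in_word=21
# and col=[5, 2]: the word first holds 5, then (5 << 3) | 2 == 42, and the final
# left shift by (21 - 1 - 1) * 3 == 57 bits aligns both values to the left of the
# 64-bit word.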
@jit(
# [void(DatasetType, col_type[:, :]) for col_type in numba_int_types],
nopython=NOPYTHON,
nogil=NOGIL,
boundscheck=BOUNDSCHECK,
cache=CACHE,
locals={
"bitarray": uint64[::1],
"offsets": uint64[::1],
"n_values_in_words": uint64[::1],
"n_bits": uint64[::1],
"n_features": uint64,
"j": uint64,
"n_values_in_word": uint64,
"bitarray_j": uint64[::1],
"n_bits_j": uint64,
"i": uint64,
"x_ij": uint64,
"word": uint64,
"pos_in_word": uint64,
},
)
def _dataset_fill_values(dataset, X):
"""Private function that fills the values in X inside the dataset.
Parameters
----------
dataset : Dataset
The dataset to fill with the values in X
X : ndarray
Numpy array of shape (n_samples, n_features) corresponding to the matrix of
features to be transformed in a Dataset. This function exploits the fact
that all the columns of X contain only contiguous non-negative integers {0,
1, 2, ..., max_value} obtained through binning of both categorical and
continuous columns.
"""
bitarray = dataset.bitarray
offsets = dataset.offsets
n_values_in_words = dataset.n_values_in_words
n_bits = dataset.n_bits
n_features = dataset.n_features
for j in range(n_features):
col_bitarray = bitarray[offsets[j] : offsets[j + 1]]
_dataset_fill_column(col_bitarray, n_bits[j], n_values_in_words[j], X[:, j])
def dataset_fill_column(dataset, col_idx, col):
"""Fills the values of a column in the dataset.
Parameters
----------
dataset : Dataset
The dataset to fill with the values in X
col_idx : int
Index of the column in the dataset
col : ndarray
Numpy array of shape (n_samples,) corresponding to the values of a column to
add to the dataset. This function exploits the fact that the values in col
contain only contiguous non-negative integers {0, 1, 2, ..., max_value}
coming from binning of both categorical and continuous columns.
"""
bitarray = dataset.bitarray
offsets = dataset.offsets
col_bitarray = bitarray[offsets[col_idx] : offsets[col_idx + 1]]
n_values_in_word = dataset.n_values_in_words[col_idx]
n_bits = dataset.n_bits[col_idx]
_dataset_fill_column(col_bitarray, n_bits, n_values_in_word, col)
def array_to_dataset(X):
"""Converts a numpy array to a Dataset.
Parameters
----------
X : ndarray
Numpy array of shape (n_samples, n_features) corresponding to the matrix of
features to be transformed to a Dataset. This function exploits the fact
that all the columns of X contain only contiguous non-negative integers {0,
1, 2, ..., max_value} obtained through binning of both categorical and
continuous columns.
Returns
-------
output : Dataset
The dataset corresponding to the values in X.
"""
    # Validate X before using it, so that the checks below can actually fire
    if hasattr(X, "ndim") and hasattr(X, "dtype") and hasattr(X, "shape"):
        if X.ndim == 2:
            if X.dtype not in (np.uint8, np.uint16, np.uint32, np.uint64):
                raise ValueError(
                    "X dtype must be one of uint8, uint16, uint32 or uint64"
                )
        else:
            raise ValueError("X must be a 2D numpy array")
    else:
        raise ValueError("X is not a numpy array")
    n_samples, n_features = X.shape
    max_values = np.empty(n_features, dtype=np.uint64)
    X.max(axis=0, initial=0, out=max_values)
    if X.shape[1] != max_values.size:
        raise ValueError("max_values size must match X.shape[1]")
dataset = Dataset(n_samples, max_values)
_dataset_fill_values(dataset, X)
return dataset
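# Illustrative round trip (dataset_to_array is defined further down in this module):
# >>> X = np.array([[0, 3], [1, 2], [2, 1]], dtype=np.uint8)
# >>> np.array_equal(dataset_to_array(array_to_dataset(X)), X)
# True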
def _get_empty_matrix(n_samples, n_features, max_value):
"""A private function that creates an empty F-ordered ndarray with shape
(n_samples, n_features) and dtype in (uint8, uint16, uint32, uint64) depending on
    the expected maximum value to store in it.
Parameters
----------
n_samples : int
Number of samples (number of rows of the matrix)
n_features : int
Number of features (number of columns of the matrix)
max_value : int
Maximum value expected in the matrix (to choose the dtype)
Returns
-------
output : ndarray
An ndarray with shape (n_samples, n_features) and minimal dtype to store values
"""
# Let's find out the correct dtype depending on the max_value
if max_value <= _UINT8_MAX:
X = np.empty((n_samples, n_features), dtype=np.uint8, order="F")
elif _UINT8_MAX < max_value <= _UINT16_MAX:
X = np.empty((n_samples, n_features), dtype=np.uint16, order="F")
elif _UINT16_MAX < max_value <= _UINT32_MAX:
X = np.empty((n_samples, n_features), dtype=np.uint32, order="F")
elif _UINT32_MAX < max_value <= _UINT64_MAX:
X = np.empty((n_samples, n_features), dtype=np.uint64, order="F")
else:
raise ValueError("X cannot be created")
return X
@jit(
[
void(DatasetType, uint8[:, :]),
void(DatasetType, uint16[:, :]),
void(DatasetType, uint32[:, :]),
void(DatasetType, uint64[:, :]),
void(DatasetType, uint8[::1, :]),
void(DatasetType, uint16[::1, :]),
void(DatasetType, uint32[::1, :]),
void(DatasetType, uint64[::1, :]),
],
nopython=NOPYTHON,
nogil=NOGIL,
boundscheck=BOUNDSCHECK,
locals={
"n_samples": uint64,
"n_features": uint64,
"n_values_in_words": uint64[::1],
"offsets": uint64[::1],
"bitarray": uint64[::1],
"n_bits": uint64[::1],
"bitmasks": uint64[::1],
"j": uint64,
"n_values_in_word": uint64,
"bitarray_j": uint64[::1],
"n_bits_j": uint64,
"bitmask": uint64,
"i": uint64,
"word": uint64,
"pos_in_word": uint64,
"b": uint64,
"n_shifts": uint64,
},
)
def _dataset_to_array(dataset, X):
n_samples = dataset.n_samples
n_features = dataset.n_features
n_values_in_words = dataset.n_values_in_words
offsets = dataset.offsets
bitarray = dataset.bitarray
n_bits = dataset.n_bits
bitmasks = dataset.bitmasks
for j in range(n_features):
n_values_in_word = n_values_in_words[j]
bitarray_j = bitarray[offsets[j] : offsets[j + 1]]
n_bits_j = n_bits[j]
bitmask = bitmasks[j]
for i in range(n_samples):
word = i // n_values_in_word
pos_in_word = i % n_values_in_word
b = bitarray_j[word]
n_shifts = (n_values_in_word - pos_in_word - 1) * n_bits_j
X[i, j] = (b & (bitmask << n_shifts)) >> n_shifts
def dataset_to_array(dataset):
X = _get_empty_matrix(
dataset.n_samples, dataset.n_features, dataset.max_values.max()
)
_dataset_to_array(dataset, X)
return X
| python |
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances
try:
from bottleneck import argpartsort
except ImportError:
try:
# Added in version 1.8, which is pretty new.
# Sadly, it's still slower than bottleneck's version.
argpartsort = np.argpartition
except AttributeError:
argpartsort = lambda arr,k: np.argsort(arr)
def min_k_indices(arr, k, inv_ind=False):
'''Returns indices of the k-smallest values in each row, unsorted.
The `inv_ind` flag returns the tuple (k-smallest,(n-k)-largest). '''
psorted = argpartsort(arr, k)
if inv_ind:
return psorted[...,:k], psorted[...,k:]
return psorted[...,:k]
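# Illustrative example: indices of the 2 smallest entries per row. The order of
# the returned indices within the k smallest is not guaranteed by
# argpartsort/argpartition.
# >>> min_k_indices(np.array([[3., 1., 2., 5.]]), 2)
# e.g. array([[1, 2]])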
def neighbor_graph(X, precomputed=False, k=None, epsilon=None, symmetrize=True, weighting='binary'):
'''Construct an adj matrix from a matrix of points (one per row).
When `precomputed` is True, X is a distance matrix.
`weighting` param can be one of {binary, none}.'''
assert ((k is not None) or (epsilon is not None)
), "Must provide `k` or `epsilon`"
assert weighting in ('binary','none'), "Invalid weighting param: "+weighting
num_pts = X.shape[0]
if precomputed:
dist = X.copy()
else:
dist = pairwise_distances(X, metric='sqeuclidean')
if k is not None:
k = min(k+1, num_pts)
nn,not_nn = min_k_indices(dist, k, inv_ind=True)
if epsilon is not None:
if k is not None:
dist[np.arange(dist.shape[0]), not_nn.T] = np.inf
in_ball = dist <= epsilon
dist[~in_ball] = 0 # zero out neighbors too far away
if symmetrize and k is not None:
# filtering may have caused asymmetry
dist = (dist + dist.T) / 2
else:
        for i in range(num_pts):
dist[i,not_nn[i]] = 0 # zero out neighbors too far away
if symmetrize:
dist = (dist + dist.T) / 2
    if weighting == 'binary':
# cycle through boolean and back to get 1/0 in floating points
return dist.astype(bool).astype(float)
return dist
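# Illustrative usage (random data): a symmetric binary 5-nearest-neighbour graph.
# >>> pts = np.random.rand(100, 3)
# >>> W = neighbor_graph(pts, k=5)
# >>> W.shape
# (100, 100)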
| python |
#!/usr/bin/env python3
#-----------------------------------------------------------------------------
# This file is part of the rogue_example software. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue_example software, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import sys
import rogue.utilities
import rogue.utilities.fileio
import rogue.interfaces.stream
import pyrogue
import time
class EventReader(rogue.interfaces.stream.Slave):
def __init__(self):
rogue.interfaces.stream.Slave.__init__(self)
self.enable = True
def _acceptFrame(self,frame):
if self.enable:
# Get the channel number
chNum = (frame.getFlags() >> 24)
# Check if channel number is 0x1 (streaming data channel)
if (chNum == 0x1) :
print('-------- Event --------')
# Collect the data
p = bytearray(frame.getPayload())
frame.read(p,0)
cnt = 0
while (cnt < len(p)):
value = 0
for x in range(0,4):
value += (p[cnt] << (x*8))
cnt += 1
                    print('data[%d]: 0x%.8x' % ((cnt // 4), value))
def main(arg):
# Create the objects
fileReader = rogue.utilities.fileio.StreamReader()
eventReader = EventReader()
# Connect the fileReader to our event processor
pyrogue.streamConnect(fileReader,eventReader)
# Open the data file
fileReader.open(arg)
time.sleep(1)
if __name__ == '__main__':
main(sys.argv[1])
| python |
# Generated by Django 2.2 on 2019-05-18 19:06
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="CIPRSRecord",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("label", models.CharField(max_length=2048)),
("date_uploaded", models.DateTimeField(auto_now_add=True)),
("report_pdf", models.FileField(upload_to="ciprs/")),
("data", django.contrib.postgres.fields.jsonb.JSONField(blank=True)),
],
)
]
| python |
retrieve = [
{"scenario":"Patient Exists","patient":"9000000009", "response":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"},{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"T456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","text":"Student Accommodation","use":"temp"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NominatedPharmacy","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-PreferredDispenserOrganization","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y23456"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-MedicalApplianceSupplier","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y34567"}}},{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"},{"extension":[{"url":"language","valueCodeableConcept":{"coding":[{"code":"fr","display":"French","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-HumanLanguage","version":"1.0.0"}]}},{"url":"interpreterRequired","valueBoolean":True}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSCommunication"},{"extension":[{"url":"PreferredWrittenCommunicationFormat","valueCodeableConcept":{"coding":[{"code":"12","display":"Braille","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredWrittenCommunicationFormat"}]}},{"url":"PreferredContactMethod","valueCodeableConcept":{"coding":[{"code":"1","display":"Letter","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredContactMethod"}]}},{"url":"PreferredContactTimes","valueString":"Not after 7pm"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactPreference"},{"url":"http://hl7.org/fhir/StructureDefinition/patient-birthPlace","valueAddress":{"city":"Manchester","country":"GBR","district":"Greater 
Manchester"}}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Jane"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}, # noqa: E231, E501
{"scenario":"Patient Does Not Exist","patient":"9111231130", "response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"not_found","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"RESOURCE_NOT_FOUND","display":"Resource not found"}]}}]}}, # noqa: E231, E501
{"scenario":"Sensetive Patient Exists","patient":"9000000025", "response":{"birthDate":"2010-10-22","deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","id":"9000000025","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000025"}],"meta":{"security":[{"code":"R","display":"restricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smythe","given":["Janet"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient"}}, # noqa: E231, E501
{"scenario": "Invalid NHS number", "patient": "9000000001", "response": {"resourceType": "OperationOutcome", "issue": [{"severity": "error", "code": "value", "details": {"coding": [{"system": "https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode", "version": "1", "code": "INVALID_RESOURCE_ID", "display": "Resource Id is invalid"}]}}]}}, # noqa: E231, E501
{"scenario": "Invalid X-Request-ID", "patient": "9000000001", "response": {"resourceType": "OperationOutcome", "issue": [{"severity": "error", "code": "value", "details": {"coding": [{"system": "https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode", "version": "1", "code": "INVALID_VALUE", "display": "Provided value is invalid"}]}, "diagnostics": "Invalid value - '1234' in header 'X-Request-ID'"}]}} # noqa: E231, E501
]
search = [
{"scenario":"Simple Search","query_params":{"family":"Smith","gender":"female","birthdate":"eq2010-10-22"},"response":{"resourceType":"Bundle","type":"searchset","total":1,"entry":[{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000009","search":{"score":1},"resource":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Jane"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}]}}, # noqa: E231, E501
{"scenario":"Wildcard Search","query_params":{"family":"Sm*","gender":"female","birthdate":"eq2010-10-22"},"response":{"resourceType":"Bundle","type":"searchset","total":2,"entry":[{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000009","search":{"score":0.8343},"resource":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Jane"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}},{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000017","search":{"score":0.8343},"resource":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 
6AE","use":"home"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000017","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000017"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smyth","given":["Jayne"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}]}}, # noqa: E231, E501
{"scenario":"Limited results Search","query_params":{"family":"Sm*","gender":"female","birthdate":"eq2010-10-22","_max-results":"2"},"response":{"resourceType":"Bundle","type":"searchset","total":2,"entry":[{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000009","search":{"score":0.8343},"resource":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Jane"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}},{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000017","search":{"score":0.8343},"resource":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West 
Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000017","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000017"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smyth","given":["Jayne"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}]}}, # noqa: E231, E501
{"scenario":"Date Range Search","response":{"resourceType":"Bundle","type":"searchset","total":1,"entry":[{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000009","search":{"score":1},"resource":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Jane"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}]}}, # noqa: E231, E501
{"scenario":"Fuzzy Search","query_params":{"family":"Smith","given":"jane","gender":"female","birthdate":"2010-10-22","_fuzzy-match":True},"response":{"resourceType":"Bundle","type":"searchset","total":1,"entry":[{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000017","search":{"score":0.8976},"resource":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000017","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000017"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":"2"},"multipleBirthInteger":1,"name":[{"family":"Smyth","given":["Jayne"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}]}}, # noqa: E231, E501
{"scenario": "Restricted Patient Search","query_params": {"family": "Smythe", "given": "janet", "gender": "female", "birthdate": "eq2005-06-16"}, "response": {"resourceType": "Bundle", "type": "searchset", "total": 1, "entry": [{"fullUrl": "https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000025", "search": {"score": 1}, "resource": {"birthDate": "2005-06-16", "deceasedDateTime": "2005-06-16T00:00:00+00:00", "extension": [{"extension": [{"url": "deathNotificationStatus", "valueCodeableConcept": {"coding": [{"code": "2", "display": "Formal - death notice received from Registrar of Deaths", "system": "https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus", "version": "1.0.0"}]}}, {"url": "systemEffectiveDate", "valueDateTime": "2005-06-16T00:00:00+00:00"}], "url": "https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"}], "gender": "female", "id": "9000000025","identifier": [{"extension": [{"url": "https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus", "valueCodeableConcept": {"coding": [{"code": "01", "display": "Number present and verified", "system": "https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus", "version": "1.0.0"}]}}], "system": "https://fhir.nhs.uk/Id/nhs-number", "value": "9000000025"}], "meta": {"security": [{"code": "R", "display": "restricted", "system": "https://www.hl7.org/fhir/valueset-security-labels.html"}], "versionId": "2"}, "multipleBirthInteger": 1, "name": [{"family": "Smythe", "given": ["Janet"], "id": "123", "period": {"end": "2021-12-31", "start": "2020-01-01"}, "prefix": ["Mrs"], "suffix": ["MBE"], "use": "usual"}], "resourceType": "Patient"}}]}}, # noqa: E231, E501
{"scenario":"Unsuccessful Search","query_params":{"family":"Bingham","given":"john","gender":"male","birthdate":"1934-12-18"},"response":{"resourceType":"Bundle","type":"searchset","total":0}}, # noqa: E231, E501
{"scenario": "Invalid Date Format Search","query_params": {"family": "Smith", "given": "jane", "gender": "female", "birthdate": "20101022"}, "response": {"resourceType": "OperationOutcome", "issue": [{"severity": "error", "code": "value", "details": {"coding": [{"system": "https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode", "version": "1", "code": "INVALID_SEARCH_DATA", "display": "Search data is invalid"}]}, "diagnostics": "Invalid value - '20101022' in field 'birthdate'"}]}}, # noqa: E231, E501
{"scenario":"Too Few Search Parameters","response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"required","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"MISSING_VALUE","display":"Required value is missing"}]},"diagnostics":"Not enough search parameters were provided to be able to make a search"}]}}, # noqa: E231, E501
]
update = [
{"scenario":"Add New Name", "patient":"9000000009","patient_record":2,"patch":{"patches": [{"op": "add", "path": "/name/-", "value": {"use": "usual", "period": {"start": "2019-12-31"}, "prefix": "Dr", "given": ["Joe", "Horation", "Maximus"], "family": "Bloggs", "suffix": "PhD"}}]},"response":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"},{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"T456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","text":"Student Accommodation","use":"temp"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NominatedPharmacy","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-PreferredDispenserOrganization","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y23456"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-MedicalApplianceSupplier","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y34567"}}},{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"},{"extension":[{"url":"language","valueCodeableConcept":{"coding":[{"code":"fr","display":"French","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-HumanLanguage","version":"1.0.0"}]}},{"url":"interpreterRequired","valueBoolean":True}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSCommunication"},{"extension":[{"url":"PreferredWrittenCommunicationFormat","valueCodeableConcept":{"coding":[{"code":"12","display":"Braille","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredWrittenCommunicationFormat"}]}},{"url":"PreferredContactMethod","valueCodeableConcept":{"coding":[{"code":"1","display":"Letter","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredContactMethod"}]}},{"url":"PreferredContactTimes","valueString":"Not after 
7pm"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactPreference"},{"url":"http://hl7.org/fhir/StructureDefinition/patient-birthPlace","valueAddress":{"city":"Manchester","country":"GBR","district":"Greater Manchester"}}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":3},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Jane"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"},{"use":"usual","period":{"start":"2019-12-31"},"prefix":"Dr","given":["Joe","Horation","Maximus"],"family":"Bloggs","suffix":"PhD"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}, # noqa: E231, E501
{"scenario":"Replace Given Name", "patient":"9000000009","patient_record":2,"patch":{"patches":[{"op":"replace","path":"/name/0/given/0","value":"Anne"}]},"response":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"},{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"T456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","text":"Student Accommodation","use":"temp"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NominatedPharmacy","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-PreferredDispenserOrganization","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y23456"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-MedicalApplianceSupplier","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y34567"}}},{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"},{"extension":[{"url":"language","valueCodeableConcept":{"coding":[{"code":"fr","display":"French","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-HumanLanguage","version":"1.0.0"}]}},{"url":"interpreterRequired","valueBoolean":True}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSCommunication"},{"extension":[{"url":"PreferredWrittenCommunicationFormat","valueCodeableConcept":{"coding":[{"code":"12","display":"Braille","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredWrittenCommunicationFormat"}]}},{"url":"PreferredContactMethod","valueCodeableConcept":{"coding":[{"code":"1","display":"Letter","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredContactMethod"}]}},{"url":"PreferredContactTimes","valueString":"Not after 7pm"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactPreference"},{"url":"http://hl7.org/fhir/StructureDefinition/patient-birthPlace","valueAddress":{"city":"Manchester","country":"GBR","district":"Greater 
Manchester"}}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":3},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Anne"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}, # noqa: E231, E501
{"scenario":"Remove Suffix from Name", "patient":"9000000009","patient_record":2,"patch":{"patches":[{"op":"test","path":"/name/0/id","value":"123"},{"op":"remove","path":"/name/0/suffix/0"}]},"response":{"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"},{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"id":"T456","line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","text":"Student Accommodation","use":"temp"}],"birthDate":"2010-10-22","contact":[{"id":"C123","period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"C","display":"Emergency Contact","system":"http://terminology.hl7.org/CodeSystem/v2-0131"}]}],"telecom":[{"system":"phone","value":"01632960587"}]}],"deceasedDateTime":"2010-10-22T00:00:00+00:00","extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NominatedPharmacy","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-PreferredDispenserOrganization","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y23456"}}},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-MedicalApplianceSupplier","valueReference":{"identifier":{"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y34567"}}},{"extension":[{"url":"deathNotificationStatus","valueCodeableConcept":{"coding":[{"code":"2","display":"Formal - death notice received from Registrar of Deaths","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-DeathNotificationStatus","version":"1.0.0"}]}},{"url":"systemEffectiveDate","valueDateTime":"2010-10-22T00:00:00+00:00"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-DeathNotificationStatus"},{"extension":[{"url":"language","valueCodeableConcept":{"coding":[{"code":"fr","display":"French","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-HumanLanguage","version":"1.0.0"}]}},{"url":"interpreterRequired","valueBoolean":True}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSCommunication"},{"extension":[{"url":"PreferredWrittenCommunicationFormat","valueCodeableConcept":{"coding":[{"code":"12","display":"Braille","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredWrittenCommunicationFormat"}]}},{"url":"PreferredContactMethod","valueCodeableConcept":{"coding":[{"code":"1","display":"Letter","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredContactMethod"}]}},{"url":"PreferredContactTimes","valueString":"Not after 7pm"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactPreference"},{"url":"http://hl7.org/fhir/StructureDefinition/patient-birthPlace","valueAddress":{"city":"Manchester","country":"GBR","district":"Greater 
Manchester"}}],"gender":"female","generalPractitioner":[{"id":"254406A3","identifier":{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"https://fhir.nhs.uk/Id/ods-organization-code","value":"Y12345"},"type":"Organization"}],"id":"9000000009","identifier":[{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSNumberVerificationStatus","valueCodeableConcept":{"coding":[{"code":"01","display":"Number present and verified","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-NHSNumberVerificationStatus","version":"1.0.0"}]}}],"system":"https://fhir.nhs.uk/Id/nhs-number","value":"9000000009"}],"meta":{"security":[{"code":"U","display":"unrestricted","system":"https://www.hl7.org/fhir/valueset-security-labels.html"}],"versionId":3},"multipleBirthInteger":1,"name":[{"family":"Smith","given":["Jane"],"id":"123","period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":[],"use":"usual"}],"resourceType":"Patient","telecom":[{"id":"789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"},{"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-OtherContactSystem","valueCoding":{"code":"textphone","display":"Minicom (Textphone)","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-OtherContactSystem"}}],"id":"OC789","period":{"end":"2021-12-31","start":"2020-01-01"},"system":"other","use":"home","value":"01632960587"}]}}, # noqa: E231, E501
{"scenario":"No Patch Sent", "patient":"9000000009","patient_record":2,"patch":{},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"structure","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"INVALID_UPDATE","display":"Update is invalid"}]},"diagnostics":"Invalid update with error - No patches found"}]}}, # noqa: E231, E501
{"scenario":"Incorrect resource version", "patient":"9000000009","patient_record":3,"patch":{"patches":[{"op":"test","path":"/name/0/id","value":"123"},{"op":"remove","path":"/name/0/suffix/0"}]},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"structure","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"PRECONDITION_FAILED","display":"Required condition was not fulfilled"}]},"diagnostics":"Invalid update with error - This resource has changed since you last read. Please re-read and try again with the new version number."}]}}, # noqa: E231, E501
{"scenario":"Invalid Request ID", "patient":"9000000009","patient_record":2,"patch":{"patches":[{"op":"test","path":"/name/0/id","value":"123"},{"op":"remove","path":"/name/0/suffix/0"}]},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"value","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"INVALID_VALUE","display":"Provided value is invalid"}]},"diagnostics":"Invalid value - '12345' in header 'X-Request-ID'"}]}}, # noqa: E231, E501
{"scenario":"Missing If Match Header", "patient":"9000000009","patch":{"patches":[{"op":"test","path":"/name/0/id","value":"123"},{"op":"remove","path":"/name/0/suffix/0"}]},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"structure","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"PRECONDITION_FAILED","display":"Required condition was not fulfilled"}]},"diagnostics":"Invalid update with error - If-Match header must be supplied to update this resource"}]}}, # noqa: E231, E501
{"scenario":"Incorrect Content Type", "patient":"9000000009","patch":{"patches":[{"op":"test","path":"/name/0/id","value":"123"},{"op":"remove","path":"/name/0/suffix/0"}]},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"processing","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"UNSUPPORTED_SERVICE","display":"Unsupported Service"}]}}]}}, # noqa: E231, E501
{"scenario":"Invalid patch", "patient":"9000000009","patient_record":2, "patch":{"patches":[{"op":"bad_value","path":"not a path"}]},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"structure","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"INVALID_UPDATE","display":"Update is invalid"}]},"diagnostics":"Invalid patch: Operation `op` property is not one of operations defined in RFC-6902"}]}}, # noqa: E231, E501
{"scenario":"Invalid NHS Number", "patient":"9000000000","patient_record":2,"patch":{"patches":[{"op":"test","path":"/name/0/id","value":"123"},{"op":"remove","path":"/name/0/suffix/0"}]},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"value","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"INVALID_RESOURCE_ID","display":"Resource Id is invalid"}]}}]}}, # noqa: E231, E501
{"scenario":"Patient does not Exist", "patient":"9111231130","patient_record":2,"patch":{"patches":[{"op":"test","path":"/name/0/id","value":"123"},{"op":"remove","path":"/name/0/suffix/0"}]},"response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"not_found","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"RESOURCE_NOT_FOUND","display":"Resource not found"}]}}]}} # noqa: E231, E501
]
relatedPerson = [
{"scenario":"Related Person Exists","patient":"9000000009", "response":{"resourceType":"Bundle","type":"searchset","total":2,"entry":[{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000009/RelatedPerson/507B7621","resource":{"active":True,"address":[{"extension":[{"extension":[{"url":"type","valueCoding":{"code":"PAF","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AddressKeyType"}},{"url":"value","valueString":"12345678"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-AddressKey"}],"line":["1 Trevelyan Square","Boar Lane","City Centre","Leeds","West Yorkshire"],"period":{"end":"2021-12-31","start":"2020-01-01"},"postalCode":"LS1 6AE","use":"home"}],"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-CopyCorrespondenceIndicator","valueBoolean":True},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactRank","valuePositiveInt":1},{"extension":[{"url":"PreferredWrittenCommunicationFormat","valueCodeableConcept":{"coding":[{"code":"12","display":"Braille","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredWrittenCommunicationFormat"}]}},{"url":"PreferredContactMethod","valueCodeableConcept":{"coding":[{"code":"1","display":"Letter","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredContactMethod"}]}},{"url":"PreferredContactTimes","valueString":"Not after 7pm"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactPreference"},{"extension":[{"url":"language","valueCodeableConcept":{"coding":[{"code":"fr","display":"French","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-HumanLanguage","version":"1.0.0"}]}},{"url":"interpreterRequired","valueBoolean":True}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSCommunication"}],"id":"507B7621","name":[{"family":"Smith","given":["Jane"],"period":{"end":"2021-12-31","start":"2020-01-01"},"prefix":["Mrs"],"suffix":["MBE"],"use":"usual"}],"patient":{"identifier":{"system":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient","value":"90000000009"},"reference":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/90000000009","type":"Patient"},"period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"Guardian","display":"Guardian of patient","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AdditionalRelatedPersonRole"}]}],"resourceType":"RelatedPerson","telecom":[{"period":{"end":"2021-12-31","start":"2020-01-01"},"system":"phone","use":"home","value":"01632960587"}]}},{"fullUrl":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/9000000009/RelatedPerson/B3380E98","resource":{"active":True,"extension":[{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-CopyCorrespondenceIndicator","valueBoolean":True},{"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactRank","valuePositiveInt":1},{"extension":[{"url":"PreferredWrittenCommunicationFormat","valueCodeableConcept":{"coding":[{"code":"12","display":"Braille","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredWrittenCommunicationFormat"}]}},{"url":"PreferredContactMethod","valueCodeableConcept":{"coding":[{"code":"1","display":"Letter","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-PreferredContactMethod"}]}},{"url":"PreferredContactTimes","valueString":"Not after 
7pm"}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-ContactPreference"},{"extension":[{"url":"language","valueCodeableConcept":{"coding":[{"code":"fr","display":"French","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-HumanLanguage","version":"1.0.0"}]}},{"url":"interpreterRequired","valueBoolean":True}],"url":"https://fhir.nhs.uk/R4/StructureDefinition/Extension-UKCore-NHSCommunication"}],"id":"B3380E98","patient":{"identifier":{"system":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient","value":"90000000009"},"reference":"https://api.service.nhs.uk/personal-demographics/FHIR/R4/Patient/90000000009","type":"Patient"},"period":{"end":"2021-12-31","start":"2020-01-01"},"relationship":[{"coding":[{"code":"Guardian","display":"Guardian of patient","system":"https://fhir.nhs.uk/R4/CodeSystem/UKCore-AdditionalRelatedPersonRole"}]}],"resourceType":"RelatedPerson"}}]}}, # noqa: E231, E501
{"scenario":"Patient Does Not Exist","patient":"9111231130","response":{"resourceType":"OperationOutcome","issue":[{"severity":"error","code":"not_found","details":{"coding":[{"system":"https://fhir.nhs.uk/R4/CodeSystem/Spine-ErrorOrWarningCode","version":"1","code":"RESOURCE_NOT_FOUND","display":"Resource not found"}]}}]}}, # noqa: E231, E501
{"scenario": "Related Person Does Not Exist", "patient": "9000000025", "response": {"resourceType":"Bundle","type":"searchset","total":0}} # noqa: E231, E501
]
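# Illustrative use of the fixture lists above (a sketch only, not part of the original
# module): each list can drive a parametrised pytest test keyed on "scenario".
# The test name and the sandbox_client helper below are hypothetical.
#
#   import pytest
#
#   @pytest.mark.parametrize("case", update, ids=lambda case: case["scenario"])
#   def test_update_scenarios(case, sandbox_client):
#       result = sandbox_client.patch_patient(case["patient"], case["patch"])
#       assert result == case["response"]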
| python |
from django.contrib import admin
from .models import Ads
class AdsAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("title",)}
admin.site.register(Ads, AdsAdmin)
| python |
# Import the Die class used to give each Player two dice.
from src.homework.homework9.die import Die
'''
Create a Player class.
'''
class Player:
def __init__(self):
'''
Constructor method creates two Die attributes die1 and die2
'''
self.die1 = Die()
self.die2 = Die()
def roll_doubles(self):
'''
        Roll die1 and die2 (the attributes created in the constructor), display the
        rolled values, and keep rolling until a double is rolled.
'''
        # Start with two unequal values so the while loop runs at least once.
        roll1 = 1
        roll2 = 2
        while roll1 != roll2:
            roll1 = self.die1.roll()
            roll2 = self.die2.roll()
            print('You got a', roll1, 'and a', roll2)
        # The loop exits only once both dice show the same value.
        print('Doubles! You got a', roll1, 'and a', roll2)
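# Minimal usage sketch: relies only on the Die.roll() method already used above.
if __name__ == '__main__':
    player = Player()
    player.roll_doubles()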
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This guard is needed because multiprocessing should not import nsz,
# as it would not be able to obtain __main__.__file__ and would crash inside Keys.py
if __name__ == '__main__':
import sys
if sys.hexversion < 0x03060000:
raise ImportError("NSZ requires at least Python 3.6!\nCurrent python version is " + sys.version)
import multiprocessing
multiprocessing.freeze_support()
import nsz
nsz.main()
| python |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Cosmos module wrapping the public and private key cryptography and ledger api."""
import base64
import hashlib
import json
import logging
import os
import subprocess # nosec
import tempfile
import time
from pathlib import Path
from typing import Any, BinaryIO, Dict, Optional, Tuple
from bech32 import bech32_encode, convertbits
from ecdsa import SECP256k1, SigningKey, VerifyingKey
from ecdsa.util import sigencode_string_canonize
import requests
from aea.crypto.base import Crypto, FaucetApi, Helper, LedgerApi
from aea.helpers.base import try_decorator
from aea.mail.base import Address
logger = logging.getLogger(__name__)
_COSMOS = "cosmos"
COSMOS_TESTNET_FAUCET_URL = "https://faucet-agent-land.prod.fetch-ai.com:443/claim"
TESTNET_NAME = "testnet"
DEFAULT_ADDRESS = "https://rest-agent-land.prod.fetch-ai.com:443"
DEFAULT_CURRENCY_DENOM = "atestfet"
DEFAULT_CHAIN_ID = "agent-land"
class CosmosCrypto(Crypto[SigningKey]):
"""Class wrapping the Account Generation from Ethereum ledger."""
identifier = _COSMOS
def __init__(self, private_key_path: Optional[str] = None):
"""
        Instantiate a cosmos crypto object.
:param private_key_path: the private key path of the agent
"""
super().__init__(private_key_path=private_key_path)
self._public_key = self.entity.get_verifying_key().to_string("compressed").hex()
self._address = CosmosHelper.get_address_from_public_key(self.public_key)
@property
def private_key(self) -> str:
"""
Return a private key.
:return: a private key string
"""
return self.entity.to_string().hex()
@property
def public_key(self) -> str:
"""
Return a public key in hex format.
:return: a public key string in hex format
"""
return self._public_key
@property
def address(self) -> str:
"""
Return the address for the key pair.
:return: a display_address str
"""
return self._address
@classmethod
def load_private_key_from_path(cls, file_name) -> SigningKey:
"""
Load a private key in hex format from a file.
:param file_name: the path to the hex file.
:return: the Entity.
"""
path = Path(file_name)
with open(path, "r") as key:
data = key.read()
signing_key = SigningKey.from_string(bytes.fromhex(data), curve=SECP256k1)
return signing_key
def sign_message(self, message: bytes, is_deprecated_mode: bool = False) -> str:
"""
Sign a message in bytes string form.
:param message: the message to be signed
:param is_deprecated_mode: if the deprecated signing is used
:return: signature of the message in string form
"""
signature_compact = self.entity.sign_deterministic(
message, hashfunc=hashlib.sha256, sigencode=sigencode_string_canonize,
)
signature_base64_str = base64.b64encode(signature_compact).decode("utf-8")
return signature_base64_str
@staticmethod
def format_default_transaction(
transaction: Any, signature: str, base64_pbk: str
) -> Any:
"""
Format default CosmosSDK transaction and add signature
:param transaction: the transaction to be formatted
:param signature: the transaction signature
:param base64_pbk: the base64 formatted public key
:return: formatted transaction with signature
"""
pushable_tx = {
"tx": {
"msg": transaction["msgs"],
"fee": transaction["fee"],
"memo": transaction["memo"],
"signatures": [
{
"signature": signature,
"pub_key": {
"type": "tendermint/PubKeySecp256k1",
"value": base64_pbk,
},
"account_number": transaction["account_number"],
"sequence": transaction["sequence"],
}
],
},
"mode": "async",
}
return pushable_tx
@staticmethod
def format_wasm_transaction(
transaction: Any, signature: str, base64_pbk: str
) -> Any:
"""
Format CosmWasm transaction and add signature
:param transaction: the transaction to be formatted
:param signature: the transaction signature
:param base64_pbk: the base64 formatted public key
:return: formatted transaction with signature
"""
pushable_tx = {
"type": "cosmos-sdk/StdTx",
"value": {
"msg": transaction["msgs"],
"fee": transaction["fee"],
"signatures": [
{
"pub_key": {
"type": "tendermint/PubKeySecp256k1",
"value": base64_pbk,
},
"signature": signature,
}
],
"memo": transaction["memo"],
},
}
return pushable_tx
def sign_transaction(self, transaction: Any) -> Any:
"""
Sign a transaction in bytes string form.
:param transaction: the transaction to be signed
:return: signed transaction
"""
transaction_str = json.dumps(transaction, separators=(",", ":"), sort_keys=True)
transaction_bytes = transaction_str.encode("utf-8")
signed_transaction = self.sign_message(transaction_bytes)
base64_pbk = base64.b64encode(bytes.fromhex(self.public_key)).decode("utf-8")
if (
"msgs" in transaction
and len(transaction["msgs"]) == 1
and "type" in transaction["msgs"][0]
and "wasm" in transaction["msgs"][0]["type"]
):
return self.format_wasm_transaction(
transaction, signed_transaction, base64_pbk
)
else:
return self.format_default_transaction(
transaction, signed_transaction, base64_pbk
)
@classmethod
def generate_private_key(cls) -> SigningKey:
"""Generate a key pair for cosmos network."""
signing_key = SigningKey.generate(curve=SECP256k1)
return signing_key
def dump(self, fp: BinaryIO) -> None:
"""
Serialize crypto object as binary stream to `fp` (a `.write()`-supporting file-like object).
:param fp: the output file pointer. Must be set in binary mode (mode='wb')
:return: None
"""
fp.write(self.private_key.encode("utf-8"))
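# Usage sketch for CosmosCrypto (illustrative; assumes the aea Crypto base class
# generates a fresh key when no private_key_path is supplied):
#
#   crypto = CosmosCrypto()
#   signature = crypto.sign_message(b"hello")
#   print(crypto.address, signature)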
class CosmosHelper(Helper):
"""Helper class usable as Mixin for CosmosApi or as standalone class."""
@staticmethod
def is_transaction_settled(tx_receipt: Any) -> bool:
"""
Check whether a transaction is settled or not.
        :param tx_receipt: the receipt associated with the transaction.
        :return: True if the transaction has been settled, False otherwise.
"""
is_successful = False
if tx_receipt is not None:
# TODO: quick fix only, not sure this is reliable
is_successful = True
return is_successful
@staticmethod
def is_transaction_valid(
tx: Any, seller: Address, client: Address, tx_nonce: str, amount: int,
) -> bool:
"""
Check whether a transaction is valid or not.
:param tx: the transaction.
:param seller: the address of the seller.
:param client: the address of the client.
:param tx_nonce: the transaction nonce.
:param amount: the amount we expect to get from the transaction.
        :return: True if the sender, recipient and amount recovered from the transaction match the expected values
"""
if tx is None:
return False # pragma: no cover
try:
_tx = tx.get("tx").get("value").get("msg")[0]
recovered_amount = int(_tx.get("value").get("amount")[0].get("amount"))
sender = _tx.get("value").get("from_address")
recipient = _tx.get("value").get("to_address")
is_valid = (
recovered_amount == amount and sender == client and recipient == seller
)
except (KeyError, IndexError): # pragma: no cover
is_valid = False
return is_valid
@staticmethod
def generate_tx_nonce(seller: Address, client: Address) -> str:
"""
Generate a unique hash to distinguish txs with the same terms.
:param seller: the address of the seller.
:param client: the address of the client.
:return: return the hash in hex.
"""
time_stamp = int(time.time())
aggregate_hash = hashlib.sha256(
b"".join([seller.encode(), client.encode(), time_stamp.to_bytes(32, "big")])
)
return aggregate_hash.hexdigest()
@staticmethod
def get_address_from_public_key(public_key: str) -> str:
"""
Get the address from the public key.
:param public_key: the public key
:return: str
"""
public_key_bytes = bytes.fromhex(public_key)
s = hashlib.new("sha256", public_key_bytes).digest()
r = hashlib.new("ripemd160", s).digest()
five_bit_r = convertbits(r, 8, 5)
assert five_bit_r is not None, "Unsuccessful bech32.convertbits call"
address = bech32_encode(_COSMOS, five_bit_r)
return address
@staticmethod
def recover_message(
message: bytes, signature: str, is_deprecated_mode: bool = False
) -> Tuple[Address, ...]:
"""
Recover the addresses from the hash.
:param message: the message we expect
:param signature: the transaction signature
:param is_deprecated_mode: if the deprecated signing was used
:return: the recovered addresses
"""
signature_b64 = base64.b64decode(signature)
verifying_keys = VerifyingKey.from_public_key_recovery(
signature_b64, message, SECP256k1, hashfunc=hashlib.sha256,
)
public_keys = [
verifying_key.to_string("compressed").hex()
for verifying_key in verifying_keys
]
addresses = [
CosmosHelper.get_address_from_public_key(public_key)
for public_key in public_keys
]
return tuple(addresses)
@staticmethod
def get_hash(message: bytes) -> str:
"""
Get the hash of a message.
:param message: the message to be hashed.
:return: the hash of the message.
"""
digest = hashlib.sha256(message).hexdigest()
return digest
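# Illustrative round trip (assumes it is run together with CosmosCrypto above):
# the signer's address should be among the candidates recovered from the signature.
#
#   crypto = CosmosCrypto()
#   sig = crypto.sign_message(b"payload")
#   assert crypto.address in CosmosHelper.recover_message(b"payload", sig)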
class CosmosApi(LedgerApi, CosmosHelper):
"""Class to interact with the Cosmos SDK via a HTTP APIs."""
identifier = _COSMOS
def __init__(self, **kwargs):
"""
        Initialize the Cosmos ledger APIs.
"""
self._api = None
self.network_address = kwargs.pop("address", DEFAULT_ADDRESS)
self.denom = kwargs.pop("denom", DEFAULT_CURRENCY_DENOM)
self.chain_id = kwargs.pop("chain_id", DEFAULT_CHAIN_ID)
@property
def api(self) -> None:
"""Get the underlying API object."""
return self._api
def get_balance(self, address: Address) -> Optional[int]:
"""Get the balance of a given account."""
balance = self._try_get_balance(address)
return balance
@try_decorator(
"Encountered exception when trying get balance: {}",
logger_method=logger.warning,
)
def _try_get_balance(self, address: Address) -> Optional[int]:
"""Try get the balance of a given account."""
balance = None # type: Optional[int]
url = self.network_address + f"/bank/balances/{address}"
response = requests.get(url=url)
if response.status_code == 200:
result = response.json()["result"]
if len(result) == 0:
balance = 0
else:
balance = int(result[0]["amount"])
return balance
def get_deploy_transaction(
self,
contract_interface: Dict[str, str],
deployer_address: Address,
tx_fee: int = 0,
gas: int = 80000,
denom: Optional[str] = None,
memo: str = "",
chain_id: Optional[str] = None,
**kwargs,
) -> Dict[str, Any]:
"""
Create a CosmWasm bytecode deployment transaction.
        :param contract_interface: the contract interface, including the wasm bytecode to deploy.
        :param deployer_address: the address of the deploying account.
        :param tx_fee: the transaction fee.
        :param gas: Maximum amount of gas to be used on executing command.
        :param denom: the denomination of the transaction fee.
        :param memo: Any string comment.
        :param chain_id: the Chain ID of the CosmWasm transaction. Defaults to the API's configured chain ID.
        :return: the unsigned CosmWasm contract deploy message
"""
denom = denom if denom is not None else self.denom
chain_id = chain_id if chain_id is not None else self.chain_id
account_number, sequence = self._try_get_account_number_and_sequence(
deployer_address
)
deploy_msg = {
"type": "wasm/store-code",
"value": {
"sender": deployer_address,
"wasm_byte_code": contract_interface["wasm_byte_code"],
"source": "",
"builder": "",
},
}
tx = self._get_transaction(
account_number,
chain_id,
tx_fee,
denom,
gas,
memo,
sequence,
msg=deploy_msg,
)
return tx
def get_init_transaction(
self,
deployer_address: Address,
code_id: int,
init_msg: Any,
amount: int,
tx_fee: int,
gas: int = 80000,
denom: Optional[str] = None,
label: str = "",
memo: str = "",
chain_id: Optional[str] = None,
) -> Optional[Any]:
"""
Create a CosmWasm InitMsg transaction.
:param deployer_address: the deployer address of the message initiator.
:param amount: Contract's initial funds amount
:param code_id: the ID of contract bytecode.
:param init_msg: the InitMsg containing parameters for contract constructor.
:param gas: Maximum amount of gas to be used on executing command.
:param denom: the name of the denomination of the contract funds
:param label: the label name of the contract
:param memo: Any string comment.
        :param chain_id: the Chain ID of the CosmWasm transaction. Defaults to the API's configured chain ID.
:return: the unsigned CosmWasm InitMsg
"""
denom = denom if denom is not None else self.denom
chain_id = chain_id if chain_id is not None else self.chain_id
account_number, sequence = self._try_get_account_number_and_sequence(
deployer_address
)
instantiate_msg = {
"type": "wasm/instantiate",
"value": {
"sender": deployer_address,
"code_id": str(code_id),
"label": label,
"init_msg": init_msg,
"init_funds": [{"denom": denom, "amount": str(amount)}],
},
}
tx = self._get_transaction(
account_number,
chain_id,
tx_fee,
denom,
gas,
memo,
sequence,
msg=instantiate_msg,
)
return tx
def get_handle_transaction(
self,
sender_address: Address,
contract_address: Address,
handle_msg: Any,
amount: int,
tx_fee: int,
denom: Optional[str] = None,
gas: int = 80000,
memo: str = "",
chain_id: Optional[str] = None,
) -> Optional[Any]:
"""
Create a CosmWasm HandleMsg transaction.
:param sender_address: the sender address of the message initiator.
:param contract_address: the address of the smart contract.
:param handle_msg: HandleMsg in JSON format.
:param gas: Maximum amount of gas to be used on executing command.
:param memo: Any string comment.
        :param chain_id: the Chain ID of the CosmWasm transaction. Defaults to the API's configured chain ID.
:return: the unsigned CosmWasm HandleMsg
"""
denom = denom if denom is not None else self.denom
chain_id = chain_id if chain_id is not None else self.chain_id
account_number, sequence = self._try_get_account_number_and_sequence(
sender_address
)
execute_msg = {
"type": "wasm/execute",
"value": {
"sender": sender_address,
"contract": contract_address,
"msg": handle_msg,
"sent_funds": [{"amount": str(amount), "denom": denom}],
},
}
tx = self._get_transaction(
account_number,
chain_id,
tx_fee,
denom,
gas,
memo,
sequence,
msg=execute_msg,
)
return tx
@staticmethod
@try_decorator(
"Encountered exception when trying to execute wasm transaction: {}",
logger_method=logger.warning,
)
def try_execute_wasm_transaction(
tx_signed: Any, signed_tx_filename: str = "tx.signed"
) -> Optional[str]:
"""
        Broadcast a signed CosmWasm transaction via the wasmcli binary.
:param tx_signed: the signed transaction.
:return: the transaction digest
"""
with tempfile.TemporaryDirectory() as tmpdirname:
with open(os.path.join(tmpdirname, signed_tx_filename), "w") as f:
f.write(json.dumps(tx_signed))
command = [
"wasmcli",
"tx",
"broadcast",
os.path.join(tmpdirname, signed_tx_filename),
]
stdout, _ = subprocess.Popen( # nosec
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
).communicate()
return stdout.decode("ascii")
@staticmethod
@try_decorator(
"Encountered exception when trying to execute wasm query: {}",
logger_method=logger.warning,
)
def try_execute_wasm_query(
contract_address: Address, query_msg: Any
) -> Optional[str]:
"""
Execute a CosmWasm QueryMsg. QueryMsg doesn't require signing.
:param contract_address: the address of the smart contract.
:param query_msg: QueryMsg in JSON format.
:return: the message receipt
"""
command = [
"wasmcli",
"query",
"wasm",
"contract-state",
"smart",
str(contract_address),
json.dumps(query_msg),
]
stdout, _ = subprocess.Popen( # nosec
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
).communicate()
return stdout.decode("ascii")
def get_transfer_transaction( # pylint: disable=arguments-differ
self,
sender_address: Address,
destination_address: Address,
amount: int,
tx_fee: int,
tx_nonce: str,
denom: Optional[str] = None,
gas: int = 80000,
memo: str = "",
chain_id: Optional[str] = None,
**kwargs,
) -> Optional[Any]:
"""
Submit a transfer transaction to the ledger.
:param sender_address: the sender address of the payer.
:param destination_address: the destination address of the payee.
:param amount: the amount of wealth to be transferred.
:param tx_fee: the transaction fee.
:param tx_nonce: verifies the authenticity of the tx
:param denom: the denomination of tx fee and amount
:param gas: the gas used.
:param memo: memo to include in tx.
:param chain_id: the chain ID of the transaction.
:return: the transfer transaction
"""
denom = denom if denom is not None else self.denom
chain_id = chain_id if chain_id is not None else self.chain_id
account_number, sequence = self._try_get_account_number_and_sequence(
sender_address
)
transfer_msg = {
"type": "cosmos-sdk/MsgSend",
"value": {
"amount": [{"amount": str(amount), "denom": denom}],
"from_address": sender_address,
"to_address": destination_address,
},
}
tx = self._get_transaction(
account_number,
chain_id,
tx_fee,
denom,
gas,
memo,
sequence,
msg=transfer_msg,
)
return tx
@staticmethod
def _get_transaction(
account_number: int,
chain_id: str,
tx_fee: int,
denom: str,
gas: int,
memo: str,
sequence: int,
msg: Dict[str, Any],
) -> Dict[str, Any]:
"""
Get a transaction.
:param account_number: the account number.
:param chain_id: the chain ID of the transaction.
:param tx_fee: the transaction fee.
:param denom: the denomination of tx fee and amount
:param gas: the gas used.
:param memo: memo to include in tx.
:param msg: the transaction msg.
:param sequence: the sequence.
:return: the transaction
"""
tx = {
"account_number": str(account_number),
"chain_id": chain_id,
"fee": {
"amount": [{"amount": str(tx_fee), "denom": denom}],
"gas": str(gas),
},
"memo": memo,
"msgs": [msg],
"sequence": str(sequence),
}
return tx
@try_decorator(
"Encountered exception when trying to get account number and sequence: {}",
logger_method=logger.warning,
)
def _try_get_account_number_and_sequence(
self, address: Address
) -> Optional[Tuple[int, int]]:
"""
Try get account number and sequence for an address.
:param address: the address
:return: a tuple of account number and sequence
"""
result = None # type: Optional[Tuple[int, int]]
url = self.network_address + f"/auth/accounts/{address}"
response = requests.get(url=url)
if response.status_code == 200:
result = (
int(response.json()["result"]["value"]["account_number"]),
int(response.json()["result"]["value"]["sequence"]),
)
return result
def send_signed_transaction(self, tx_signed: Any) -> Optional[str]:
"""
Send a signed transaction and wait for confirmation.
:param tx_signed: the signed transaction
:return: tx_digest, if present
"""
if self.is_cosmwasm_transaction(tx_signed):
tx_digest = self.try_execute_wasm_transaction(tx_signed)
elif self.is_transfer_transaction(tx_signed):
tx_digest = self._try_send_signed_transaction(tx_signed)
else: # pragma: nocover
logger.warning(
"Cannot send transaction. Unknown transaction type: {}".format(
tx_signed
)
)
tx_digest = None
return tx_digest
@staticmethod
def is_cosmwasm_transaction(tx_signed: Any) -> bool:
"""Check whether it is a cosmwasm tx."""
try:
_type = tx_signed["value"]["msg"][0]["type"]
result = _type in ["wasm/store-code", "wasm/instantiate", "wasm/execute"]
except KeyError: # pragma: nocover
result = False
return result
@staticmethod
def is_transfer_transaction(tx_signed: Any) -> bool:
"""Check whether it is a transfer tx."""
try:
_type = tx_signed["tx"]["msg"][0]["type"]
result = _type in ["cosmos-sdk/MsgSend"]
except KeyError: # pragma: nocover
result = False
return result
@try_decorator(
"Encountered exception when trying to send tx: {}", logger_method=logger.warning
)
def _try_send_signed_transaction(self, tx_signed: Any) -> Optional[str]:
"""
Try send the signed transaction.
:param tx_signed: the signed transaction
:return: tx_digest, if present
"""
tx_digest = None # type: Optional[str]
url = self.network_address + "/txs"
response = requests.post(url=url, json=tx_signed)
if response.status_code == 200:
tx_digest = response.json()["txhash"]
return tx_digest
def get_transaction_receipt(self, tx_digest: str) -> Optional[Any]:
"""
Get the transaction receipt for a transaction digest.
:param tx_digest: the digest associated to the transaction.
:return: the tx receipt, if present
"""
tx_receipt = self._try_get_transaction_receipt(tx_digest)
return tx_receipt
@try_decorator(
"Encountered exception when trying to get transaction receipt: {}",
logger_method=logger.warning,
)
def _try_get_transaction_receipt(self, tx_digest: str) -> Optional[Any]:
"""
Try get the transaction receipt for a transaction digest.
:param tx_digest: the digest associated to the transaction.
:return: the tx receipt, if present
"""
result = None # type: Optional[Any]
url = self.network_address + f"/txs/{tx_digest}"
response = requests.get(url=url)
if response.status_code == 200:
result = response.json()
return result
def get_transaction(self, tx_digest: str) -> Optional[Any]:
"""
Get the transaction for a transaction digest.
:param tx_digest: the digest associated to the transaction.
:return: the tx, if present
"""
        # Cosmos does not distinguish between the transaction receipt and the transaction
tx_receipt = self._try_get_transaction_receipt(tx_digest)
return tx_receipt
def get_contract_instance(
self, contract_interface: Dict[str, str], contract_address: Optional[str] = None
) -> Any:
"""
Get the instance of a contract.
:param contract_interface: the contract interface.
:param contract_address: the contract address.
:return: the contract instance
"""
# Instance object not available for cosmwasm
return None
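# End-to-end transfer sketch (illustrative only: `sender` is a CosmosCrypto,
# `recipient` is a bech32 address string, and the node behind DEFAULT_ADDRESS
# must be reachable for the account and balance lookups to succeed):
#
#   api = CosmosApi()
#   nonce = api.generate_tx_nonce(recipient, sender.address)
#   tx = api.get_transfer_transaction(sender.address, recipient,
#                                     amount=1, tx_fee=1000, tx_nonce=nonce)
#   tx_digest = api.send_signed_transaction(sender.sign_transaction(tx))
#   receipt = api.get_transaction_receipt(tx_digest)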
class CosmWasmCLIWrapper:
"""Wrapper of the CosmWasm CLI."""
class CosmosFaucetApi(FaucetApi):
"""Cosmos testnet faucet API."""
identifier = _COSMOS
testnet_name = TESTNET_NAME
def get_wealth(self, address: Address) -> None:
"""
Get wealth from the faucet for the provided address.
:param address: the address.
:return: None
"""
self._try_get_wealth(address)
@staticmethod
@try_decorator(
"An error occured while attempting to generate wealth:\n{}",
logger_method=logger.error,
)
def _try_get_wealth(address: Address) -> None:
"""
Get wealth from the faucet for the provided address.
:param address: the address.
:return: None
"""
response = requests.post(
url=COSMOS_TESTNET_FAUCET_URL, data={"Address": address}
)
if response.status_code == 200:
tx_hash = response.text
logger.info("Wealth generated, tx_hash: {}".format(tx_hash))
else: # pragma: no cover
logger.warning(
"Response: {}, Text: {}".format(response.status_code, response.text)
)
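# Faucet usage sketch (testnet only; "cosmos1..." stands in for a real bech32 address):
#
#   CosmosFaucetApi().get_wealth("cosmos1...")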
| python |
from datetime import datetime
from unittest import mock
import dateutil.relativedelta
from carbonserver.api.infra.repositories.repository_projects import SqlAlchemyRepository
from carbonserver.api.usecases.project.project_sum import ProjectSumsUsecase
PROJECT_ID = "e60afa92-17b7-4720-91a0-1ae91e409ba1"
END_DATE = datetime.now()
START_DATE = END_DATE - dateutil.relativedelta.relativedelta(months=3)
EMISSIONS_SUM = 152.28955200363455
PROJECT_WITH_DETAILS = {
"project_id": PROJECT_ID,
"name": "DataForGood",
"description": "DataForGood Project",
"emissions": 152.28955200363455,
"cpu_power": 5760,
"gpu_power": 2983.9739999999993,
"ram_power": 806.0337192959997,
"cpu_energy": 191.8251863024175,
"gpu_energy": 140.01098718681496,
"ram_energy": 26.84332784201141,
"energy_consumed": 358.6795013312438,
"duration": 7673204,
"emissions_rate": 1.0984556074701752,
"emissions_count": 64,
}
def test_sum_computes_for_project_id():
repository_mock: SqlAlchemyRepository = mock.Mock(spec=SqlAlchemyRepository)
project_id = PROJECT_ID
project_global_sum_usecase = ProjectSumsUsecase(repository_mock)
expected_emission_sum = EMISSIONS_SUM
repository_mock.get_project_detailed_sums.return_value = [PROJECT_WITH_DETAILS]
actual_project_global_sum_by_experiment = (
project_global_sum_usecase.compute_detailed_sum(
project_id, START_DATE, END_DATE
)
)
assert (
actual_project_global_sum_by_experiment[0]["emissions"] == expected_emission_sum
)
| python |
# -*- coding: utf-8 -*-
'''
Created on 2017-6-22
@author: hshl.ltd
'''
from __future__ import absolute_import, unicode_literals
import warnings
from sqlalchemy import orm
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from django.conf import settings
from django.dispatch import receiver
from django.core.signals import request_finished
from django.core.exceptions import ImproperlyConfigured
from sqlalchemy_django.middleware import get_current_request
class BaseQuery(orm.Query):
def get_or_404(self, ident):
pass
def first_or_404(self):
return self.first()
def first_dict(self):
row = self.first()
return None if row is None else row.to_dict()
def all_dict(self):
rows = self.all()
if rows is None:
return None
return [row.to_dict() for row in rows]
class Model(object):
#: Query class used by :attr:`query`.
#: Defaults to :class:`SQLAlchemy.Query`, which defaults to :class:`BaseQuery`.
query_class = None
#: Convenience property to query the database for instances of this model using the current session.
#: Equivalent to ``db.session.query(Model)`` unless :attr:`query_class` has been changed.
query = None
# http://ju.outofmemory.cn/entry/200879
def to_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
def merge(self, obj):
if isinstance(obj, dict):
            for key, value in obj.items():  # items() works on both Python 2 and 3
if hasattr(self, key):
setattr(self, key, value)
class SQLAlchemy(object):
"""django SQLAlchemy主要是把sqlalchemy与web request绑定实现session的自动化管理"""
def __init__(self, session_options=None, metadata=None,
query_class=BaseQuery, model_class=Model, bind_key='default'):
self.config = self.init_config(bind_key)
self.Query = query_class
self.Session = self.create_scoped_session(session_options)
self.Model = self.make_declarative_base(model_class, metadata)
@receiver(request_finished, weak=False)
def shutdown_session(sender, **kwargs):
try:
if self.config['SQLALCHEMY_COMMIT_ON_TEARDOWN']:
self.Session.commit()
self.Session.remove()
except Exception as e:
print(e)
def get_session(self):
session = self.Session()
return session
@property
def metadata(self):
return self.Model.metadata
def create_scoped_session(self, options=None):
if options is None:
options = {}
options.setdefault('query_cls', self.Query)
return orm.scoped_session(self.create_session(options), scopefunc=get_current_request)
def create_session(self, options):
engine = create_engine(
self.config['SQLALCHEMY_DATABASE_URI'], echo=self.config['SQLALCHEMY_ECHO'], pool_size=self.config['SQLALCHEMY_POOL_SIZE'])
return orm.sessionmaker(bind=engine, **options)
def make_declarative_base(self, model, metadata=None):
"""Creates the declarative base."""
base = declarative_base(cls=model, metadata=metadata)
if not getattr(base, 'query_class', None):
base.query_class = self.Query
return base
def init_config(self, bind_key):
if not hasattr(settings, 'SQLALCHEMY_DATABASES'):
raise ImproperlyConfigured(
"SQLALCHEMY_DATABASES not find in settings"
)
sqlalchemy_config = settings.SQLALCHEMY_DATABASES
if bind_key not in sqlalchemy_config:
raise ImproperlyConfigured(
"SQLALCHEMY_DATABASES not find in settings"
)
bind_config = sqlalchemy_config[bind_key]
bind_config.setdefault('SQLALCHEMY_DATABASE_URI', 'sqlite:///:memory:')
bind_config.setdefault('SQLALCHEMY_BINDS', None)
bind_config.setdefault('SQLALCHEMY_NATIVE_UNICODE', None)
bind_config.setdefault('SQLALCHEMY_ECHO', True)
bind_config.setdefault('SQLALCHEMY_RECORD_QUERIES', None)
bind_config.setdefault('SQLALCHEMY_POOL_SIZE', None)
bind_config.setdefault('SQLALCHEMY_POOL_TIMEOUT', None)
bind_config.setdefault('SQLALCHEMY_POOL_RECYCLE', None)
bind_config.setdefault('SQLALCHEMY_MAX_OVERFLOW', None)
bind_config.setdefault('SQLALCHEMY_COMMIT_ON_TEARDOWN', True)
return bind_config
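# A minimal usage sketch, assuming hypothetical settings and model names that are not
# part of this module:
#
#   # settings.py
#   SQLALCHEMY_DATABASES = {
#       'default': {
#           'SQLALCHEMY_DATABASE_URI': 'mysql://user:pwd@localhost/app',
#           'SQLALCHEMY_ECHO': False,
#           'SQLALCHEMY_POOL_SIZE': 5,
#       }
#   }
#
#   # models.py
#   db = SQLAlchemy()
#
#   class User(db.Model):
#       __tablename__ = 'user'
#       id = Column(Integer, primary_key=True)
#
#   session = db.get_session()
#   first_user = session.query(User).first_dict()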
| python |
import os
import requests
from dotenv import load_dotenv
dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
load_dotenv(dotenv_path)
# how to generate URL https://www.youtube.com/watch?v=lEQ68HhpO4g
INCOMING_WEBHOOKS_ACCESS_URL=os.getenv("INCOMING_WEBHOOKS_ACCESS_URL")
def send_message(post_data, api_url, headers={'Content-Type': 'application/json'}):
response = requests.post(api_url, headers=headers, json=post_data)
return response
def generate_post_data(markdown_texts):
# https://api.slack.com/messaging/composing/layouts#attachments
    if not isinstance(markdown_texts, list):
markdown_texts = [markdown_texts]
post_data = {'blocks': []}
for text in markdown_texts:
content = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": text
}
}
post_data['blocks'].append(content)
print(post_data)
return post_data
def send_markdown(text_or_list_of_texts, api_url=INCOMING_WEBHOOKS_ACCESS_URL):
post_data = generate_post_data(text_or_list_of_texts)
return send_message(post_data, api_url)
def main():
post_data = generate_post_data("```hellow!!```")
send_message(post_data, api_url=INCOMING_WEBHOOKS_ACCESS_URL)
if __name__=='__main__':
main() | python |
import os
import sys
import random
import shutil
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torchvision.utils
import imgaug as ia
from torch.utils.data import DataLoader,Dataset
from torch.autograd import Variable
from torch import optim
from imgaug import augmenters as iaa
from PIL import Image
from torchsummaryX import summary
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def imshow(img,text=None,should_save=False, name=None):
npimg = img.numpy()
plt.axis("off")
if text:
plt.text(75, 8, text, style='italic',fontweight='bold',
bbox={'facecolor':'white', 'alpha':0.8, 'pad':10})
plt.imshow(np.transpose(npimg, (1, 2, 0)), cmap=plt.cm.gray)
if should_save:
plt.savefig(name)
plt.show()
def show_plot(iteration,loss):
plt.plot(iteration,loss)
plt.show()
class Augmenter():
def __init__(self, seq):
self.seq = seq
def __call__(self, img_and_annotation):
normal_image = img_and_annotation[0]
defect_image = img_and_annotation[1]
box_annotation_dict = img_and_annotation[2]
normal_image = np.array(normal_image)
defect_image = np.array(defect_image)
normal_image_aug, defect_image_aug, bbs_aug = self.augment_image(normal_image, defect_image,
box_annotation_dict, self.seq)
normal_image_aug = Image.fromarray(normal_image_aug)
defect_image_aug = Image.fromarray(defect_image_aug)
return normal_image_aug, defect_image_aug, bbs_aug
def augment_image(self, normal_image, defect_image, box_annotation_dict, seq):
bbs = self.transform_imgaug_style_boxes(box_annotation_dict)
seq_det = seq.to_deterministic()
normal_image_aug = seq_det.augment_images([normal_image])[0]
defect_image_aug = seq_det.augment_images([defect_image])[0]
bbs_aug = seq_det.augment_bounding_boxes([bbs])[0]
bbs_aug = bbs_aug.remove_out_of_image().cut_out_of_image()
        augmented_box = self.transform_annotation_information_style(box_annotation_dict, bbs_aug)
return normal_image_aug, defect_image_aug, augmented_box
@staticmethod
    def transform_annotation_information_style(box_annotation_dict, bbs_aug):
assert isinstance(box_annotation_dict, dict)
box_annotation_keys = box_annotation_dict.keys()
assert "size" in box_annotation_keys
assert "object" in box_annotation_keys
size_tag_keys = box_annotation_dict["size"].keys()
assert "width" in size_tag_keys
assert "height" in size_tag_keys
assert "depth" in size_tag_keys
assert isinstance(box_annotation_dict["object"], list)
for _object in box_annotation_dict["object"]:
_object_keys = _object.keys()
assert "name" in _object_keys
assert "xmin" in _object_keys
assert "ymin" in _object_keys
assert "xmax" in _object_keys
assert "ymax" in _object_keys
assert isinstance(bbs_aug, ia.BoundingBoxesOnImage)
objects = box_annotation_dict["object"]
objects.clear()
for i in range(len(bbs_aug.bounding_boxes)):
augmented_box = bbs_aug.bounding_boxes[i]
objects.append(
{
"name": augmented_box.label,
"xmin": augmented_box.x1,
"ymin": augmented_box.y1,
"xmax": augmented_box.x2,
"ymax": augmented_box.y2
}
)
return box_annotation_dict
@staticmethod
def transform_imgaug_style_boxes(box_annotation_dict):
assert isinstance(box_annotation_dict, dict)
box_annotation_keys = box_annotation_dict.keys()
assert "size" in box_annotation_keys
assert "object" in box_annotation_keys
size_tag_keys = box_annotation_dict["size"].keys()
assert "width" in size_tag_keys
assert "height" in size_tag_keys
assert "depth" in size_tag_keys
assert isinstance(box_annotation_dict["object"], list)
for _object in box_annotation_dict["object"]:
_object_keys = _object.keys()
assert "name" in _object_keys
assert "xmin" in _object_keys
assert "ymin" in _object_keys
assert "xmax" in _object_keys
assert "ymax" in _object_keys
image_width = int(box_annotation_dict["size"]["width"])
image_height = int(box_annotation_dict["size"]["height"])
bbs = ia.BoundingBoxesOnImage([], shape=(image_height, image_width))
for _object in box_annotation_dict["object"]:
name = _object["name"]
xmin = int(_object["xmin"])
ymin = int(_object["ymin"])
xmax = int(_object["xmax"])
ymax = int(_object["ymax"])
bbs.bounding_boxes.append(ia.BoundingBox(x1=xmin,
x2=xmax,
y1=ymin,
y2=ymax,
label=name))
return bbs
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp']
LABEL_EXTENSIONS = ['.xml']
def has_file_allowed_extension(filename, extensions):
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
extensions (iterable of strings): extensions to consider (lowercase)
Returns:
bool: True if the filename ends with one of given extensions
"""
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in extensions)
class DefectDataset(torch.utils.data.Dataset):
def __init__(self, root, transform=None):
self.folder = self._find_each_folder(root)
self.root = root
self.transform = transform
self.samples = self.load_data()
self.classes = ["defect"]
def load_data(self):
datas = list()
directory = os.path.expanduser(self.root)
for target in sorted(self.folder.keys()):
d = os.path.join(directory, target)
imgs = dict()
label = None
for a in os.scandir(d):
name = a.name.split(".")[0]
ext = a.name.split(".")[-1]
if ext == "tif":
imgs[name] = os.path.join(d, a.name)
elif ext == "xml":
label = os.path.join(d, a.name)
datas.append([imgs, label])
return datas
def __getitem__(self, index):
imgs, label = self.samples[index]
label = self._parse_voc(label)
normal_img = self.pil_loader(imgs["normal"])
defect_img = self.pil_loader(imgs["defect"])
        if self.transform is not None:
normal_aug_img, defect_aug_img, aug_label = self.transform([normal_img, defect_img, label])
if random.choice([True, False]):
# same image
image1 = normal_img.resize((Config.RESIZE[0], Config.RESIZE[1]), Image.ANTIALIAS)
image2 = normal_aug_img
label = np.array([0.], dtype=np.float)
else:
# difference image
image1 = normal_img.resize((Config.RESIZE[0], Config.RESIZE[1]), Image.ANTIALIAS)
image2 = defect_img.resize((Config.RESIZE[0], Config.RESIZE[1]), Image.ANTIALIAS)
label = np.array([1.], dtype=np.float)
        else:
if random.choice([True, False]):
# same image
image1 = normal_img.resize((Config.RESIZE[0], Config.RESIZE[1]), Image.ANTIALIAS)
image2 = normal_img.resize((Config.RESIZE[0], Config.RESIZE[1]), Image.ANTIALIAS)
label = np.array([0.], dtype=np.float)
else:
# difference image
image1 = normal_img.resize((Config.RESIZE[0], Config.RESIZE[1]), Image.ANTIALIAS)
image2 = defect_img.resize((Config.RESIZE[0], Config.RESIZE[1]), Image.ANTIALIAS)
label = np.array([1.], dtype=np.float)
image1 = image1.convert('L')
image2 = image2.convert('L')
image1 = torchvision.transforms.ToTensor()(image1)
image2 = torchvision.transforms.ToTensor()(image2)
label = torch.from_numpy(label)
return image1, image2, label
def __len__(self):
return len(self.samples)
def pil_loader(self, path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def _find_each_folder(self, dir):
if sys.version_info >= (3, 5):
# Faster and available in Python 3.5 and above
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
else:
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return class_to_idx
def _convert_box_label_to_yolo_label(self, label, classes_list):
assert isinstance(label, dict)
assert isinstance(classes_list, list)
for cls in classes_list:
assert isinstance(cls, str)
root_keys = label.keys()
size_keys = label["size"].keys()
number_of_objects = len(label["object"])
assert "size" in root_keys
assert "object" in root_keys
assert "width" in size_keys
assert "height" in size_keys
if number_of_objects == 0:
print("here")
return []
yolo_label = list()
image_size = {
"width": float(label["size"]["width"]),
"height": float(label["size"]["height"]),
}
for _object in label["object"]:
_object_keys = _object.keys()
assert "name" in _object_keys
assert "xmin" in _object_keys
assert "ymin" in _object_keys
assert "xmax" in _object_keys
assert "ymax" in _object_keys
name = _object["name"]
cls = float(classes_list.index(name))
box_coordinate = {
"xmin": float(_object["xmin"]),
"ymin": float(_object["ymin"]),
"xmax": float(_object["xmax"]),
"ymax": float(_object["ymax"]),
}
yolo_coordinate = self._convert_coordinate(image_size, box_coordinate)
yolo_coordinate.insert(0, cls)
yolo_label.append(yolo_coordinate)
return yolo_label
@staticmethod
def _parse_voc(annotation_path):
import xml.etree.ElementTree as Et
assert isinstance(annotation_path, str)
xml_file = open(annotation_path, "r")
tree = Et.parse(xml_file)
element_list = list()
for elem in tree.iter():
element_list.append(elem.tag)
assert "size" in element_list
assert "width" in element_list
assert "height" in element_list
assert "object" in element_list
assert "name" in element_list
assert "bndbox" in element_list
assert "xmin" in element_list
assert "ymin" in element_list
assert "xmax" in element_list
assert "ymax" in element_list
result = dict()
root = tree.getroot()
size_tag = root.find("size")
result["size"] = {
"width": size_tag.find("width").text,
"height": size_tag.find("height").text,
"depth": size_tag.find("depth").text
}
result["object"] = list()
objects = root.findall("object")
assert objects
for _object in objects:
result["object"].append({
"name": _object.find("name").text,
"xmin": _object.find("bndbox").find("xmin").text,
"ymin": _object.find("bndbox").find("ymin").text,
"xmax": _object.find("bndbox").find("xmax").text,
"ymax": _object.find("bndbox").find("ymax").text
})
return result
@staticmethod
def _convert_coordinate(image_size, box_coordinate):
image_size_keys = image_size.keys()
box_coordinate_keys = box_coordinate.keys()
assert "width" in image_size_keys
assert "height" in image_size_keys
assert "xmin" in box_coordinate_keys
assert "ymin" in box_coordinate_keys
assert "xmax" in box_coordinate_keys
assert "ymax" in box_coordinate_keys
assert isinstance(image_size, dict)
assert isinstance(box_coordinate, dict)
assert isinstance(image_size["width"], float)
assert isinstance(image_size["height"], float)
assert isinstance(box_coordinate["xmin"], float)
assert isinstance(box_coordinate["ymin"], float)
assert isinstance(box_coordinate["xmax"], float)
assert isinstance(box_coordinate["ymax"], float)
x_of_box = (box_coordinate["xmin"] + box_coordinate["xmax"]) / 2.0
y_of_box = (box_coordinate["ymin"] + box_coordinate["ymax"]) / 2.0
width_of_box = box_coordinate["xmax"] - box_coordinate["xmin"]
height_of_box = box_coordinate["ymax"] - box_coordinate["ymin"]
relative_x_of_center = x_of_box / image_size["width"]
relative_y_of_center = y_of_box / image_size["height"]
relative_box_width = width_of_box / image_size["width"]
relative_box_height = height_of_box / image_size["height"]
return [relative_x_of_center, relative_y_of_center,
relative_box_width, relative_box_height]
class SiameseNetwork(nn.Module):
def __init__(self, size):
self.size = size
super(SiameseNetwork, self).__init__()
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.cnn1 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(1, 4, kernel_size=3),
nn.ReLU(inplace=True),
nn.BatchNorm2d(4))
self.cnn2 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(4, 8, kernel_size=3),
nn.ReLU(inplace=True),
nn.BatchNorm2d(8)
)
self.cnn3 = nn.Sequential(
nn.ReflectionPad2d(1),
nn.Conv2d(8, 8, kernel_size=3),
nn.ReLU(inplace=True),
nn.BatchNorm2d(8),
)
self.fc1 = nn.Sequential(
nn.Linear(8 * self.size[0] * self.size[1], 500),
nn.ReLU(inplace=True),
nn.Linear(500, 500),
nn.ReLU(inplace=True),
nn.Linear(500, 5))
def forward_once(self, x):
output = self.cnn1(x)
output = self.cnn2(output)
output = self.cnn3(output)
output = output.view(output.size()[0], -1)
output = self.fc1(output)
return output
def forward(self, input1, input2):
output1 = self.forward_once(input1)
output2 = self.forward_once(input2)
return output1, output2
def summary(self):
summary(self, torch.zeros((1, 1, self.size[0], self.size[1])), input2=torch.zeros((1, 1, self.size[0], self.size[1])))
class ContrastiveLoss(torch.nn.Module):
def __init__(self, margin=2.0):
super(ContrastiveLoss, self).__init__()
self.margin = margin
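    # Contrastive loss (Hadsell et al., 2006):
    #   L = (1 - y) * d^2 + y * max(margin - d, 0)^2
    # where d is the Euclidean distance between the two embeddings and y == 1 marks a
    # dissimilar pair, matching the 0/1 labels produced by DefectDataset.__getitem__.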
def forward(self, output1, output2, label):
euclidean_distance = F.pairwise_distance(output1, output2)
loss_contrastive = torch.mean((1 - label) * torch.pow(euclidean_distance, 2) +
(label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2))
return loss_contrastive
class Config():
training_dir = "./dataset/training/"
testing_dir = "./dataset/testing/"
train_batch_size = 64
train_number_epochs = 100
RESIZE = (250, 250)
if __name__ == "__main__":
# Augmentation Demo
seq = iaa.Sequential([
iaa.Resize({"height": Config.RESIZE[0], "width": Config.RESIZE[1]}),
iaa.SomeOf(2, [iaa.Multiply((1, 1.1)), # change brightness, doesn't affect BBs
iaa.Affine(
translate_px={"x": 5, "y": 5},
scale=(1, 1)
), # translate by 40/60px on x/y axis, and scale to 50-70%, affects BBs
iaa.GaussianBlur(sigma=(0.0, 0.1)),
iaa.Affine(rotate=(-10, 10)),
])
#iaa.Sharpen(alpha=(0, 0.0001)),
#iaa.Fliplr(0.5)
])
#seq = iaa.Sometimes(0.5, iaa.Crop(percent=(0.4)))
#seq = iaa.Sequential([iaa.Crop(percent=(0.3))])
composed = transforms.Compose([Augmenter(seq)])
siamese_dataset = DefectDataset(root=Config.training_dir, transform=composed)
vis_dataloader = DataLoader(siamese_dataset,
shuffle=True,
num_workers=0,
batch_size=8)
dataiter = iter(vis_dataloader)
example_batch = next(dataiter)
concatenated = torch.cat((example_batch[0],example_batch[1]),0)
imshow(torchvision.utils.make_grid(concatenated))
print(example_batch[2].numpy())
print(example_batch[0].shape)
train_dataloader = DataLoader(siamese_dataset,
shuffle=True,
num_workers=0,
batch_size=Config.train_batch_size)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
net = SiameseNetwork(size=(250, 250))
if device.type == 'cpu':
model = torch.nn.DataParallel(net)
else:
model = torch.nn.DataParallel(net, device_ids=[0, 1]).cuda()
model.to(device)
criterion = ContrastiveLoss()
optimizer = optim.Adam(net.parameters(),lr = 0.0005)
counter = []
loss_history = []
iteration_number= 0
for epoch in range(0, Config.train_number_epochs):
for i, data in enumerate(train_dataloader, 0):
img0, img1, label = data
img0, img1, label = img0.to(device), img1.to(device), label.to(device)
optimizer.zero_grad()
output1, output2 = model(img0, img1)
label = label.double()
output1 = output1.double()
output2 = output2.double()
loss_contrastive = criterion(output1, output2, label)
loss_contrastive.backward()
optimizer.step()
if i % 10 == 0:
print("Epoch number {}\n Current loss {}\n".format(epoch, loss_contrastive.item()))
iteration_number += 10
counter.append(iteration_number)
loss_history.append(loss_contrastive.item())
show_plot(counter, loss_history)
save_checkpoint({
'epoch': epoch + 1,
'arch': "YOLOv1",
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
}, False, filename=os.path.join("./", 'result.pth.tar'))
# TEST
"""
siamese_dataset = DefectDataset(root=Config.testing_dir, transform=None)
test_dataloader = DataLoader(siamese_dataset, num_workers=6, batch_size=1, shuffle=True)
dataiter = iter(test_dataloader)
x0, _, _ = next(dataiter)
for i in range(10):
_, x1, label2 = next(dataiter)
concatenated = torch.cat((x0, x1), 0)
output1, output2 = net(Variable(x0).cuda(), Variable(x1).cuda())
euclidean_distance = F.pairwise_distance(output1, output2)
imshow(torchvision.utils.make_grid(concatenated), 'Dissimilarity: {:.2f}'.format(euclidean_distance.item()))
""" | python |
from django.http import Http404
from django.test.testcases import TestCase
from corehq.apps.app_manager.models import (
AdvancedModule,
Application,
BuildProfile,
GlobalAppConfig,
LatestEnabledBuildProfiles,
Module,
)
from corehq.apps.app_manager.views.utils import get_default_followup_form_xml
from corehq.apps.domain.models import Domain
class TestGetDefaultFollowupForm(TestCase):
def test_default_followup_form(self):
app = Application.new_app('domain', "Untitled Application")
parent_module = app.add_module(AdvancedModule.new_module('parent', None))
parent_module.case_type = 'parent'
parent_module.unique_id = 'id_parent_module'
context = {
'lang': None,
'default_label': "Default label message"
}
attachment = get_default_followup_form_xml(context=context)
followup = app.new_form(0, "Followup Form", None, attachment=attachment)
self.assertEqual(followup.name['en'], "Followup Form")
self.assertEqual(app.modules[0].forms[0].name['en'], "Followup Form")
first_question = app.modules[0].forms[0].get_questions([], include_triggers=True, include_groups=True)[0]
self.assertEqual(first_question['label'], " Default label message ")
class TestGlobalAppConfig(TestCase):
domain = 'test-latest-app'
@classmethod
def setUpClass(cls):
super(TestGlobalAppConfig, cls).setUpClass()
cls.project = Domain(name=cls.domain)
cls.project.save()
cls.build_profile_id = 'english'
app = Application(
domain=cls.domain,
name='foo',
langs=["en"],
version=1,
modules=[Module()],
build_profiles={
cls.build_profile_id: BuildProfile(langs=['en'], name='English only'),
}
) # app is v1
app.save() # app is now v2
cls.v2_build = app.make_build()
cls.v2_build.is_released = True
cls.v2_build.save() # v2 is starred
app.save() # app is now v3
cls.v3_build = app.make_build()
cls.v3_build.is_released = True
cls.v3_build.save() # v3 is starred
app.save() # app is v4
# Add a build-profile-specific release at v2
cls.latest_profile = LatestEnabledBuildProfiles(
domain=cls.domain,
app_id=app.get_id,
build_profile_id=cls.build_profile_id,
version=cls.v2_build.version,
build_id=cls.v2_build.get_id,
active=True,
)
cls.latest_profile.save()
cls.app = app
@classmethod
def tearDownClass(cls):
cls.project.delete()
super(TestGlobalAppConfig, cls).tearDownClass()
def test_apk_prompt(self):
from corehq.apps.builds.utils import get_default_build_spec
latest_apk = get_default_build_spec().version
test_cases = [
('off', {}),
('on', {'value': latest_apk, 'force': False}),
('forced', {'value': latest_apk, 'force': True}),
]
for config, response in test_cases:
app_config = self.app.global_app_config
app_config.apk_prompt = config
app_config.save()
config = GlobalAppConfig.by_app_id(self.domain, self.app.master_id)
self.assertEqual(
config.get_latest_apk_version(),
response
)
def test_apk_prompt_preset(self):
preset_apk = '2.20.0/latest' # some random version
test_cases = [
('off', {}),
('on', {'value': '2.20.0', 'force': False}),
('forced', {'value': '2.20.0', 'force': True}),
]
app_config = self.app.global_app_config
app_config.apk_version = preset_apk
app_config.save()
for config, response in test_cases:
app_config = self.app.global_app_config
app_config.apk_prompt = config
app_config.save()
config = GlobalAppConfig.by_app_id(self.domain, self.app.master_id)
self.assertEqual(
config.get_latest_apk_version(),
response
)
def test_app_prompt(self):
app_config = self.app.global_app_config
app_config.save()
test_cases = [
('off', '', {}),
('on', '', {'value': self.v3_build.version, 'force': False}),
('forced', '', {'value': self.v3_build.version, 'force': True}),
('off', self.build_profile_id, {}),
('on', self.build_profile_id, {'value': self.v2_build.version, 'force': False}),
('forced', self.build_profile_id, {'value': self.v2_build.version, 'force': True}),
]
for config, build_profile_id, response in test_cases:
app_config = self.app.global_app_config
app_config.app_prompt = config
app_config.save()
config = GlobalAppConfig.by_app_id(self.domain, self.app.master_id)
self.assertEqual(
config.get_latest_app_version(build_profile_id),
response
)
def test_app_prompt_preset(self):
preset_app = 21 # some random version
test_cases = [
('off', {}),
('on', {'value': preset_app, 'force': False}),
('forced', {'value': preset_app, 'force': True}),
]
app_config = self.app.global_app_config
app_config.app_version = preset_app
app_config.save()
for config, response in test_cases:
app_config = self.app.global_app_config
app_config.app_prompt = config
app_config.save()
config = GlobalAppConfig.by_app_id(self.domain, self.app.master_id)
self.assertEqual(
config.get_latest_app_version(build_profile_id=''),
response
)
def test_load_from_build(self):
config = self._fresh_config(self.v3_build.id)
with self.assertRaises(AssertionError):
config.get_latest_app_version(build_profile_id='')
def test_missing_app(self):
config = self._fresh_config('missing_id')
with self.assertRaises(Http404):
config.get_latest_app_version(build_profile_id='')
def test_latest_profile_serialize(self):
self.assertEqual(
self.latest_profile.to_json({self.app.get_id: self.app.name}),
{
'id': self.latest_profile.id,
'app_id': self.app.get_id,
'active': True,
'version': self.v2_build.version,
'build_profile_id': self.build_profile_id,
'app_name': 'foo',
'profile_name': 'English only'
}
)
def _fresh_config(self, app_id):
config = GlobalAppConfig.by_app_id(self.domain, app_id)
config.app_prompt = 'on'
return config
| python |
# Gradient Norm Scaling/Clipping
from keras import optimizers
# configure sgd with gradient norm scaling
# i.e. changing the derivatives of the loss function to have a given vector norm when
# the L2 vector norm (sum of the squared values) of the gradient vector exceeds
# a threshold value.
opt = optimizers.SGD(lr=0.01, momentum=0.9, clipnorm=1.0)
# configure sgd with gradient norm clipping
# clipping the derivatives of the loss function to have a given value if a gradient value is less
# than a negative threshold or more than the positive threshold.
opt = optimizers.SGD(lr=0.01, momentum=0.9, clipvalue=1.0)
#######################################################################
# regression predictive modeling problem
from sklearn.datasets import make_regression
from matplotlib import pyplot
# generate regression dataset
X, y = make_regression(n_samples=1000, n_features=20, noise=0.1, random_state=1)
# histogram of target variable
pyplot.subplot(131)
pyplot.hist(y)
# boxplot of target variable
pyplot.subplot(132)
pyplot.boxplot(y)
# scatter plot of the first feature against the target
pyplot.subplot(133)
pyplot.scatter(X[:, 0], y)
pyplot.show()
####################################################################
# mlp with unscaled data for the regression problem
from sklearn.datasets import make_regression
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import SGD
from matplotlib import pyplot
# generate regression dataset
X, y = make_regression(n_samples=1000, n_features=20, noise=0.1, random_state=1)
# split into train and test
n_train = 500
trainX, testX = X[:n_train, :], X[n_train:, :]
trainy, testy = y[:n_train], y[n_train:]
# define model
model = Sequential() # the model with a linear stack of layers
model.add(Dense(25, input_dim=20, activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(1, activation='linear'))
# compile model
# model.compile(loss='mean_squared_error', optimizer=SGD(lr=0.01, momentum=0.9))
opt_clipping = SGD(lr=0.01, momentum=0.9, clipvalue=5.0)  # gradient value clipping
model.compile(loss='mean_squared_error', optimizer=opt_clipping)
# fit model
history = model.fit(trainX, trainy, validation_data=(testX, testy), epochs=100, verbose=0)
# evaluate the model
train_mse = model.evaluate(trainX, trainy, verbose=0)
test_mse = model.evaluate(testX, testy, verbose=0)
print('Train: %.3f, Test: %.3f' % (train_mse, test_mse))
# plot loss during training
pyplot.title('Mean Squared Error')
pyplot.plot(history.history['loss'], label='train')
pyplot.plot(history.history['val_loss'], label='test')
pyplot.legend()
pyplot.show()
# With the plain SGD optimizer (commented out above), the model is NOT able to learn this problem: the loss quickly becomes NaN.
# Solutions:
# 1. The traditional solution is to rescale the target variable using either standardization or normalization.
# 2. using Gradient Norm Scaling: replace the optimizer with:
opt_scaling = optimizers.SGD(lr=0.01, momentum=0.9, clipnorm=1.0)
# 3. using Gradient Norm Clipping: replace the optimizer with:
opt_clipping = SGD(lr=0.01, momentum=0.9, clipvalue=5.0)
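# A brief sketch of solution 1 (rescaling the target variable) using scikit-learn's
# StandardScaler; the *_scaled names below are illustrative and not used elsewhere.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
trainy_scaled = scaler.fit_transform(trainy.reshape(-1, 1)).flatten()
testy_scaled = scaler.transform(testy.reshape(-1, 1)).flatten()
# Train on trainy_scaled/testy_scaled, then invert predictions with
# scaler.inverse_transform(...) to report errors in the original units.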
| python |
class DEQue:
__slots__ = '_length', '_data'
def __init__(self):
self._length = 0
self._data = []
def __len__(self):
return self._length
def is_empty(self):
return len(self) == 0
def first(self):
if self.is_empty():
print('DEQue is empty')
return None
return self._data[0]
def last(self):
if self.is_empty():
print('DEQue is empty')
return None
return self._data[-1]
def enqueue_first(self, val):
self._data.insert(0, val)
self._length += 1
def enqueue_last(self, val):
self._data.append(val)
self._length += 1
    def dequeue_first(self):
        if self.is_empty():
            print('DEQue is empty')
            return None
        value = self._data.pop(0)  # pop by index so duplicate values elsewhere in the list are untouched
        self._length -= 1
        return value
    def dequeue_last(self):
        if self.is_empty():
            print('DEQue is empty')
            return None
        value = self._data.pop()
        self._length -= 1
        return value
deque = DEQue()
deque.enqueue_first(23)
deque.enqueue_last(24)
print(deque.first())
print(deque.last())
deque.dequeue_first()
deque.dequeue_last()
print(deque.is_empty())
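# For comparison, a minimal sketch using the standard library's collections.deque,
# which gives O(1) appends/pops at both ends (the list-backed DEQue above pays O(n)
# for operations at the front).
import collections
builtin_deque = collections.deque()
builtin_deque.appendleft(23)
builtin_deque.append(24)
print(builtin_deque[0], builtin_deque[-1])
builtin_deque.popleft()
builtin_deque.pop()
print(len(builtin_deque) == 0)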
| python |
from tcprecord import TCPRecord, TCPRecordStream
from httprecord import HTTPRecordStream
from tcpsession import TCPSession, tcp_flags, SeqException
from httpsession import parse_http_streams, HTTPParsingError, HTTPResponse, HTTPRequest
from errors import *
import sys
import printing
from printing import print_tcp_session, print_results
# ========================= NEW CODE =============================== #
def make_tcp_sessions_ng(session):
connection = None # key == directed_key
reverse_connection = None
for ip,tcp in session:
directed_key = TCPSession.directed_key(ip.src,ip.dst,tcp.sport,tcp.dport)
not_repeat = None
while not not_repeat:
if not connection:
connection=TCPSession(directed_key)
reversed_key = TCPSession.directed_key(ip.dst,ip.src,tcp.dport,tcp.sport)
reverse_connection=TCPSession(reversed_key)
connection.pair = reverse_connection
reverse_connection.pair = connection
tcp.string_flags = tcp_flags(tcp.flags)
#tcp.partof=set()
if directed_key == connection.directed_key:
not_repeat=connection.packet(tcp)
elif directed_key == reverse_connection.directed_key:
not_repeat=reverse_connection.packet(tcp)
else:
assert False
if not not_repeat:
yield (connection,reverse_connection)
connection=None
reverse_connection=None
yield (connection,reverse_connection)
def handle_lite_tcp_session_ng(lite_tcp_session):
unpacked_content=list(lite_tcp_session.packets())
try:
for connection, reverse_connection in make_tcp_sessions_ng(unpacked_content):
try:
#these calls create side effects on packets
#TODO: refactor it
stream = connection.stream()
rstream = reverse_connection.stream()
tcp_record_stream = TCPRecordStream(connection.content, reverse_connection.content)
http_record_stream = HTTPRecordStream(tcp_record_stream)
print str(tcp_record_stream)
print str(http_record_stream)
except(StreamClassError) as err:
print >> sys.stderr, err
except(ConnectionClassError) as err:
print >> sys.stderr, err
except(FatalClassError) as err:
print >> sys.stderr, err
raise
| python |
from .fid import FIDScore
| python |
# Copyright 2016 Ifwe Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
MCollective-based deploy strategy class.
"""
import json
import re
import tds.utils
import tds.utils.processes as processes
from .base import DeployStrategy
import logging
log = logging.getLogger('tds')
class TDSMCODeployStrategy(DeployStrategy):
"""MCO deploy strategy class."""
def __init__(self, bin, **_kwargs):
"""Initialize object."""
self.mco_bin = bin
@tds.utils.debug
def _process_mco_command(self, mco_cmd, retry):
"""Run a given MCollective 'mco' command"""
log.debug('Running MCollective command')
log.debug(5, 'Command is: %s' % ' '.join(mco_cmd))
proc = processes.run(mco_cmd, expect_return_code=None)
stdout, stderr = proc.stdout, proc.stderr
if proc.returncode:
return (False, 'The mco process failed to run successfully.\n'
'return code is %r.\n'
'Stdout: %r\n'
'Stderr: %r' % (proc.returncode, stdout, stderr))
mc_output = None
summary = None
# Extract the JSON output and summary line
for line in stdout.split('\n'):
if not line:
continue
if line.startswith('{'):
mc_output = json.loads(line)
if line.startswith('Finished'):
summary = line.strip()
# Ensure valid response and extract information
if mc_output is None or summary is None:
return (False, 'No output or summary information returned '
'from mco process')
log.debug(summary)
match = re.search(r'processing (\d+) / (\d+) ', summary)
if match is None:
return (False, 'Error parsing summary line.')
# Virtual hosts in dev tend to time out unpredictably, probably
# because vmware is slow to respond when the hosts are not
# active. Subsequent retries after a timeout work better.
if match.group(2) == '0' and retry > 0:
log.debug('Discovery failure, trying again.')
return self._process_mco_command(mco_cmd, retry-1)
for _host, hostinfo in mc_output.iteritems():
if hostinfo['exitcode'] != 0:
return (False, hostinfo['stderr'].strip())
else:
return (True, 'Deploy successful')
return (False, 'Unknown/unparseable mcollective output: %s' %
stdout)
@tds.utils.debug
def restart_host(self, dep_host, app, retry=4):
"""Restart application on a given host"""
log.debug('Restarting application on host %r', dep_host)
mco_cmd = [self.mco_bin, 'tds', '--discovery-timeout', '4',
'--timeout', '60', '-W', 'hostname=%s' % dep_host,
app, 'restart']
return self._process_mco_command(mco_cmd, retry)
@tds.utils.debug
def deploy_to_host(self, dep_host, app, version, retry=4):
log.debug('Deploying to host %r', dep_host)
mco_cmd = [self.mco_bin, 'tds', '--discovery-timeout', '4',
'--timeout', '60', '-W', 'hostname=%s' % dep_host,
app, version]
return self._process_mco_command(mco_cmd, retry)
| python |
"""
A file just to hold the version number, allows automated version increasing.
"""
SEMANTIC = '0.1.4-SNAPSHOT'
BUILD_TIME = 'UNKNOWN'
try:
with open('build-time.txt') as f:
CONTENTS = f.readline().rstrip()
if CONTENTS:
BUILD_TIME = CONTENTS
except IOError:
pass
| python |
import unittest
from iterable_collections import collect
class TestMap(unittest.TestCase):
def test_list(self):
c = collect(list(range(10))).map(lambda x: x + 1)
self.assertEqual(c.list(), list(map(lambda x: x + 1, list(range(10)))))
def test_lists(self):
c = collect(list(range(10))).map(lambda x: x + 1)
self.assertEqual(c.list(), list(map(lambda x: x + 1, list(range(10)))))
def test_set(self):
c = collect(set(range(10))).map(lambda x: x + 1)
self.assertEqual(c.set(), set(map(lambda x: x + 1, list(range(10)))))
def test_tuple(self):
c = collect(tuple(range(10))).map(lambda x: x + 1)
self.assertEqual(c.tuple(), tuple(map(lambda x: x + 1, list(range(10)))))
def test_iterator(self):
c = collect(iter(range(10))).map(lambda x: x + 1)
self.assertEqual(c.list(), list(map(lambda x: x + 1, list(range(10)))))
def test_dict(self):
c = collect({'a': 1, 'b': 2}).map(lambda x: x + 'b')
self.assertEqual(c.list(), list(map(lambda x: x + 'b', {'a': 1, 'b': 2})))
def test_dict_items(self):
c = collect({'a': 1, 'b': 2}.items()).map(lambda x: x[1] + 1)
self.assertEqual(c.list(), list(map(lambda x: x[1] + 1, {'a': 1, 'b': 2}.items())))
| python |
# Copyright 2016 Joel Dunham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains some multithreading worker and queue logic plus the functionality -- related
to foma compilation ang LM estimation -- that the worther thread initiates.
The the foma worker compiles foma FST phonology, morphology and morphophonology scripts
and estimates morpheme language models. Having a worker perform these tasks in a separate
thread from that processing the HTTP request allows us to immediately respond to the user.
The foma worker can only run a callable that is a global in
:mod:`onlinelinguisticdatabase.lib.foma_worker` and which takes keyword arguments.
Example usage::
from onlinelinguisticdatabase.lib.foma_worker import foma_worker_q
foma_worker_q.put({
'id': h.generate_salt(),
'func': 'compile_foma_script',
'args': {'model_name': u'Phonology', 'model_id': phonology.id,
'script_dir_path': phonology_dir_path, 'user_id': session['user'].id,
'verification_string': u'defined phonology: ', 'timeout': h.phonology_compile_timeout}
})
Cf. http://www.chrismoos.com/2009/03/04/pylons-worker-threads.
For an introduction to Python threading, see
http://www.ibm.com/developerworks/aix/library/au-threadingpython/.
"""
import Queue
import threading
import logging
from uuid import uuid4
import onlinelinguisticdatabase.lib.helpers as h
from onlinelinguisticdatabase.model.meta import Session
import onlinelinguisticdatabase.model as model
log = logging.getLogger(__name__)
################################################################################
# WORKER THREAD & QUEUE
################################################################################
foma_worker_q = Queue.Queue(1)
class FomaWorkerThread(threading.Thread):
"""Define the foma worker.
"""
def run(self):
while True:
msg = foma_worker_q.get()
try:
globals()[msg.get('func')](**msg.get('args'))
except Exception, e:
log.warn('Unable to process in worker thread: %s' % e)
foma_worker_q.task_done()
def start_foma_worker():
"""Called in :mod:`onlinelinguisticdatabase.config.environment.py`.
"""
foma_worker = FomaWorkerThread()
foma_worker.setDaemon(True)
foma_worker.start()
foma_worker2 = FomaWorkerThread()
foma_worker2.setDaemon(True)
foma_worker2.start()
################################################################################
# PHONOLOGY
################################################################################
def compile_phonology(**kwargs):
"""Compile the foma script of a phonology and save it to the db with values that indicating compilation success.
"""
phonology = Session.query(model.Phonology).get(kwargs['phonology_id'])
phonology.compile(kwargs['timeout'])
phonology.datetime_modified = h.now()
phonology.modifier_id = kwargs['user_id']
Session.commit()
################################################################################
# MORPHOLOGY
################################################################################
def generate_and_compile_morphology(**kwargs):
"""Generate a foma script for a morphology and (optionally) compile it.
:param int kwargs['morphology_id']: id of a morphology.
:param bool kwargs['compile']: if True, the script will be generated *and* compiled.
:param int kwargs['user_id']: id of the user model performing the generation/compilation.
:param float kwargs['timeout']: how many seconds to wait before killing the foma compile process.
"""
morphology = Session.query(model.Morphology).get(kwargs['morphology_id'])
unknown_category = h.unknown_category
try:
morphology.write(unknown_category)
except Exception, e:
log.warn(e)
pass
if kwargs.get('compile', True):
try:
morphology.compile(kwargs['timeout'])
except Exception, e:
log.warn(e)
pass
morphology.generate_attempt = unicode(uuid4())
morphology.modifier_id = kwargs['user_id']
morphology.datetime_modified = h.now()
Session.commit()
################################################################################
# MORPHEME LANGUAGE MODEL
################################################################################
def generate_language_model(**kwargs):
"""Write the requisite files (corpus, vocab, ARPA, LMTrie) of a morpheme LM to disk.
:param str kwargs['morpheme_language_model_id']: ``id`` value of a morpheme LM.
:param int/float kwargs['timeout']: seconds to allow for ARPA file creation.
:param str kwargs['user_id']: ``id`` value of an OLD user.
:returns: ``None``; side-effect is to change relevant attributes of LM object.
"""
lm = Session.query(model.MorphemeLanguageModel).get(kwargs['morpheme_language_model_id'])
trie_path = lm.get_file_path('trie')
trie_mod_time = lm.get_modification_time(trie_path)
lm.generate_succeeded = False
try:
lm.write_corpus()
except Exception, e:
lm.generate_message = u'Error writing the corpus file. %s' % e
try:
lm.write_vocabulary()
except Exception, e:
lm.generate_message = u'Error writing the vocabulary file. %s' % e
try:
lm.write_arpa(kwargs['timeout'])
except Exception, e:
lm.generate_message = u'Error writing the ARPA file. %s' % e
try:
lm.generate_trie()
except Exception, e:
lm.generate_message = u'Error generating the LMTrie instance. %s' % e
else:
if lm.get_modification_time(trie_path) != trie_mod_time:
lm.generate_succeeded = True
lm.generate_message = u'Language model successfully generated.'
else:
lm.generate_message = u'Error generating the LMTrie instance.'
lm.generate_attempt = unicode(uuid4())
lm.modifier_id = kwargs['user_id']
lm.datetime_modified = h.now()
Session.commit()
def compute_perplexity(**kwargs):
"""Evaluate the LM by attempting to calculate its perplexity and changing some attribute values to reflect the attempt.
"""
lm = Session.query(model.MorphemeLanguageModel).get(kwargs['morpheme_language_model_id'])
timeout = kwargs['timeout']
iterations = 5
try:
lm.perplexity = lm.compute_perplexity(timeout, iterations)
except Exception:
lm.perplexity = None
if lm.perplexity is None:
lm.perplexity_computed = False
else:
lm.perplexity_computed = True
lm.perplexity_attempt = unicode(uuid4())
lm.modifier_id = kwargs['user_id']
lm.datetime_modified = h.now()
Session.commit()
################################################################################
# MORPHOLOGICAL PARSER (MORPHOPHONOLOGY)
################################################################################
def generate_and_compile_parser(**kwargs):
"""Write the parser's morphophonology FST script to file and compile it if ``compile_`` is True.
Generate the language model and pickle it.
"""
parser = Session.query(model.MorphologicalParser).get(kwargs['morphological_parser_id'])
parser.changed = False
parser.write()
if kwargs.get('compile', True):
parser.compile(kwargs['timeout'])
parser.modifier_id = kwargs['user_id']
parser.datetime_modified = h.now()
if parser.changed:
parser.cache.clear(persist=True)
Session.commit()
| python |
# Software License Agreement (BSD License)
#
# Copyright (c) 2018, Fraunhofer FKIE/CMS, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, absolute_import, print_function, unicode_literals
import fkie_multimaster_msgs.grpc.launch_pb2 as lmsg
from .common import utf8
from .host import get_hostname
from .url import nmduri, nmdport
STRING = lmsg.Argument.ValueType.Value('STRING')
INT32 = lmsg.Argument.ValueType.Value('INT32')
DOUBLE = lmsg.Argument.ValueType.Value('DOUBLE')
BOOL = lmsg.Argument.ValueType.Value('BOOL')
LIST = lmsg.Argument.ValueType.Value('LIST')
class StartConfig():
def __init__(self, package, binary):
'''
:param str host: master uri from host where to run the node. Masteruri is used for cases where NMD uri needed.
'''
self.package = package
self.binary = binary
self.config_path = ''
self.binary_path = ''
self.name = ''
self.namespace = ''
self.fullname = ''
self.prefix = ''
self.cwd = ''
self.env = {}
self.remaps = {}
self.params = {}
self.clear_params = []
self.args = []
self.masteruri = None
self.host = None
self.loglevel = ''
self.logformat = ''
self.respawn = False
self.respawn_delay = 30
self.respawn_max = 0
self.respawn_min_runtime = 0
def __repr__(self):
params = "name=%s" % self.name
params += ", ns=%s" % self.namespace
params += ", package=%s" % self.package
params += ", binary=%s" % self.binary
params += ", prefix=%s" % self.prefix
params += ", cwd=%s" % self.cwd
params += ", masteruri=%s" % self.masteruri
params += ", host=%s" % self.host
params += ", loglevel=%s" % self.loglevel
params += ", respawn=%s" % self.respawn
return "<StartConfig %s/>" % params
@property
def hostname(self):
'''
:return: host name from host_masteruri if it is not None.
'''
if self.host:
return get_hostname(self.host)
return None
@property
def nmduri(self):
'''
:return: the nmd uri where to launch the node from host_masteruri if it is not None.
'''
if self.host:
try:
return nmduri(self.host, prefix='')
except ValueError:
return '%s:%d' % (self.host, nmdport(self.masteruri))
return None
def _msg_type(self, value):
valtype = type(value)
if valtype == int:
return INT32
if valtype == float:
return DOUBLE
if valtype == bool:
return BOOL
if valtype == list:
return LIST
return STRING
@classmethod
def _from_msg_type(cls, value, value_type):
if value_type == INT32:
return int(value)
if value_type == DOUBLE:
return float(value)
if value_type == BOOL:
return value.lower() in ("yes", "true", "t", "1")
if value_type == LIST:
try:
return eval(value)
except Exception:
return []
return value
def to_msg(self):
msg = lmsg.StartConfig(package=self.package, binary=self.binary)
self.fill_msg(msg)
return msg
def fill_msg(self, msg):
msg.package = self.package
msg.binary = self.binary
if self.binary_path:
msg.binary_path = self.binary_path
if self.name:
msg.name = self.name
if self.namespace:
msg.namespace = self.namespace
if self.fullname:
msg.fullname = self.fullname
if self.prefix:
msg.prefix = self.prefix
if self.cwd:
msg.cwd = self.cwd
if self.env:
msg.env.extend([lmsg.Argument(name=name, value=value) for name, value in self.env.items()])
if self.remaps:
msg.remaps.extend([lmsg.Remapping(from_name=name, to_name=value) for name, value in self.remaps.items()])
if self.params:
msg.params.extend([lmsg.Argument(name=name, value=utf8(value), value_type=self._msg_type(value)) for name, value in self.params.items()])
if self.clear_params:
msg.clear_params.extend(self.clear_params)
if self.args:
msg.args.extend(self.args)
if self.masteruri:
msg.masteruri = self.masteruri
if self.host:
msg.host = self.host
msg.loglevel = self.loglevel
msg.respawn = self.respawn
msg.respawn_delay = self.respawn_delay
msg.respawn_max = self.respawn_max
msg.respawn_min_runtime = self.respawn_min_runtime
@classmethod
def from_msg(cls, msg):
startcfg = StartConfig(msg.package, msg.binary)
startcfg.binary_path = msg.binary_path
startcfg.name = msg.name
startcfg.namespace = msg.namespace
startcfg.fullname = msg.fullname
startcfg.prefix = msg.prefix
startcfg.cwd = msg.cwd
startcfg.env = {env.name: env.value for env in msg.env}
startcfg.remaps = {remap.from_name: remap.to_name for remap in msg.remaps}
startcfg.params = {param.name: cls._from_msg_type(param.value, param.value_type) for param in msg.params}
startcfg.clear_params = list(msg.clear_params)
startcfg.args = list(msg.args)
startcfg.masteruri = msg.masteruri
startcfg.host = msg.host
startcfg.loglevel = msg.loglevel
startcfg.respawn = msg.respawn
startcfg.respawn_delay = msg.respawn_delay
startcfg.respawn_max = msg.respawn_max
startcfg.respawn_min_runtime = msg.respawn_min_runtime
return startcfg
| python |
# Generated by Django 3.0.2 on 2021-05-11 11:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('banks', '0002_bankcode_otp_enabled'),
('loans', '0021_loanrequests'),
]
operations = [
migrations.CreateModel(
name='DRFDisbursement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('has_data', models.BooleanField(default=False)),
('status', models.BooleanField(default=False)),
('response_id', models.CharField(blank=True, max_length=299, null=True)),
('request_date', models.CharField(blank=True, max_length=299, null=True)),
('response_date', models.CharField(blank=True, max_length=299, null=True)),
('response_code', models.CharField(blank=True, max_length=299, null=True)),
('customer_id', models.CharField(blank=True, max_length=200, null=True)),
('authorisation_code', models.CharField(blank=True, max_length=200, null=True)),
('account_number', models.CharField(blank=True, max_length=200, null=True)),
('amount', models.CharField(blank=True, max_length=200, null=True)),
('mandate_reference', models.CharField(blank=True, max_length=200, null=True)),
('bank', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='banks.BankCode')),
('loan', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='loans.Loan')),
],
options={
'verbose_name': 'DRF Disbursement',
'verbose_name_plural': 'DRF Disbursements',
'db_table': 'DRF Disbursement',
},
),
]
| python |
#!/usr/bin/env python3
from argparse import ArgumentParser, FileType
from collections import OrderedDict
from datetime import datetime
import logging
from json import dumps
from sys import stdout
from time import sleep
from coloredlogs import install as coloredlogs_install
from ssnapshot.ssnapshot import (
create_account_cpu_usage_summary,
create_account_cputime_remaining_summary,
create_fairshare_summaries,
create_node_summaries,
create_partition_cpu_count_summary,
create_partition_cpu_load_summary,
create_partition_memory_summary,
create_partition_node_state_summary,
create_reservation_summaries,
create_top_users_summaries,
sinfo_ttl_cache,
squeue_ttl_cache,
sstat_ttl_cache,
)
def create_arg_parser() -> ArgumentParser:
new_parser = ArgumentParser(
description='ssnapshot returns a brief summary of the status of slurm',
)
new_parser.add_argument(
'--verbose', '-v',
default=0,
action='count',
help='0×v = ERRORs, 1×v = WARNINGs, 2×v = INFOs and 3×v = DEBUGs',
)
new_parser.add_argument(
'--daemonize', '-d',
default=False,
action='store_true',
help='run in daemon mode',
)
new_parser.add_argument(
'--sleep', '-s',
default=300,
type=int,
help='Number of seconds to sleep between runs in daemon mode',
)
new_parser.add_argument(
'--outfile', '-o',
default=stdout,
type=FileType('w'),
help='Where to write output. Default is stdout',
)
new_parser.add_argument(
'--accounts', '-a',
dest='tables',
action='append_const',
const='accounts',
help='Show account summary information. (Default: False)',
)
new_parser.add_argument(
'--fairshare', '-f',
dest='tables',
action='append_const',
const='fairshare',
help='Show fairshare summary information. (Default: False)',
)
new_parser.add_argument(
'--nodes', '-n',
dest='tables',
action='append_const',
const='nodes',
help='Show node summary information. (Default: False)',
)
new_parser.add_argument(
'--partitions', '-p',
dest='tables',
action='append_const',
const='partitions',
help='Show partition summary information. (Default: False)',
)
new_parser.add_argument(
'--reservations', '-r',
dest='tables',
action='append_const',
const='reservations',
help='Display Reservation information. (Default: False)',
)
new_parser.add_argument(
'--top-users', '-t',
dest='tables',
action='append_const',
const='topusers',
help='Display Top Users. (Default: False)',
)
output_group = new_parser.add_mutually_exclusive_group()
output_group.add_argument(
'--json',
dest='output',
action='store_const',
const='json',
help='Output is JSON',
)
output_group.add_argument(
'--html',
dest='output',
action='store_const',
const='html',
help='Output is HTML',
)
output_group.add_argument(
'--markdown',
dest='output',
action='store_const',
const='markdown',
help='Output is markdown',
)
output_group.add_argument(
'--prometheus',
dest='output',
action='store_const',
const='prometheus',
help='Output is for prometheus exporter',
)
new_parser.set_defaults(
output='markdown',
tables=[],
human_readable=True,
)
return new_parser
def generate_markdown(output: dict) -> str:
lines = []
header = output.get('header')
if header:
title = f'{header.get("value")}'
time = header.get('time')
        time = f' ({time})' if time else ''
lines.append(f'# {title}{time}')
for name, value in output.items():
output_type = value.get('type')
if output_type == 'dataframe':
table_md = value.get('dataframe').reset_index().to_markdown(index=False, floatfmt="0.4f")
lines.append(f'## {name}\n{table_md}\n\n')
return '\n'.join(lines)
def generate_html(output: dict) -> str:
lines = []
header = output.get('header')
if header:
title = f'{header.get("value")}'
time = header.get('time')
        time = f' ({time})' if time else ''
lines.append(f'<h1>{title}{time}</h1>')
for name, value in output.items():
output_type = value.get('type')
if output_type == 'dataframe':
table_html = value.get('dataframe').reset_index().to_html(index=False)
lines.append(f'<h2>{name}</h2>\n{table_html}\n')
return '\n'.join(lines)
def generate_json(output: dict) -> str:
for key, value in output.items():
value_type = value.get('type')
if key == 'header':
timestamp = value.get('time')
if timestamp:
output['header']['time'] = str(timestamp)
if value_type == 'dataframe':
value['dataframe'] = value.get('dataframe').reset_index().to_dict()
return dumps(output, indent=2)
def generate_prometheus(output: dict) -> str:
lines = []
for key, value in output.items():
output_type = value.get('type')
if output_type == 'dataframe':
table_name = key.lower().replace(' ', '_')
dataframe = value.get('dataframe')
index_names = [name.lower().replace(' ', '_') for name in dataframe.index.names]
for row_index, row in dataframe.iterrows():
                if not isinstance(row_index, tuple):
                    row_index = (row_index, )
logging.debug(row_index)
label_string = ", ".join([
f'{index_name}="{row_index[counter]}"' for counter, index_name in enumerate(index_names)
])
logging.debug(label_string)
for column_number, column in enumerate(dataframe.columns):
column_name = column.lower().replace(' ', '_').replace('/', 'per')
lines.append(
f'ssnapshot_{table_name}{{{label_string}, label="{column_name}"}} '
f'{row[column_number]:.6f}')
return '\n'.join(lines) + '\n'
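# Sketch of the exposition format produced above (table, index and column
# names are illustrative, not taken from the summary functions): a
# "Node States" dataframe indexed by partition would yield lines such as
#   ssnapshot_node_states{partition="gpu", label="idle"} 12.000000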
def main():
arg_parser = create_arg_parser()
args = arg_parser.parse_args()
if args.verbose == 0:
coloredlogs_install(level='ERROR')
if args.verbose == 1:
coloredlogs_install(level='WARNING')
if args.verbose == 2:
coloredlogs_install(level='INFO')
if args.verbose >= 3:
coloredlogs_install(level='DEBUG')
output_method = {
'html': generate_html,
'json': generate_json,
'markdown': generate_markdown,
'prometheus': generate_prometheus,
}.get(args.output)
if args.output == 'prometheus':
args.human_readable = False
summary_functions = {
'accounts': [create_account_cpu_usage_summary, create_account_cputime_remaining_summary],
'fairshare': [create_fairshare_summaries],
'nodes': [create_node_summaries],
'partitions': [
create_partition_memory_summary,
create_partition_cpu_count_summary,
create_partition_cpu_load_summary,
create_partition_node_state_summary,
],
'reservations': [create_reservation_summaries],
'topusers': [create_top_users_summaries],
}
while True:
for cache in sinfo_ttl_cache, squeue_ttl_cache, sstat_ttl_cache:
cache.clear()
output = OrderedDict([('header', {'value': 'Slurm Snapshot', 'time': datetime.now()})])
summaries = []
for table in args.tables:
for summary_function in summary_functions.get(table, []):
summaries.append(summary_function())
for summary in summaries:
for table_name, data in summary.items():
output[table_name] = {'type': 'dataframe', 'dataframe': data}
output_string = ''
if output_method:
output_string = output_method(output)
if output_string:
try:
args.outfile.truncate(0)
args.outfile.seek(0, 0)
except OSError: # expected for stdout
pass
args.outfile.write(output_string)
args.outfile.flush()
if args.daemonize:
sleep(args.sleep)
else:
break
if __name__ == '__main__':
main()
| python |
import itk
import numpy as np
from segmantic.prepro import core
from segmantic.prepro.core import make_image
def test_extract_slices(labelfield: core.Image3) -> None:
slices_xy = core.extract_slices(labelfield, axis=2)
assert slices_xy[0].GetSpacing()[0] == labelfield.GetSpacing()[0]
assert slices_xy[0].GetSpacing()[1] == labelfield.GetSpacing()[1]
for k, slice in enumerate(slices_xy):
print(type(slice))
slice_view = itk.array_view_from_image(slice)
assert np.all(slice_view == k)
def test_pad_crop_center(labelfield: core.Image3) -> None:
padded = core.pad(labelfield, target_size=(9, 9, 9))
cropped = core.crop_center(padded, target_size=(5, 5, 5))
assert labelfield.GetSpacing() == cropped.GetSpacing()
assert labelfield.GetOrigin() == cropped.GetOrigin()
assert np.all(core.as_array(cropped) == core.as_array(labelfield))
slice = core.crop_center(labelfield, target_size=(5, 5, 1))
size = itk.size(slice)
assert size[2] == 1
def test_resample() -> None:
image = make_image(shape=(3, 3), spacing=(2.0, 2.0), value=1.0, pixel_type=itk.F)
image[1, 1] = 0.0
# double the resolution from (2.0, 2.0) to (1.0, 1.0)
res = core.resample(image, target_spacing=(1.0, 1.0))
assert list(res.shape) == [2 * s for s in image.shape]
| python |
"""
Fetch dependencies and build a Windows wheel
============================================
This script depends on pycairo being installed to provide cairo.dll; cairo.dll
must have been built with FreeType support.
The cairo headers (and their dependencies) are fetched from the Arch Linux
repositories (the official cairo release tarball contains unbuilt headers (e.g.
missing cairo-features.h) and is huge due to the presence of test baseline
images). The FreeType headers and binary are fetched from the "official"
build__ listed on FreeType's website.
__ https://github.com/ubawurinna/freetype-windows-binaries
"""
from ctypes import (
c_bool, c_char_p, c_ulong, c_void_p, c_wchar_p, POINTER,
byref, create_unicode_buffer, sizeof, windll)
import os
from pathlib import Path
import shutil
import subprocess
import sys
import urllib.request
import cairo # Needed to load the cairo dll.
import setuptools
def enum_process_modules(func_name=None):
k32 = windll.kernel32
psapi = windll.psapi
k32.GetCurrentProcess.restype = c_void_p
k32.GetModuleFileNameW.argtypes = [c_void_p, c_wchar_p, c_ulong]
k32.GetModuleFileNameW.restype = c_ulong
k32.GetProcAddress.argtypes = [c_void_p, c_char_p]
    k32.GetProcAddress.restype = c_void_p
psapi.EnumProcessModules.argtypes = [
c_void_p, POINTER(c_void_p), c_ulong, POINTER(c_ulong)]
psapi.EnumProcessModules.restype = c_bool
process = k32.GetCurrentProcess()
needed = c_ulong()
psapi.EnumProcessModules(process, None, 0, byref(needed))
modules = (c_void_p * (needed.value // sizeof(c_void_p)))()
if not psapi.EnumProcessModules(
process, modules, sizeof(modules), byref(needed)):
raise OSError("Failed to enumerate process modules")
path = create_unicode_buffer(1024)
for module in modules:
if func_name is None or k32.GetProcAddress(module, func_name):
k32.GetModuleFileNameW(module, path, len(path))
yield path.value
# Prepare the directories.
os.chdir(Path(__file__).resolve().parents[1])
Path("build").mkdir(exist_ok=True)
# Download the cairo headers from Arch Linux (<1Mb, vs >40Mb for the official
# tarball, which contains baseline images) from before Arch switched to zstd,
# and the "official" FreeType build.
os.chdir("build")
urls = {
Path("cairo.txz"):
"https://archive.org/download/archlinux_pkg_cairo/"
"cairo-1.17.2%2B17%2Bg52a7c79fd-2-x86_64.pkg.tar.xz",
Path("fontconfig.txz"):
"https://archive.org/download/archlinux_pkg_fontconfig/"
"fontconfig-2%3A2.13.91%2B24%2Bg75eadca-1-x86_64.pkg.tar.xz",
Path("freetype.zip"):
"https://github.com/ubawurinna/freetype-windows-binaries/"
"releases/download/v2.9.1/freetype-2.9.1.zip",
}
for archive_path, url in urls.items():
if not archive_path.exists():
with urllib.request.urlopen(url) as request:
archive_path.write_bytes(request.read())
dest = archive_path.stem
shutil.rmtree(dest, ignore_errors=True)
shutil.unpack_archive(archive_path, dest)
# Get cairo.dll (normally loaded by pycairo), checking that it includes
# FreeType support.
Path("cairo/win64").mkdir(parents=True)
cairo_dll, = enum_process_modules(b"cairo_ft_font_face_create_for_ft_face")
shutil.copyfile(cairo_dll, "cairo/win64/cairo.dll")
# Get hold of a CCompiler object, by creating a dummy Distribution with a list
# of extension modules that claims to be truthy (but is actually empty) and
# running its build_ext command. Prior to the deprecation of distutils, this
# was just ``cc = distutils.ccompiler.new_compiler(); cc.initialize()``.
class L(list): __bool__ = lambda self: True
be = setuptools.Distribution({"ext_modules": L()}).get_command_obj("build_ext")
be.finalize_options()
be.run()
cc = be.compiler
cc.initialize()
# Build the import library.
cc.spawn(
["dumpbin", "/EXPORTS", "/OUT:cairo/win64/cairo.exports",
"cairo/win64/cairo.dll"])
with open("cairo/win64/cairo.exports") as raw_exports, \
open("cairo/win64/cairo.def", "x") as def_file:
def_file.write("EXPORTS\n")
for line in raw_exports:
try:
ordinal, hint, rva, name = line.split()
int(ordinal)
int(hint, 16)
int(rva, 16)
except ValueError:
continue
def_file.write(name + "\n")
cc.spawn(
["lib", f"/DEF:{def_file.name}", "/MACHINE:x64",
"/OUT:cairo/win64/cairo.lib"])
# Build the wheel.
os.chdir("..")
subprocess.run(
[sys.executable, "-mpip", "install", "--upgrade", "pip", "wheel"],
check=True)
os.environ.update(
CL=(f"{os.environ.get('CL', '')} "
f"/I{Path()}/build/cairo/usr/include/cairo "
f"/I{Path()}/build/fontconfig/usr/include "
f"/I{Path()}/build/freetype/include "),
LINK=(f"{os.environ.get('LINK', '')} "
f"/LIBPATH:{Path()}/build/cairo/win64 "
f"/LIBPATH:{Path()}/build/freetype/win64 "),
)
subprocess.run(
[sys.executable, "setup.py", "bdist_wheel"],
check=True)
| python |
#!/usr/bin/env python3
# encoding: utf-8
"""
This module contains unit tests for the arc.main module
"""
import os
import shutil
import unittest
from arc.common import ARC_PATH
from arc.exceptions import InputError
from arc.imports import settings
from arc.main import ARC, StatmechEnum, process_adaptive_levels
from arc.species.species import ARCSpecies
servers = settings['servers']
class TestEnumerationClasses(unittest.TestCase):
"""
Contains unit tests for various enumeration classes.
"""
def test_statmech_enum(self):
"""Test the StatmechEnum class"""
self.assertEqual(StatmechEnum('arkane').value, 'arkane')
with self.assertRaises(ValueError):
StatmechEnum('wrong')
class TestARC(unittest.TestCase):
"""
Contains unit tests for the ARC class
"""
@classmethod
def setUpClass(cls):
"""
A method that is run before all unit tests in this class.
"""
cls.maxDiff = None
cls.servers = servers.keys()
cls.job_types1 = {'conformers': True,
'opt': True,
'fine_grid': False,
'freq': True,
'sp': True,
'rotors': False,
'orbitals': False,
'lennard_jones': False,
'bde': True,
}
def test_as_dict(self):
"""Test the as_dict() method of ARC"""
spc1 = ARCSpecies(label='spc1',
smiles='CC',
compute_thermo=False,
)
arc0 = ARC(project='arc_test',
job_types=self.job_types1,
species=[spc1],
level_of_theory='ccsd(t)-f12/cc-pvdz-f12//b3lyp/6-311+g(3df,2p)',
three_params=False,
)
arc0.freq_level.args['keyword']['general'] = 'scf=(NDump=30)'
restart_dict = arc0.as_dict()
long_thermo_description = restart_dict['species'][0]['long_thermo_description']
self.assertIn('Bond corrections:', long_thermo_description)
self.assertIn("'C-C': 1", long_thermo_description)
self.assertIn("'C-H': 6", long_thermo_description)
# mol.atoms are not tested since all id's (including connectivity) changes depending on how the test is run.
expected_dict = {'T_count': 50,
'T_max': None,
'T_min': None,
'allow_nonisomorphic_2d': False,
'arkane_level_of_theory': {'basis': 'cc-pvdz-f12',
'method': 'ccsd(t)-f12',
'method_type': 'wavefunction',
'software': 'molpro'},
'calc_freq_factor': True,
'compute_transport': False,
'conformer_level': {'basis': 'def2svp',
'compatible_ess': ['gaussian', 'terachem'],
'method': 'wb97xd',
'method_type': 'dft',
'software': 'gaussian'},
'e_confs': 5.0,
'ess_settings': {'gaussian': ['local', 'server2'],
'molpro': ['local', 'server2'],
'onedmin': ['server1'],
'orca': ['local'],
'qchem': ['server1'],
'terachem': ['server1']},
'freq_level': {'basis': '6-311+g(3df,2p)',
'method': 'b3lyp',
'method_type': 'dft',
'software': 'gaussian'},
'freq_scale_factor': 0.967,
'irc_level': {'basis': 'def2tzvp',
'compatible_ess': ['gaussian', 'terachem'],
'method': 'wb97xd',
'method_type': 'dft',
'software': 'gaussian'},
'job_memory': 14,
'job_types': {'bde': True,
'conformers': True,
'fine': False,
'freq': True,
'irc': True,
'onedmin': False,
'opt': True,
'orbitals': False,
'rotors': False,
'sp': True},
'kinetics_adapter': 'arkane',
'max_job_time': 120,
'n_confs': 10,
'opt_level': {'basis': '6-311+g(3df,2p)',
'method': 'b3lyp',
'method_type': 'dft',
'software': 'gaussian'},
'output': {},
'project': 'arc_test',
'reactions': [],
'running_jobs': {},
'sp_level': {'basis': 'cc-pvdz-f12',
'method': 'ccsd(t)-f12',
'method_type': 'wavefunction',
'software': 'molpro'},
'species': [{'arkane_file': None,
'bond_corrections': {'C-C': 1, 'C-H': 6},
'charge': 0,
'compute_thermo': False,
'consider_all_diastereomers': True,
'force_field': 'MMFF94s',
'is_ts': False,
'label': 'spc1',
'long_thermo_description': long_thermo_description,
'mol': {'atoms': restart_dict['species'][0]['mol']['atoms'],
'multiplicity': 1,
'props': {}},
'multiplicity': 1,
'number_of_rotors': 0}],
'thermo_adapter': 'arkane',
'three_params': False}
# import pprint # left intentionally for debugging
# print(pprint.pprint(restart_dict))
self.assertEqual(restart_dict, expected_dict)
def test_from_dict(self):
"""Test the from_dict() method of ARC"""
restart_dict = {'composite_method': '',
'conformer_level': 'b97-d3/6-311+g(d,p)',
'freq_level': 'wb97x-d3/6-311+g(d,p)',
'freq_scale_factor': 0.96,
'opt_level': 'wb97x-d3/6-311+g(d,p)',
'output': {},
'project': 'testing_from_dict',
'reactions': [],
'scan_level': '',
'sp_level': 'ccsd(t)-f12/cc-pvqz-f12',
'species': [{'bond_corrections': {'C-C': 1, 'C-H': 6},
'charge': 1,
'conformer_energies': [],
'conformers': [],
'external_symmetry': 1,
'compute_thermo': False,
'is_ts': False,
'label': 'testing_spc1',
'mol': '1 C u0 p0 c0 {2,S} {3,S} {4,S} {5,S}\n2 C u0 p0 c0 {1,S} {6,S} {7,S} {8,S}'
'\n3 H u0 p0 c0 {1,S}\n4 H u0 p0 c0 {1,S}\n5 H u0 p0 c0 {1,S}\n6 H u0 p0 '
'c0 {2,S}\n7 H u0 p0 c0 {2,S}\n8 H u0 p0 c0 {2,S}\n',
'multiplicity': 1,
'neg_freqs_trshed': [],
'number_of_rotors': 0,
'opt_level': '',
'optical_isomers': 1,
'rotors_dict': {},
'xyzs': []}],
'three_params': False,
'project_directory': os.path.join(ARC_PATH, 'Projects',
'arc_project_for_testing_delete_after_usage_test_from_dict'),
}
arc1 = ARC(project='wrong', freq_scale_factor=0.95)
self.assertEqual(arc1.freq_scale_factor, 0.95) # user input
arc2 = ARC(**restart_dict)
self.assertEqual(arc2.freq_scale_factor, 0.96) # loaded from the restart dict
self.assertEqual(arc2.project, 'testing_from_dict')
self.assertIn('arc_project_for_testing_delete_after_usage', arc2.project_directory)
self.assertTrue(arc2.job_types['fine'])
self.assertTrue(arc2.job_types['rotors'])
self.assertEqual(arc2.sp_level.simple(), 'ccsd(t)-f12/cc-pvqz-f12')
self.assertEqual(arc2.level_of_theory, '')
self.assertEqual(arc2.species[0].label, 'testing_spc1')
self.assertFalse(arc2.species[0].is_ts)
self.assertEqual(arc2.species[0].charge, 1)
self.assertFalse(arc2.three_params)
def test_from_dict_specific_job(self):
"""Test the from_dict() method of ARC"""
restart_dict = {'specific_job_type': 'bde',
'project': 'unit_test_specific_job',
'project_directory': os.path.join(ARC_PATH, 'Projects', 'unit_test_specific_job'),
}
arc1 = ARC(**restart_dict)
job_type_expected = {'conformers': False, 'opt': True, 'freq': True, 'sp': True, 'rotors': False,
'orbitals': False, 'bde': True, 'onedmin': False, 'fine': True, 'irc': False}
self.assertEqual(arc1.job_types, job_type_expected)
def test_check_project_name(self):
"""Test project name invalidity"""
with self.assertRaises(InputError):
ARC(project='ar c')
with self.assertRaises(InputError):
ARC(project='ar:c')
with self.assertRaises(InputError):
ARC(project='ar<c')
with self.assertRaises(InputError):
ARC(project='ar%c')
def test_determine_model_chemistry_and_freq_scale_factor(self):
"""Test determining the model chemistry and the frequency scaling factor"""
arc0 = ARC(project='arc_model_chemistry_test', level_of_theory='CBS-QB3')
self.assertEqual(str(arc0.arkane_level_of_theory), "cbs-qb3, software: gaussian (composite)")
self.assertEqual(arc0.freq_scale_factor, 1.00386) # 0.99 * 1.014 = 1.00386
arc1 = ARC(project='arc_model_chemistry_test',
level_of_theory='cbs-qb3-paraskevas')
self.assertEqual(str(arc1.arkane_level_of_theory), 'cbs-qb3-paraskevas, software: gaussian (composite)')
self.assertEqual(arc1.freq_scale_factor, 1.00386) # 0.99 * 1.014 = 1.00386
self.assertEqual(arc1.bac_type, 'p')
arc2 = ARC(project='arc_model_chemistry_test',
level_of_theory='ccsd(t)-f12/cc-pvtz-f12//m06-2x/cc-pvtz')
self.assertEqual(str(arc2.arkane_level_of_theory), 'ccsd(t)-f12/cc-pvtz-f12, software: molpro (wavefunction)')
self.assertEqual(arc2.freq_scale_factor, 0.955)
arc3 = ARC(project='arc_model_chemistry_test',
sp_level='ccsd(t)-f12/cc-pvtz-f12', opt_level='wb97xd/def2tzvp')
self.assertEqual(str(arc3.arkane_level_of_theory), 'ccsd(t)-f12/cc-pvtz-f12, software: molpro (wavefunction)')
self.assertEqual(arc3.freq_scale_factor, 0.988)
def test_determine_model_chemistry_for_job_types(self):
"""Test determining the model chemistry specification dictionary for job types"""
# Test conflicted inputs: specify both level_of_theory and composite_method
with self.assertRaises(InputError):
ARC(project='test', level_of_theory='ccsd(t)-f12/cc-pvtz-f12//wb97x-d/aug-cc-pvtz',
composite_method='cbs-qb3')
# Test illegal level of theory specification (method contains multiple slashes)
with self.assertRaises(ValueError):
ARC(project='test', level_of_theory='dlpno-mp2-f12/D/cc-pVDZ(fi/sf/fw)//b3lyp/G/def2svp')
# Test illegal job level specification (method contains multiple slashes)
with self.assertRaises(ValueError):
ARC(project='test', opt_level='b3lyp/d/def2tzvp/def2tzvp/c')
# Test illegal job level specification (method contains empty space)
with self.assertRaises(ValueError):
ARC(project='test', opt_level='b3lyp/def2tzvp def2tzvp/c')
# Test direct job level specification conflicts with level of theory specification
with self.assertRaises(InputError):
ARC(project='test', level_of_theory='b3lyp/sto-3g', opt_level='wb97xd/def2tzvp')
# Test deduce levels from default method from settings.py
arc1 = ARC(project='test')
self.assertEqual(arc1.opt_level.simple(), 'wb97xd/def2tzvp')
self.assertEqual(arc1.freq_level.simple(), 'wb97xd/def2tzvp')
self.assertEqual(arc1.sp_level.simple(), 'ccsd(t)-f12/cc-pvtz-f12')
# Test deduce levels from composite method specification
arc2 = ARC(project='test', composite_method='cbs-qb3')
self.assertIsNone(arc2.opt_level)
self.assertIsNone(arc2.sp_level)
self.assertIsNone(arc2.orbitals_level)
self.assertEqual(arc2.freq_level.simple(), 'b3lyp/cbsb7')
self.assertEqual(arc2.scan_level.simple(), 'b3lyp/cbsb7')
self.assertEqual(arc2.composite_method.simple(), 'cbs-qb3')
# Test deduce levels from level of theory specification
arc3 = ARC(project='test', level_of_theory='ccsd(t)-f12/cc-pvtz-f12//wb97m-v/def2tzvpd')
self.assertEqual(arc3.opt_level.simple(), 'wb97m-v/def2tzvpd')
self.assertEqual(arc3.freq_level.simple(), 'wb97m-v/def2tzvpd')
self.assertEqual(arc3.sp_level.simple(), 'ccsd(t)-f12/cc-pvtz-f12')
self.assertEqual(arc3.scan_level.simple(), 'wb97m-v/def2tzvpd')
self.assertIsNone(arc3.orbitals_level)
arc4 = ARC(project='test', opt_level='wb97x-d3/6-311++G(3df,3pd)', freq_level='m062x/def2-tzvpp',
sp_level='ccsd(t)f12/aug-cc-pvqz', calc_freq_factor=False)
self.assertEqual(arc4.opt_level.simple(), 'wb97x-d3/6-311++g(3df,3pd)')
self.assertEqual(arc4.freq_level.simple(), 'm062x/def2-tzvpp')
self.assertEqual(arc4.sp_level.simple(), 'ccsd(t)f12/aug-cc-pvqz')
# Test deduce freq level from opt level
arc7 = ARC(project='test', opt_level='wb97xd/aug-cc-pvtz', calc_freq_factor=False)
self.assertEqual(arc7.opt_level.simple(), 'wb97xd/aug-cc-pvtz')
self.assertEqual(arc7.freq_level.simple(), 'wb97xd/aug-cc-pvtz')
# Test a level not supported by Arkane does not raise error if compute_thermo is False
arc8 = ARC(project='test', sp_level='method/unsupported', calc_freq_factor=False, compute_thermo=False)
self.assertEqual(arc8.sp_level.simple(), 'method/unsupported')
self.assertEqual(arc8.freq_level.simple(), 'wb97xd/def2tzvp')
# Test that a level not supported by Arkane does raise an error if compute_thermo is True (default)
with self.assertRaises(ValueError):
ARC(project='test', sp_level='method/unsupported', calc_freq_factor=False)
# Test dictionary format specification with auxiliary basis and DFT dispersion
arc9 = ARC(project='test', opt_level={},
freq_level={'method': 'B3LYP/G', 'basis': 'cc-pVDZ(fi/sf/fw)', 'auxiliary_basis': 'def2-svp/C',
'dispersion': 'DEF2-tzvp/c'},
sp_level={'method': 'DLPNO-CCSD(T)-F12', 'basis': 'cc-pVTZ-F12',
'auxiliary_basis': 'aug-cc-pVTZ/C cc-pVTZ-F12-CABS'},
calc_freq_factor=False, compute_thermo=False)
self.assertEqual(arc9.opt_level.simple(), 'wb97xd/def2tzvp')
self.assertEqual(str(arc9.freq_level), 'b3lyp/g/cc-pvdz(fi/sf/fw), auxiliary_basis: def2-svp/c, '
'dispersion: def2-tzvp/c, software: gaussian (dft)')
self.assertEqual(str(arc9.sp_level),
'dlpno-ccsd(t)-f12/cc-pvtz-f12, auxiliary_basis: aug-cc-pvtz/c cc-pvtz-f12-cabs, '
'software: orca (wavefunction)')
# Test using default frequency and orbital level for composite job, also forbid rotors job
arc10 = ARC(project='test', composite_method='cbs-qb3', calc_freq_factor=False,
job_types={'rotors': False, 'orbitals': True})
self.assertEqual(arc10.freq_level.simple(), 'b3lyp/cbsb7')
self.assertIsNone(arc10.scan_level)
self.assertEqual(arc10.orbitals_level.simple(), 'b3lyp/cbsb7')
# Test using specified frequency, scan, and orbital for composite job
arc11 = ARC(project='test', composite_method='cbs-qb3', freq_level='wb97xd/6-311g', scan_level='apfd/def2svp',
orbitals_level='hf/sto-3g', job_types={'orbitals': True}, calc_freq_factor=False)
self.assertEqual(arc11.scan_level.simple(), 'apfd/def2svp')
self.assertEqual(arc11.freq_level.simple(), 'wb97xd/6-311g')
self.assertEqual(arc11.orbitals_level.simple(), 'hf/sto-3g')
# Test using default frequency and orbital level for job specified from level of theory, also forbid rotors job
arc12 = ARC(project='test', level_of_theory='b3lyp/sto-3g', calc_freq_factor=False,
job_types={'rotors': False, 'orbitals': True}, compute_thermo=False)
self.assertIsNone(arc12.scan_level)
self.assertEqual(arc12.orbitals_level.simple(), 'wb97x-d3/def2tzvp')
# Test using specified scan level
arc13 = ARC(project='test', level_of_theory='b3lyp/sto-3g', calc_freq_factor=False, scan_level='apfd/def2svp',
job_types={'rotors': True}, compute_thermo=False)
self.assertEqual(arc13.scan_level.simple(), 'apfd/def2svp')
# Test specifying semi-empirical and force-field methods using dictionary
arc14 = ARC(project='test', opt_level={'method': 'AM1'}, freq_level={'method': 'PM6'},
sp_level={'method': 'AMBER'}, calc_freq_factor=False, compute_thermo=False)
self.assertEqual(arc14.opt_level.simple(), 'am1')
self.assertEqual(arc14.freq_level.simple(), 'pm6')
self.assertEqual(arc14.sp_level.simple(), 'amber')
def test_determine_unique_species_labels(self):
"""Test the determine_unique_species_labels method"""
spc0 = ARCSpecies(label='spc0', smiles='CC', compute_thermo=False)
spc1 = ARCSpecies(label='spc1', smiles='CC', compute_thermo=False)
spc2 = ARCSpecies(label='spc2', smiles='CC', compute_thermo=False)
arc0 = ARC(project='arc_test', job_types=self.job_types1, species=[spc0, spc1, spc2],
level_of_theory='ccsd(t)-f12/cc-pvdz-f12//b3lyp/6-311+g(3df,2p)')
self.assertEqual(arc0.unique_species_labels, ['spc0', 'spc1', 'spc2'])
spc3 = ARCSpecies(label='spc0', smiles='CC', compute_thermo=False)
arc0.species.append(spc3)
with self.assertRaises(ValueError):
arc0.determine_unique_species_labels()
def test_add_hydrogen_for_bde(self):
"""Test the add_hydrogen_for_bde method"""
spc0 = ARCSpecies(label='spc0', smiles='CC', compute_thermo=False)
arc0 = ARC(project='arc_test', job_types=self.job_types1, species=[spc0],
level_of_theory='ccsd(t)-f12/cc-pvdz-f12//b3lyp/6-311+g(3df,2p)')
arc0.add_hydrogen_for_bde()
self.assertEqual(len(arc0.species), 1)
spc1 = ARCSpecies(label='spc1', smiles='CC', compute_thermo=False, bdes=['all_h'])
arc1 = ARC(project='arc_test', job_types=self.job_types1, species=[spc1],
level_of_theory='ccsd(t)-f12/cc-pvdz-f12//b3lyp/6-311+g(3df,2p)')
arc1.add_hydrogen_for_bde()
self.assertEqual(len(arc1.species), 2)
self.assertIn('H', [spc.label for spc in arc1.species])
def test_process_adaptive_levels(self):
"""Test processing the adaptive levels"""
adaptive_levels_1 = {(1, 5): {('opt', 'freq'): 'wb97xd/6-311+g(2d,2p)',
('sp',): 'ccsd(t)-f12/aug-cc-pvtz-f12'},
(6, 15): {('opt', 'freq'): 'b3lyp/cbsb7',
'sp': 'dlpno-ccsd(t)/def2-tzvp'},
(16, 30): {('opt', 'freq'): 'b3lyp/6-31g(d,p)',
'sp': {'method': 'wb97xd', 'basis': '6-311+g(2d,2p)'}},
(31, 'inf'): {('opt', 'freq'): 'b3lyp/6-31g(d,p)',
'sp': 'b3lyp/6-311+g(d,p)'}}
processed_1 = process_adaptive_levels(adaptive_levels_1)
self.assertEqual(processed_1[(6, 15)][('sp',)].simple(), 'dlpno-ccsd(t)/def2-tzvp')
self.assertEqual(processed_1[(16, 30)][('sp',)].simple(), 'wb97xd/6-311+g(2d,2p)')
# test non dict
with self.assertRaises(InputError):
process_adaptive_levels(4)
# wrong atom range
with self.assertRaises(InputError):
process_adaptive_levels({5: {('opt', 'freq'): 'wb97xd/6-311+g(2d,2p)',
('sp',): 'ccsd(t)-f12/aug-cc-pvtz-f12'},
(6, 'inf'): {('opt', 'freq'): 'b3lyp/6-31g(d,p)',
'sp': 'b3lyp/6-311+g(d,p)'}})
        # no 'inf' entry
with self.assertRaises(InputError):
process_adaptive_levels({(1, 5): {('opt', 'freq'): 'wb97xd/6-311+g(2d,2p)',
('sp',): 'ccsd(t)-f12/aug-cc-pvtz-f12'},
(6, 75): {('opt', 'freq'): 'b3lyp/6-31g(d,p)',
'sp': 'b3lyp/6-311+g(d,p)'}})
# adaptive level not a dict
with self.assertRaises(InputError):
process_adaptive_levels({(1, 5): {('opt', 'freq'): 'wb97xd/6-311+g(2d,2p)',
('sp',): 'ccsd(t)-f12/aug-cc-pvtz-f12'},
(6, 'inf'): 'b3lyp/6-31g(d,p)'})
# non-consecutive atom ranges
with self.assertRaises(InputError):
process_adaptive_levels({(1, 5): {('opt', 'freq'): 'wb97xd/6-311+g(2d,2p)',
('sp',): 'ccsd(t)-f12/aug-cc-pvtz-f12'},
(15, 'inf'): {('opt', 'freq'): 'b3lyp/6-31g(d,p)',
'sp': 'b3lyp/6-311+g(d,p)'}})
@classmethod
def tearDownClass(cls):
"""
A function that is run ONCE after all unit tests in this class.
Delete all project directories created during these unit tests
"""
projects = ['arc_project_for_testing_delete_after_usage_test_from_dict',
'arc_model_chemistry_test', 'arc_test', 'test', 'unit_test_specific_job', 'wrong']
for project in projects:
project_directory = os.path.join(ARC_PATH, 'Projects', project)
shutil.rmtree(project_directory, ignore_errors=True)
if __name__ == '__main__':
unittest.main(testRunner=unittest.TextTestRunner(verbosity=2))
| python |
b = 1
for i in range(100000):
b += i * b
print(b)
| python |
import asyncio
import time
async def say_after(delay, what):
await asyncio.sleep(delay)
return what
async def main():
taskvec=[]
for i in range(10):
taskvec.append(asyncio.create_task(say_after(i,str(i))))
print(f"started at {time.strftime('%X')}")
for task in taskvec:
print(await task)
print(f"finished at {time.strftime('%X')}")
asyncio.run(main()) | python |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
DEMNet, WithLossCell and TrainOneStepCell
"""
import mindspore.nn as nn
import mindspore.ops as ops
import mindspore.context as context
from mindspore.common.initializer import Normal
from mindspore.ops import operations as P
from mindspore.ops import composite as C
from mindspore.ops import functional as F
from mindspore.parallel._utils import _get_gradients_mean, _get_parallel_mode, _get_device_num
from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
class MyTanh(nn.Cell):
def __init__(self):
super(MyTanh, self).__init__()
self.tanh = P.Tanh()
def construct(self, x):
return 1.7159 * self.tanh(2 * x / 3)
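# Note: MyTanh computes 1.7159 * tanh(2x/3), the scaled hyperbolic tangent
# recommended in LeCun et al., "Efficient BackProp"; it maps inputs of ±1 to
# outputs of roughly ±1, keeping activations in the quasi-linear range.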
class DEMNet1(nn.Cell):
"""cub+att"""
def __init__(self):
super(DEMNet1, self).__init__()
self.relu = nn.ReLU()
self.fc1 = nn.Dense(312, 700, weight_init=Normal(0.0008))
self.fc2 = nn.Dense(700, 1024, weight_init=Normal(0.0012))
def construct(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return x
class DEMNet2(nn.Cell):
"""awa+att"""
def __init__(self):
super(DEMNet2, self).__init__()
self.relu = nn.ReLU()
self.fc1 = nn.Dense(85, 700, weight_init=Normal(0.0005))
self.fc2 = nn.Dense(700, 1024, weight_init=Normal(0.0005))
def construct(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return x
class DEMNet3(nn.Cell):
"""awa+word"""
def __init__(self):
super(DEMNet3, self).__init__()
self.relu = nn.ReLU()
self.fc1 = nn.Dense(1000, 1024, weight_init=Normal(0.0005))
def construct(self, x):
x = self.relu(self.fc1(x))
return x
class DEMNet4(nn.Cell):
"""awa+fusion"""
def __init__(self):
super(DEMNet4, self).__init__()
self.relu = nn.ReLU()
self.tanh = MyTanh()
self.fc1 = nn.Dense(1000, 900, weight_init=Normal(0.0008))
self.fc2 = nn.Dense(85, 900, weight_init=Normal(0.0012))
self.fc3 = nn.Dense(900, 1024, weight_init=Normal(0.0012))
def construct(self, att, word):
word = self.tanh(self.fc1(word))
att = self.tanh(self.fc2(att))
fus = word + 3 * att
fus = self.relu(self.fc3(fus))
return fus
class MyWithLossCell(nn.Cell):
def __init__(self, backbone, loss_fn):
super(MyWithLossCell, self).__init__(auto_prefix=False)
self._backbone = backbone
self._loss_fn = loss_fn
def construct(self, data1, data2, label):
out = self._backbone(data1, data2)
return self._loss_fn(out, label)
class MyTrainOneStepCell(nn.Cell):
"""custom TrainOneStepCell"""
def __init__(self, network, optimizer, sens=1.0):
super(MyTrainOneStepCell, self).__init__(auto_prefix=False)
self.network = network
self.network.set_grad()
self.network.add_flags(defer_inline=True)
self.weights = optimizer.parameters
self.optimizer = optimizer
self.grad = C.GradOperation(get_by_list=True, sens_param=True)
self.sens = sens
self.reducer_flag = False
self.grad_reducer = F.identity
self.parallel_mode = _get_parallel_mode()
if self.parallel_mode in (context.ParallelMode.DATA_PARALLEL, context.ParallelMode.HYBRID_PARALLEL):
self.reducer_flag = True
if self.reducer_flag:
mean = _get_gradients_mean()
degree = _get_device_num()
self.grad_reducer = DistributedGradReducer(self.weights, mean, degree)
def construct(self, *inputs):
weights = self.weights
loss = self.network(*inputs)
sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
grads = self.grad(self.network, weights)(*inputs, sens)
grads = self.grad_reducer(grads)
grads = ops.clip_by_global_norm(grads, 0.2)
self.optimizer(grads)
return loss
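# Minimal wiring sketch (hedged: the loss, optimizer and batch names below are
# illustrative, not taken from the accompanying training script):
#   net = DEMNet4()                                   # fusion variant: (att, word)
#   net_with_loss = MyWithLossCell(net, nn.MSELoss())
#   optimizer = nn.Adam(net.trainable_params(), learning_rate=1e-4)
#   train_net = MyTrainOneStepCell(net_with_loss, optimizer)
#   loss = train_net(att_batch, word_batch, visual_feature_batch)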
| python |
from django.urls import reverse
from rest_framework import status
from django.test import TestCase
from .models import CustomUser
from .serializers import UserDetailsSerializer
from rest_framework.test import APIClient
REGISTRATION_URL = reverse('rest_register')
LOGIN_URL = reverse('rest_login')
PASSWORD_CHANGE_URL = reverse('rest_password_change')
USER_DETAIL_URL = reverse('rest_user_details')
LOGOUT_URL = reverse('rest_logout')
class UsersApiTest(TestCase):
def setUp(self):
self.client = APIClient()
response = self.client.post(REGISTRATION_URL, {
'email': '[email protected]',
'password1': 'hakunamatata',
'password2': 'hakunamatata',
'first_name' : 'john',
'last_name': 'doe'
}, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + response.data['token'])
def test_password_change(self):
response = self.client.post(PASSWORD_CHANGE_URL, {
'new_password1': 'hdgstgehst01',
'new_password2': 'hdgstgehst01',
}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_user_details(self):
response = self.client.get(USER_DETAIL_URL)
user_details = CustomUser.objects.get(email='[email protected]')
serializer = UserDetailsSerializer(user_details)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, serializer.data)
def test_logout(self):
response = self.client.post(LOGOUT_URL)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.client.logout()
response = self.client.get(USER_DETAIL_URL)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
| python |
print("Please give termination parameter as zero to proceed with number of iterations.")
x = list()
x1, d, e = map(float, input(
"Enter initial point, delta and termination parameter: ").split())
expr = input("Enter expression for x: ")
print("Please give no. of iterations as considerably a high number to check with termination parameter.")
j = int(input("Enter number of iterations to be performed: "))
x2 = x1+d
x = x1
f1 = eval(expr)
x = x2
f2 = eval(expr)
if(f1 >= f2):
x3 = x1+2*d
else:
x3 = x1-d
x = x3
f3 = eval(expr)
c = 0
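# Successive quadratic estimation: fit a parabola through (x1, f1), (x2, f2),
# (x3, f3) via divided differences,
#   q(x) = a0 + a1*(x - x1) + a2*(x - x1)*(x - x2),
# whose stationary point is xbar = (x1 + x2)/2 - a1/(2*a2). The loop below
# keeps the three best points and repeats until both |Fmin - f(xbar)| and
# |xmin - xbar| drop below the termination parameter, or the iteration cap is
# reached.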
while(True):
Fmin = min(f1, f2, f3)
if(Fmin == f1):
xmin = x1
elif(Fmin == f2):
xmin = x2
else:
xmin = x3
a0 = f1
a1 = (f2-f1)/(x2-x1)
a2 = (1/(x3-x2))*(((f3-f1)/(x3-x1))-a1)
xbar = (x1+x2)/2-(a1/(2*a2))
x = xbar
fxbar = eval(expr)
xlist = [x1, x2, x3, xbar]
flist = [f1, f2, f3, fxbar]
sortlist = sorted(flist)
newx = list()
newf = list()
for i in range(3):
# flist.index(sortlist[i]) returns index of corresponding f element in original list
newx.append(xlist[flist.index(sortlist[i])])
newx = sorted(newx)
for i in range(3):
# xlist.index(newx[i]) returns index of corresponding x element in original list
newf.append(flist[xlist.index(newx[i])])
x1, x2, x3 = newx
f1, f2, f3 = newf
#print("x values are",x1," ",x2," ",x3)
newmin = xlist[flist.index(sortlist[0])]
#print("new min is ",newmin)
c += 1
if((abs(Fmin-fxbar) < e and abs(xmin-xbar) < e)or c >= j):
break
print("Point corresponding to x=", round(
newmin, 5), " is the minimum of the function.")
| python |
from bs4 import BeautifulSoup
from faker import Faker
import requests
class faceFarm():
def __init__(self) -> None:
super(faceFarm, self).__init__()
self.requests = requests.Session()
pass
def request(self, method, url, **kwargs):
try:
return self.requests.request(method, url, timeout=(10, 30), **kwargs)
except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout) as e:
return e
def identifyEmail(self, email):
url = "https://m.facebook.com/login/identify/"
page = self.request("GET", url, params={
"ctx": "recover",
"c": "/login/",
"search_attempts": "1",
"ars": "facebook_login",
"alternate_search": "0",
"show_friend_search_filtered_list": "0",
"birth_month_search": "0",
"city_search": "0"
})
soup = BeautifulSoup(page.text, "html.parser")
lsd = soup.find("input", {"name": "lsd"})["value"]
jazoest = soup.find("input", {"name": "jazoest"})["value"]
page = self.request("POST", url, params={
"ctx": "recover",
"c": "/login/",
"search_attempts": "1",
"ars": "facebook_login",
"alternate_search": "0",
"show_friend_search_filtered_list": "0",
"birth_month_search": "0",
"city_search": "0"
}, data={
"lsd": lsd,
"jazoest": jazoest,
"email": email,
"did_submit": "Cari"
})
soup = BeautifulSoup(page.text, "html.parser")
login_identify_search_error_msg = soup.find(
"div", {"id": "login_identify_search_error_msg"})
if not login_identify_search_error_msg:
status = soup.find("title").get_text()
print(
"[*] Email Address : {}\n[*] Status : {}\n[+] Saved to 'vuln.txt'.\n".format(email, status))
with open("vuln.txt", "a", encoding="utf-8") as fp:
fp.write(email + "\n")
else:
status = soup.find("title").get_text()
detail_status = login_identify_search_error_msg.get_text()
print("[*] Email Address : {}\n[*] Status : {}\n[*] Detail Status : {}\n".format(
email, status, detail_status))
pass
if __name__ == "__main__":
faceFarmASCII = """ __ ___
/ _|__ _ __ ___| __|_ _ _ _ _ __
| _/ _` / _/ -_) _/ _` | '_| ' \
|_| \__,_\__\___|_|\__,_|_| |_|_|_|
faceFarm - Email Detector for Facebook
"""
print(faceFarmASCII)
faceFarm = faceFarm()
while True:
fake = Faker()
emailAddr = fake.email().split("@")[0] + "@yahoo.com"
faceFarm.identifyEmail(emailAddr)
| python |
var1 = int(input('Enter a number: '))
print('Analyzing the value {}, its predecessor is {} and its successor is {}'.format(var1, var1-1, var1+1))
| python |
import pathlib
import aiosql
queries = aiosql.from_path(pathlib.Path(__file__).parent / "sql", "asyncpg")
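# Hedged usage sketch: aiosql exposes each `-- name:` query found in the sql/
# directory as an awaitable attribute of `queries`. The query name, parameter
# and dsn below are hypothetical, since the sql files are not shown here:
#   conn = await asyncpg.connect(dsn)
#   rows = await queries.get_items(conn, limit=10)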
| python |
dollars = float(input("Enter a value in dollars: "))
def main():
euros=dollars*0.8007
euros=round(euros,2)
print("That is exactly",euros,"euros.")
main() | python |
import logging
from functools import partial
from typing import TYPE_CHECKING, Optional
from magicgui.widgets import create_widget
from napari.qt.threading import thread_worker
from napari_plugin_engine import napari_hook_implementation
from qtpy.QtCore import QEvent, Qt
from qtpy.QtWidgets import (
QCheckBox,
QFormLayout,
QPushButton,
QSlider,
QSpinBox,
QVBoxLayout,
QWidget,
)
from napari_basicpy._mock_basic import MockBaSiC as BaSiC
if TYPE_CHECKING:
import napari # pragma: no cover
logger = logging.getLogger(__name__)
class BasicWidget(QWidget):
"""Example widget class."""
def __init__(self, viewer: "napari.viewer.Viewer"):
"""Init example widget."""
super().__init__()
self.viewer = viewer
self.setLayout(QVBoxLayout())
self.layer_select = create_widget(
annotation="napari.layers.Layer", label="image_layer"
)
self.layout().addWidget(self.layer_select.native)
settings_layout = QFormLayout()
settings_layout.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow)
settings_layout.addRow("Setting 1", QSpinBox())
settings_layout.addRow("Setting 2", QSlider(Qt.Horizontal))
settings_layout.addRow("Setting 3", QCheckBox())
settings_layout.addRow("Setting 4", QCheckBox())
self.settings_container = QWidget()
self.settings_container.setLayout(settings_layout)
self.run_btn = QPushButton("Run")
self.run_btn.clicked.connect(self._run)
self.cancel_btn = QPushButton("Cancel")
self.layout().addWidget(self.settings_container)
self.layout().addWidget(self.run_btn)
self.layout().addWidget(self.cancel_btn)
def _run(self):
def update_layer(image):
try:
self.viewer.layers["result"].data = image
except KeyError:
self.viewer.add_image(image, name="result")
@thread_worker(
start_thread=False,
connect={"yielded": update_layer, "returned": update_layer},
)
def call_basic(image):
basic = BaSiC()
fit = basic.fit(image, updates=True)
while True:
try:
yield next(fit)
except StopIteration as final:
return final.value
logger.info("Starting BaSiC")
data = self.layer_select.value.data
worker = call_basic(data)
self.cancel_btn.clicked.connect(partial(self._cancel, worker=worker))
worker.finished.connect(self.cancel_btn.clicked.disconnect)
worker.start()
def _cancel(self, worker):
logger.info("Canceling BasiC")
worker.quit()
def showEvent(self, event: QEvent) -> None: # noqa: D102
super().showEvent(event)
self.reset_choices()
def reset_choices(self, event: Optional[QEvent] = None) -> None:
"""Repopulate image list."""
self.layer_select.reset_choices(event)
@napari_hook_implementation
def napari_experimental_provide_dock_widget(): # noqa
return [BasicWidget]
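# Hedged manual-usage sketch (napari normally constructs the widget itself via
# the hook above; `import napari` would be needed at runtime):
#   viewer = napari.Viewer()
#   viewer.window.add_dock_widget(BasicWidget(viewer), name="BaSiC")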
| python |
"""Methods for projecting a feature space to lower dimensionality."""
from .factory import create_projector, IDENTIFIERS, DEFAULT_IDENTIFIER # noqa: F401
from .projector import Projector # noqa: F401
| python |
import numpy as np
from sklearn.metrics import pairwise_distances
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances, haversine_distances, chi2_kernel, \
manhattan_distances
class Similarity(object):
"""
Simple kNN class
"""
def __init__(self, data, user_profile_matrix, item_attribute_matrix, similarity):
self._data = data
self._ratings = data.train_dict
self._user_profile_matrix = user_profile_matrix
self._item_attribute_matrix = item_attribute_matrix
self._similarity = similarity
self._users = self._data.users
self._items = self._data.items
self._private_users = self._data.private_users
self._public_users = self._data.public_users
self._private_items = self._data.private_items
self._public_items = self._data.public_items
def initialize(self):
"""
        This function initializes the data model
"""
supported_similarities = ["cosine", "dot", ]
supported_dissimilarities = ["euclidean", "manhattan", "haversine", "chi2", 'cityblock', 'l1', 'l2', 'braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
print(f"\nSupported Similarities: {supported_similarities}")
print(f"Supported Distances/Dissimilarities: {supported_dissimilarities}\n")
self._transactions = self._data.transactions
self._similarity_matrix = np.empty((len(self._users), len(self._items)))
self.process_similarity(self._similarity)
def process_similarity(self, similarity):
if similarity == "cosine":
self._similarity_matrix = cosine_similarity(self._user_profile_matrix, self._item_attribute_matrix)
elif similarity == "dot":
self._similarity_matrix = (self._data.sp_i_train_ratings @ self._data.sp_i_train_ratings.T).toarray()
elif similarity == "euclidean":
self._similarity_matrix = (1 / (1 + euclidean_distances(self._user_profile_matrix, self._item_attribute_matrix)))
elif similarity == "manhattan":
self._similarity_matrix = (1 / (1 + manhattan_distances(self._user_profile_matrix, self._item_attribute_matrix)))
elif similarity == "haversine":
self._similarity_matrix = (1 / (1 + haversine_distances(self._user_profile_matrix, self._item_attribute_matrix)))
elif similarity == "chi2":
self._similarity_matrix = (1 / (1 + chi2_kernel(self._user_profile_matrix, self._item_attribute_matrix)))
elif similarity in ['cityblock', 'l1', 'l2']:
self._similarity_matrix = (1 / (1 + pairwise_distances(self._user_profile_matrix, self._item_attribute_matrix, metric=similarity)))
elif similarity in ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']:
self._similarity_matrix = (1 / (1 + pairwise_distances(self._user_profile_matrix.toarray(), self._item_attribute_matrix.toarray(), metric=similarity)))
else:
raise Exception("Not implemented similarity")
def get_transactions(self):
return self._transactions
def get_user_recs(self, u, k):
user_items = self._ratings[u].keys()
indexed_user_items = [self._public_items[i] for i in user_items]
predictions = {self._private_items[i]: v for i, v in enumerate(self._similarity_matrix[self._public_users[u]]) if i not in indexed_user_items}
indices, values = zip(*predictions.items())
indices = np.array(indices)
values = np.array(values)
partially_ordered_preds_indices = np.argpartition(values, -k)[-k:]
real_values = values[partially_ordered_preds_indices]
real_indices = indices[partially_ordered_preds_indices]
local_top_k = real_values.argsort()[::-1]
return [(real_indices[item], real_values[item]) for item in local_top_k]
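    # Note on the top-k selection above: np.argpartition(values, -k)[-k:]
    # returns the positions of the k largest scores without a full sort;
    # the final argsort()[::-1] then orders only those k items best-to-worst.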
def get_model_state(self):
saving_dict = {}
saving_dict['_neighbors'] = self._neighbors
saving_dict['_similarity'] = self._similarity
saving_dict['_num_neighbors'] = self._num_neighbors
return saving_dict
def set_model_state(self, saving_dict):
self._neighbors = saving_dict['_neighbors']
self._similarity = saving_dict['_similarity']
self._num_neighbors = saving_dict['_num_neighbors']
| python |
from enum import Enum
from typing import Optional, Sequence
from PyQt5 import QtCore, QtWidgets
from electroncash.address import Address, AddressError
from electroncash.consolidate import (
MAX_STANDARD_TX_SIZE,
MAX_TX_SIZE,
AddressConsolidator,
)
from electroncash.constants import PROJECT_NAME, XEC
from electroncash.transaction import Transaction
from electroncash.wallet import Abstract_Wallet
from electroncash_gui.qt.multi_transactions_dialog import MultiTransactionsWidget
class TransactionsStatus(Enum):
INTERRUPTED = "cancelled"
NOT_STARTED = "not started"
SELECTING = "selecting coins..."
BUILDING = "building transactions..."
FINISHED = "finished building transactions"
NO_RESULT = "finished without generating any transactions"
class ConsolidateWorker(QtCore.QObject):
finished = QtCore.pyqtSignal()
status_changed = QtCore.pyqtSignal(TransactionsStatus)
transactions_ready = QtCore.pyqtSignal(list)
"""Emits the list of :class:`Transaction` after the last transaction is
generated."""
progress = QtCore.pyqtSignal(int)
"""Emits the number of generated transactions after each new transaction."""
def __init__(
self,
address: Address,
wallet: Abstract_Wallet,
include_coinbase: bool,
include_non_coinbase: bool,
include_frozen: bool,
include_slp: bool,
minimum_value: Optional[int],
maximum_value: Optional[int],
minimum_height: Optional[int],
maximum_height: Optional[int],
output_address: Address,
max_tx_size: int,
):
super().__init__()
self.status_changed.emit(TransactionsStatus.SELECTING)
self.consolidator = AddressConsolidator(
address,
wallet,
include_coinbase,
include_non_coinbase,
include_frozen,
include_slp,
minimum_value,
maximum_value,
minimum_height,
maximum_height,
output_address,
max_tx_size,
)
self.interrupt_mutex = QtCore.QMutex()
self.interrupt: bool = False
def was_interruption_requested(self) -> bool:
self.interrupt_mutex.lock()
do_interrupt = self.interrupt
self.interrupt_mutex.unlock()
return do_interrupt
def request_interruption(self):
"""Stop the worker as soon as possible (i.e. in-between two
transactions).
This causes the :attr:`status_changed` and :attr:`finished` signals to be
emitted. The :attr:`transactions_ready` signal is not emitted if the worker
is interrupted before it has generated the last transaction.
"""
self.interrupt_mutex.lock()
self.interrupt = True
self.interrupt_mutex.unlock()
def build_transactions(self):
self.status_changed.emit(TransactionsStatus.BUILDING)
transactions = []
for i, tx in enumerate(self.consolidator.iter_transactions()):
if self.was_interruption_requested():
self.status_changed.emit(TransactionsStatus.INTERRUPTED)
self.finished.emit()
return
transactions.append(tx)
self.progress.emit(i + 1)
if transactions:
self.status_changed.emit(TransactionsStatus.FINISHED)
# else the transaction page will set the status to NO_RESULT upon receiving
# an empty list of transactions
self.transactions_ready.emit(transactions)
self.finished.emit()
class ConsolidateCoinsWizard(QtWidgets.QWizard):
def __init__(
self,
address: Address,
wallet: Abstract_Wallet,
main_window,
parent: Optional[QtWidgets.QWidget] = None,
):
super().__init__(parent)
self.setWindowTitle(f"Consolidate coins for address {address.to_ui_string()}")
self.tx_thread: Optional[QtCore.QThread] = None
self.address: Address = address
self.wallet: Abstract_Wallet = wallet
self.transactions: Sequence[Transaction] = []
self.coins_page = CoinSelectionPage()
self.addPage(self.coins_page)
self.output_page = OutputsPage(address)
self.addPage(self.output_page)
self.tx_page = TransactionsPage(wallet, main_window)
self.addPage(self.tx_page)
self.currentIdChanged.connect(self.on_page_changed)
def on_page_changed(self, page_id: int):
# The thread is only supposed to be started after reaching the tx_page,
# and must be stopped if the user decides to go back to a previous page
# or close the dialog.
self.stop_thread_if_running()
if self.currentPage() is self.tx_page:
self.tx_page.update_status(TransactionsStatus.NOT_STARTED)
self.tx_thread = QtCore.QThread()
self.worker = ConsolidateWorker(
self.address,
self.wallet,
self.coins_page.include_coinbase_cb.isChecked(),
self.coins_page.include_non_coinbase_cb.isChecked(),
self.coins_page.include_frozen_cb.isChecked(),
self.coins_page.include_slp_cb.isChecked(),
self.coins_page.get_minimum_value(),
self.coins_page.get_maximum_value(),
self.coins_page.minimum_height_sb.value(),
self.coins_page.maximum_height_sb.value(),
self.output_page.get_output_address(),
self.output_page.tx_size_sb.value(),
)
# Connections
self.worker.moveToThread(self.tx_thread)
self.tx_thread.started.connect(self.worker.build_transactions)
self.worker.status_changed.connect(self.tx_page.update_status)
self.worker.progress.connect(self.tx_page.update_progress)
self.worker.transactions_ready.connect(self.on_build_transactions_finished)
self.worker.finished.connect(self.tx_thread.quit)
self.tx_thread.start()
def stop_thread_if_running(self):
if self.tx_thread is not None and self.tx_thread.isRunning():
self.worker.request_interruption()
self.tx_thread.quit()
def on_build_transactions_finished(self, transactions: Sequence[Transaction]):
self.transactions = transactions
self.tx_page.set_unsigned_transactions(self.transactions)
class AmountSpinBox(QtWidgets.QDoubleSpinBox):
def __init__(self):
super().__init__()
self.setToolTip(f"Amount in {XEC}")
# 0.01 XEC is 1 satoshi
self.setDecimals(2)
self.setStepType(QtWidgets.QAbstractSpinBox.AdaptiveDecimalStepType)
self.setMaximum(21_000_000_000_000)
self.setGroupSeparatorShown(True)
# Enough width to display "21 000 000 000,00":
self.setMinimumWidth(170)
class BlockHeightSpinBox(QtWidgets.QSpinBox):
def __init__(self):
super().__init__()
self.setToolTip("Block height")
# This maximum should give us a useful range of ~20,000 years
self.setMaximum(1_000_000_000)
self.setGroupSeparatorShown(True)
class CoinSelectionPage(QtWidgets.QWizardPage):
def __init__(self, parent=None):
super().__init__(parent)
self.setTitle("Filter coins")
layout = QtWidgets.QVBoxLayout()
self.setLayout(layout)
self.include_coinbase_cb = QtWidgets.QCheckBox("Include coinbase coins")
self.include_coinbase_cb.setChecked(True)
layout.addWidget(self.include_coinbase_cb)
self.include_non_coinbase_cb = QtWidgets.QCheckBox("Include non-coinbase coins")
self.include_non_coinbase_cb.setChecked(True)
layout.addWidget(self.include_non_coinbase_cb)
self.include_frozen_cb = QtWidgets.QCheckBox("Include frozen coins")
self.include_frozen_cb.setChecked(False)
layout.addWidget(self.include_frozen_cb)
self.include_slp_cb = QtWidgets.QCheckBox("Include coins with SLP tokens")
self.include_slp_cb.setChecked(False)
self.include_slp_cb.toggled.connect(self.warn_burn_tokens)
layout.addWidget(self.include_slp_cb)
self.minimum_amount_sb = AmountSpinBox()
self.minimum_amount_sb.setValue(5.46)
self.minimum_amount_sb.valueChanged.connect(self.on_min_or_max_amount_changed)
self.filter_by_min_value_cb = self.add_filter_by_value_line(
"Minimum amount (XEC)", self.minimum_amount_sb
)
self.maximum_amount_sb = AmountSpinBox()
self.maximum_amount_sb.setValue(21_000_000_000_000)
self.maximum_amount_sb.valueChanged.connect(self.on_min_or_max_amount_changed)
self.filter_by_max_value_cb = self.add_filter_by_value_line(
"Maximum amount (XEC)", self.maximum_amount_sb
)
self.minimum_height_sb = BlockHeightSpinBox()
self.minimum_height_sb.setValue(0)
self.minimum_height_sb.valueChanged.connect(self.on_min_or_max_height_changed)
self.filter_by_min_height_cb = self.add_filter_by_value_line(
"Minimum block height", self.minimum_height_sb
)
self.maximum_height_sb = BlockHeightSpinBox()
self.maximum_height_sb.setValue(1_000_000)
self.maximum_height_sb.valueChanged.connect(self.on_min_or_max_height_changed)
self.filter_by_max_height_cb = self.add_filter_by_value_line(
"Maximum block height", self.maximum_height_sb
)
def add_filter_by_value_line(
self, label_text: str, value_widget: QtWidgets.QWidget
) -> QtWidgets.QCheckBox:
"""Add a line with a checkbox and a widget to specify a value.
The value widget is enabled when the checkbox is checked.
Return the created QCheckBox instance."""
sublayout = QtWidgets.QHBoxLayout()
self.layout().addLayout(sublayout)
checkbox = QtWidgets.QCheckBox(label_text)
sublayout.addWidget(checkbox)
checkbox.setChecked(False)
value_widget.setEnabled(False)
checkbox.toggled.connect(value_widget.setEnabled)
sublayout.addWidget(value_widget)
return checkbox
def warn_burn_tokens(self, include_slp_is_checked: bool):
if include_slp_is_checked:
button = QtWidgets.QMessageBox.warning(
self,
"SLP tokens may be lost",
f"{PROJECT_NAME} does not support transferring SLP tokens. If you "
"include them in the consolidation transaction, they will be burned.",
buttons=QtWidgets.QMessageBox.Cancel | QtWidgets.QMessageBox.Ok,
)
if button == QtWidgets.QMessageBox.Cancel:
self.include_slp_cb.setChecked(False)
def get_minimum_value(self) -> Optional[int]:
"""Return minimum value in satoshis, or None"""
return (
None
if not self.filter_by_min_value_cb.isChecked()
else int(100 * self.minimum_amount_sb.value())
)
def get_maximum_value(self) -> Optional[int]:
"""Return maximum value in satoshis, or None"""
return (
None
if not self.filter_by_max_value_cb.isChecked()
else int(100 * self.maximum_amount_sb.value())
)
def on_min_or_max_amount_changed(self, *args):
"""Warn if the min-max range is empty"""
if self.minimum_amount_sb.value() > self.maximum_amount_sb.value():
self.minimum_amount_sb.setStyleSheet("color: red;")
self.maximum_amount_sb.setStyleSheet("color: red;")
else:
self.minimum_amount_sb.setStyleSheet("")
self.maximum_amount_sb.setStyleSheet("")
def on_min_or_max_height_changed(self, *args):
"""Warn if the min-max range is empty"""
if self.minimum_height_sb.value() > self.maximum_height_sb.value():
self.minimum_height_sb.setStyleSheet("color: red;")
self.maximum_height_sb.setStyleSheet("color: red;")
else:
self.minimum_height_sb.setStyleSheet("")
self.maximum_height_sb.setStyleSheet("")
class OutputsPage(QtWidgets.QWizardPage):
def __init__(self, input_address: Address, parent=None):
super().__init__(parent)
self.inputs_address: Address = input_address
self.output_address: Optional[Address] = None
self.setTitle("Outputs")
layout = QtWidgets.QVBoxLayout()
self.setLayout(layout)
layout.addWidget(QtWidgets.QLabel("<h2>Destination address</h2>"))
self.same_address_rb = QtWidgets.QRadioButton("Same address as inputs")
self.same_address_rb.setChecked(True)
layout.addWidget(self.same_address_rb)
single_address_sublayout = QtWidgets.QHBoxLayout()
layout.addLayout(single_address_sublayout)
self.single_address_rb = QtWidgets.QRadioButton("Single address")
single_address_sublayout.addWidget(self.single_address_rb)
self.output_address_edit = QtWidgets.QLineEdit()
self.output_address_edit.setPlaceholderText("enter a valid destination address")
self.output_address_edit.setEnabled(False)
single_address_sublayout.addWidget(self.output_address_edit)
layout.addSpacing(20)
layout.addWidget(QtWidgets.QLabel("<h2>Transaction parameters</h2>"))
tx_size_layout = QtWidgets.QHBoxLayout()
layout.addLayout(tx_size_layout)
tx_size_layout.addWidget(QtWidgets.QLabel("Maximum transaction size (bytes)"))
self.tx_size_sb = QtWidgets.QSpinBox()
self.tx_size_sb.setMinimum(192)
self.tx_size_sb.setMaximum(MAX_TX_SIZE)
self.tx_size_sb.setValue(MAX_STANDARD_TX_SIZE)
tx_size_layout.addWidget(self.tx_size_sb)
self.single_address_rb.toggled.connect(self.output_address_edit.setEnabled)
self.single_address_rb.toggled.connect(self.completeChanged.emit)
self.output_address_edit.textChanged.connect(self.validate_address)
def validate_address(self, address_text: str):
previous_address = self.output_address
try:
self.output_address = Address.from_string(address_text)
except AddressError:
self.output_address = None
if self.output_address != previous_address:
self.completeChanged.emit()
def isComplete(self):
return not self.single_address_rb.isChecked() or self.output_address is not None
def get_output_address(self) -> Address:
return (
self.inputs_address
if self.same_address_rb.isChecked()
else self.output_address
)
class TransactionsPage(QtWidgets.QWizardPage):
def __init__(self, wallet, main_window, parent=None):
super().__init__(parent)
self.status: TransactionsStatus = TransactionsStatus.NOT_STARTED
self.setTitle("Transactions")
layout = QtWidgets.QVBoxLayout()
self.setLayout(layout)
self.status_label = QtWidgets.QLabel()
layout.addWidget(self.status_label)
self.multi_tx_display = MultiTransactionsWidget(wallet, main_window)
layout.addWidget(self.multi_tx_display)
def display_work_in_progress(self):
"""Disable buttons, inform the user about the ongoing computation"""
self.multi_tx_display.reset_labels()
self.multi_tx_display.disable_buttons()
self.setCursor(QtCore.Qt.WaitCursor)
def update_status(self, status: TransactionsStatus):
if status == TransactionsStatus.BUILDING:
self.display_work_in_progress()
self.status_label.setText(f"Status: <b>{status.value}</b>")
previous_status, self.status = self.status, status
if previous_status != status and TransactionsStatus.FINISHED in [
previous_status,
status,
]:
self.completeChanged.emit()
def update_progress(self, num_tx: int):
self.multi_tx_display.set_displayed_number_of_transactions(num_tx)
def set_unsigned_transactions(self, transactions: Sequence[Transaction]):
self.unsetCursor()
if not transactions:
self.update_status(TransactionsStatus.NO_RESULT)
return
self.multi_tx_display.set_transactions(transactions)
def isComplete(self) -> bool:
return self.status == TransactionsStatus.FINISHED
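# Hedged launch sketch (the host plugin's call site is not shown here):
#   wizard = ConsolidateCoinsWizard(address, wallet, main_window)
#   wizard.exec_()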
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Cross-validation of the dataset
import pandas as pd
import numpy as np
import dataclasses
from collections import defaultdict
from .utils.folder import folder_create
from tqdm import tqdm
@dataclasses.dataclass
class Stratified_group_k_fold:
    """
    Holds the parameters used when splitting data with a stratified group K-fold
    """
    csv_config: dict  # information from the csv describing the data used for training
    split_info_folder: str  # folder in which the breakdown of the split files is saved
    n_splits: int = 5  # number of splits
    shuffle: bool = False  # whether to shuffle
    random_state: int = None  # random seed
    def __post_init__(self):
        self.filename_column = self.csv_config["image_filename_column"]  # filename column
        self.label_column = self.csv_config["label_column"]  # label column
        self.group_column = self.csv_config["ID_column"]  # group column
    def split(self, X, y, groups=None):
        """
        Perform a stratified group K-fold split
        Parameters
        ----------
        X : array-like, shape(number of files,)
            Filenames to split
        y : array-like, shape(number of files,)
            Labels of the filenames to split
        groups : None or array-like, shape(number of files,)
            Group names of the files to split
            If None, this becomes a plain stratified K-fold
        Yields
        -------
        train_index : array-like, shape(number of splits, number of files)
            Indices of X assigned to training in the i-th split
        test_index : array-like, shape(number of splits, number of files)
            Indices of X assigned to testing in the i-th split
        """
        # Initialization
        ## If no groups are given, use the filenames themselves as group names
        ## Get the unique group names
if groups is None:
groups = X
unique_group_list = list(set(groups))
        ## Get the number and kinds of labels
labels_list = list(set(y))
labels_num = len(labels_list)
y_count = np.zeros(labels_num)
for _y in y:
y_count[labels_list.index(_y)] += 1
        ## Build dictionaries mapping group -> filenames, filename -> label,
        ## and group -> count of each kind of label
group_X_dict = defaultdict(list)
X_y_dict = defaultdict(list)
group_y_count_dict = defaultdict(lambda: np.zeros(labels_num))
for _X, _y, _groups in zip(X, y, groups):
group_X_dict[_groups].append(_X)
idx = labels_list.index(_y)
X_y_dict[_X] = idx
group_y_count_dict[_groups][idx] += 1
        ## Initialize the variables that hold the post-split information
group_X_fold = [[] for i in range(self.n_splits)]
group_y_count_fold = [np.zeros(labels_num)
for i in range(self.n_splits)]
        # Shuffle, treating each group as one unit
if self.shuffle is True:
np.random.seed(seed=self.random_state)
np.random.shuffle(unique_group_list)
        # Stratified group K-fold
        # Check the label counts of each fold and assign the data so that
        # the standard deviation of the label counts is minimized
for unique_group in tqdm(unique_group_list, desc='k-fold_split'):
best_fold = None
min_value = None
for i in range(self.n_splits):
group_y_count_fold[i] += group_y_count_dict[unique_group]
std_per_label = []
for label in range(labels_num):
label_std = np.std([group_y_count_fold[i][label]
/ y_count[label]
for i in range(self.n_splits)])
std_per_label.append(label_std)
group_y_count_fold[i] -= group_y_count_dict[unique_group]
value = np.mean(std_per_label)
if min_value is None or value < min_value:
min_value = value
best_fold = i
group_y_count_fold[best_fold] += group_y_count_dict[unique_group]
group_X_fold[best_fold] += group_X_dict[unique_group]
        # Use the i-th fold as test data and the rest as training data
X_set = set(X)
for i in range(self.n_splits):
X_train = X_set - set(group_X_fold[i])
X_test = set(group_X_fold[i])
train_index = [i for i, _X in enumerate(X) if _X in X_train]
test_index = [i for i, _X in enumerate(X) if _X in X_test]
yield train_index, test_index
    def k_fold_classifier(self, df):
        """
        Perform a stratified group K-fold split for a classification problem and save the breakdown of each split as csv
        Parameters
        ----------
        df : DataFrame(pandas)
            Information about the data used for training
        Returns
        -------
        df_train_list : array-like[DataFrame(pandas)], shape(number of splits,)
            Data assigned for training
        df_test_list : array-like, shape(number of splits, number of files)
            Data assigned for testing
        """
        # Stratified group K-fold
folder_create(self.split_info_folder)
X = df[self.filename_column].values
y = list(map(str, df[self.label_column].values))
if self.group_column == 'None':
groups = None
else:
groups = df[self.group_column].values
df_train_list = []
df_test_list = []
for i, (train_index, test_index) in enumerate(self.split(X, y, groups)):
df_train = df.iloc[train_index]
df_test = df.iloc[test_index]
            ## Write out the information of the split data
df_train.to_csv(f'{self.split_info_folder}/train_{i}.csv',
index=False, encoding='utf-8')
df_test.to_csv(f'{self.split_info_folder}/test_{i}.csv',
index=False, encoding='utf-8')
df_train_list.append(df_train)
df_test_list.append(df_test)
return df_train_list, df_test_list
    def k_fold_regressor(self, df, bins_num=None):
        """
        Perform a stratified group K-fold split for a regression problem and save the breakdown of each split as csv
        The numeric labels are binned by value into pseudo-labels so that their distribution is split evenly across the K folds
        Parameters
        ----------
        df : DataFrame(pandas)
            Information about the data used for training
        bins_num : int or None
            Number of bins for the pseudo-labels; if None, the number of bins is the square root of the number of samples
        Returns
        -------
        df_train_list : array-like[DataFrame(pandas)], shape(number of splits,)
            Data assigned for training
        df_test_list : array-like, shape(number of splits, number of files)
            Data assigned for testing
        """
        # Stratified group K-fold
folder_create(self.split_info_folder)
X = df[self.filename_column].values
y = df[self.label_column].values
        ## Create pseudo-labels so that the numeric distribution is split evenly
if bins_num is None:
bins_num = int(len(X) ** 0.5) + 1
bins = np.linspace(min(y), max(y), bins_num)
y_pseudo = np.digitize(y, bins) - 1
y_pseudo[np.argmax(y)] -= 1
if self.group_column == 'None':
groups = None
else:
groups = df[self.group_column].values
df_train_list = []
df_test_list = []
for i, (train_index, test_index) in enumerate(self.split(X, y_pseudo, groups)):
df_train = df.iloc[train_index]
df_test = df.iloc[test_index]
            ## Write out the information of the split data
df_train.to_csv(f'{self.split_info_folder}/train_{i}.csv',
index=False, encoding='utf-8')
df_test.to_csv(f'{self.split_info_folder}/test_{i}.csv',
index=False, encoding='utf-8')
df_train_list.append(df_train)
df_test_list.append(df_test)
return df_train_list, df_test_list | python |
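# A minimal usage sketch (not part of the original module). Because of the
# relative import above it must be run as a module (python -m <package>.<module>);
# the DataFrame and the column names in demo_config are illustrative assumptions,
# and split() is called directly so nothing is written to disk.
if __name__ == '__main__':
    demo_df = pd.DataFrame({
        'filename': ['img_{}.png'.format(i) for i in range(12)],
        'label': ['cat', 'dog'] * 6,
        'patient_id': ['p{}'.format(i // 3) for i in range(12)],  # 3 images per patient
    })
    demo_config = {'image_filename_column': 'filename',
                   'label_column': 'label',
                   'ID_column': 'patient_id'}
    sgkf = Stratified_group_k_fold(csv_config=demo_config,
                                   split_info_folder='split_info',
                                   n_splits=3, shuffle=True, random_state=0)
    X = demo_df['filename'].values
    y = list(map(str, demo_df['label'].values))
    groups = demo_df['patient_id'].values
    for fold, (train_idx, test_idx) in enumerate(sgkf.split(X, y, groups)):
        # every group (patient) ends up entirely in train or entirely in test
        print(fold, len(train_idx), len(test_idx))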
import numpy as np
from tqdm import tqdm
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
    batches could cause silent errors.
"""
class InputExample(object):
"""A single training/test example for simple sequence classification."""
    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs an InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
def convert_single_example(tokenizer, example, max_seq_length=256):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
input_ids = [0] * max_seq_length
input_mask = [0] * max_seq_length
segment_ids = [0] * max_seq_length
label = 0
return input_ids, input_mask, segment_ids, label
tokens_a = tokenizer.tokenize(example.text_a)
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0 : (max_seq_length - 2)]
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return input_ids, input_mask, segment_ids, example.label
def convert_examples_to_features(tokenizer, examples, max_seq_length=256):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
input_ids, input_masks, segment_ids, labels = [], [], [], []
for example in tqdm(examples, desc="Converting examples to features"):
input_id, input_mask, segment_id, label = convert_single_example(
tokenizer, example, max_seq_length
)
input_ids.append(input_id)
input_masks.append(input_mask)
segment_ids.append(segment_id)
labels.append(label)
return (
np.array(input_ids),
np.array(input_masks),
np.array(segment_ids),
np.array(labels).reshape(-1, 1),
)
def convert_text_to_examples(texts, labels):
"""Create InputExamples"""
InputExamples = []
for text, label in zip(texts, labels):
InputExamples.append(
InputExample(guid=None, text_a=" ".join(text), text_b=None, label=label)
)
return InputExamples
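# A minimal usage sketch (not part of the original module). The whitespace
# tokenizer below is a stand-in assumption; in practice you would pass a real
# BERT tokenizer exposing the same tokenize/convert_tokens_to_ids interface.
class _WhitespaceTokenizer(object):
    def tokenize(self, text):
        return text.lower().split()
    def convert_tokens_to_ids(self, tokens):
        # toy vocabulary: deterministic ids derived from the token hash
        return [abs(hash(token)) % 30000 for token in tokens]
if __name__ == "__main__":
    texts = [["a", "great", "movie"], ["not", "worth", "watching"]]
    labels = [1, 0]
    examples = convert_text_to_examples(texts, labels)
    input_ids, input_masks, segment_ids, y = convert_examples_to_features(
        _WhitespaceTokenizer(), examples, max_seq_length=8
    )
    # expected shapes: (2, 8) (2, 8) (2, 8) (2, 1)
    print(input_ids.shape, input_masks.shape, segment_ids.shape, y.shape)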
| python |
from typing import List
import metagrad.module as nn
from examples.feedforward import load_dataset
from metagrad.dataloader import DataLoader
from metagrad.dataset import TensorDataset
from metagrad.functions import sigmoid
from metagrad.loss import BCELoss
from metagrad.optim import SGD
from metagrad.paramater import Parameter
from metagrad.tensor import no_grad, Tensor
from metagrad.utils import Animator, run_epoch, regression_classification_metric
class DynamicFFN(nn.Module):
def __init__(self, num_layers, input_size, hidden_size, output_size):
        '''
        :param num_layers: number of hidden layers
        :param input_size: input dimension
        :param hidden_size: hidden layer size
        :param output_size: number of classes
        '''
layers = []
        layers.append(nn.Linear(input_size, hidden_size))  # hidden layer: converts the input into a hidden vector
        layers.append(nn.ReLU())  # activation function
for i in range(num_layers - 1):
layers.append(nn.Linear(hidden_size, hidden_size // 2))
            hidden_size = hidden_size // 2  # the number of neurons decreases in later layers
layers.append(nn.ReLU())
        layers.append(nn.Linear(hidden_size, output_size))  # output layer: converts the hidden vector into the output
self.net = nn.Sequential(*layers)
def forward(self, x: Tensor) -> Tensor:
return self.net(x)
def weights(self) -> List[Parameter]:
parameters = []
for layer in self.net.layers:
if isinstance(layer, nn.Linear):
parameters.append(layer.weight)
return parameters
def bias(self) -> List[Parameter]:
parameters = []
for layer in self.net.layers:
if isinstance(layer, nn.Linear):
parameters.append(layer.bias)
return parameters
def train_model(model, opt, train_dl, val_dl, num_epochs=20):
loss = BCELoss(reduction=None)
val_losses = []
for epoch in range(num_epochs):
train_loss, _ = run_epoch(model, train_dl, loss, opt, activate_func=sigmoid,
evaluate_func=regression_classification_metric)
with no_grad():
val_loss, _ = run_epoch(model, val_dl, loss, opt=None, activate_func=sigmoid,
evaluate_func=regression_classification_metric)
val_losses.append(val_loss)
print(f'epoch:{epoch + 1}, train loss:{train_loss:.4f}, validation loss:{val_loss:.4f}')
return val_losses
def compare_model(train_dl, val_dl, original_model, new_model, original_opt, new_opt,
original_label='Simple model', new_label='Complex model', ):
num_epochs = 20
print(f'Training {original_label}:')
original_losses = train_model(original_model, original_opt, train_dl, val_dl, num_epochs)
print(f'Training {new_label}:')
new_losses = train_model(new_model, new_opt, train_dl, val_dl, num_epochs)
animator = Animator(xlabel='epoch', ylabel='validation loss', yscale='log',
xlim=[1, num_epochs], ylim=[1e-3, 1e2],
legend=[original_label, new_label], saved_file='animator')
for epoch in range(num_epochs):
animator.add(epoch + 1, (original_losses[epoch], new_losses[epoch]))
animator.show()
def simple_and_complex(input_size, output_size, train_dl, val_dl):
    '''
    Compare a simple model with a complex model
    :param input_size:
    :param output_size:
    :param train_dl:
    :param val_dl:
    :return:
    '''
simple_model = DynamicFFN(1, input_size, 4, output_size)
simple_opt = SGD(simple_model.parameters(), lr=0.1)
complex_model = DynamicFFN(4, input_size, 128, output_size)
complex_opt = SGD(complex_model.parameters(), lr=0.1)
compare_model(train_dl, val_dl, simple_model, complex_model, simple_opt, complex_opt)
def complex_with_l2_or_not(input_size, output_size, train_dl, val_dl):
    '''
    Compare a complex model with L2 regularization against one without it
    :param input_size:
    :param output_size:
    :param train_dl:
    :param val_dl:
    :return:
    '''
complex_model = DynamicFFN(1, input_size, 256, output_size)
complex_opt = SGD(complex_model.parameters(), lr=0.1)
complex_l2_model = DynamicFFN(1, input_size, 256, output_size)
    # apply the L2 penalty to the weights only
complex_l2_opt = SGD([
{"params": complex_l2_model.weights(), 'weight_decay': 0.01},
{"params": complex_l2_model.bias()}], lr=0.1
)
compare_model(train_dl, val_dl, complex_model, complex_l2_model, complex_opt, complex_l2_opt, "Complex model",
"Complex Model(L2)")
if __name__ == '__main__':
X_train, X_test, y_train, y_test, X_val, y_val = load_dataset()
batch_size = 512
train_ds = TensorDataset(X_train, y_train)
train_dl = DataLoader(train_ds, batch_size=batch_size)
val_ds = TensorDataset(X_val, y_val)
val_dl = DataLoader(val_ds, batch_size=batch_size)
input_size = 10000
output_size = 1
complex_with_l2_or_not(input_size, output_size, train_dl, val_dl)
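    # Optional (not part of the original script): to run the simple-vs-complex
    # comparison instead of the L2 experiment, swap the call above for:
    # simple_and_complex(input_size, output_size, train_dl, val_dl)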
| python |
from functools import wraps
from ..exceptions import BeeSQLError
def primary_keyword(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
if not self.table:
raise BeeSQLError('No table selected. Use Query.on to select a table first')
statement = func(self, *args, **kwargs)
self.set_statement(statement)
return statement
return wrapper
def secondary_keyword(func):
""" Convert a statement method into a secondary keyword generator. """
@wraps(func)
def wrapper(self, *args, **kwargs):
keyword = func(self, *args, **kwargs)
self.add_secondary_keyword(keyword)
return self
return wrapper
def logical_operator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
if not self.is_condition_set():
raise BeeSQLError('No condition set.')
return func(self, *args, **kwargs)
return wrapper
def aggregation(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
aggregation_ = func(self, *args, **kwargs)
self.add_aggregation(aggregation_)
return self
return wrapper
def complete_condition(query_part_name):
""" Works with ColumnSelector class. """
def decorator(func):
@wraps(func)
def wrapper(self, value, **kwargs):
operator = self.get_operator(query_part_name, value)
return self.complete(operator)
return wrapper
return decorator
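# A minimal sketch (not part of the original module) of the contract these
# decorators expect from the query object. The class below is hypothetical and
# only illustrates the attributes/methods the wrappers rely on (table,
# set_statement, add_secondary_keyword); it is not the real BeeSQL Query class,
# and the relative import above means this module runs only inside the package.
class _DemoQuery(object):
    def __init__(self, table=None):
        self.table = table
        self.statement = None
        self.secondary_keywords = []
    def set_statement(self, statement):
        self.statement = statement
    def add_secondary_keyword(self, keyword):
        self.secondary_keywords.append(keyword)
    @primary_keyword
    def select(self, *columns):
        return 'SELECT {} FROM {}'.format(', '.join(columns) or '*', self.table)
    @secondary_keyword
    def limit(self, count):
        return 'LIMIT {}'.format(count)
if __name__ == '__main__':
    query = _DemoQuery(table='users')
    query.select('id', 'name')      # stored via set_statement
    query.limit(10)                 # chained: the wrapper returns the query itself
    print(query.statement, query.secondary_keywords)
    try:
        _DemoQuery().select('id')   # no table selected -> BeeSQLError
    except BeeSQLError as exc:
        print('raised:', exc)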
| python |
"""
Relationship pseudo-model.
"""
class Relationship:
def __init__(self, start_id, end_id, type, properties):
"""
A relationship (edge) in a property graph view of data.
:param {str} start_id: unique id of the 'from' node in the graph this relationship is associated with
:param {str} end_id: unique id of the 'to' node in the graph this relationship is associated with
:param {list} type: a qualified relationship 'type' to use, typically corresponding to some enumeration
:param {dict} properties: any scalar attributes ("properties") associated with the relationship.
"""
self.start_id = start_id
self.end_id = end_id
self.type = type
self.properties = properties
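# A brief usage sketch (not part of the original module); the ids, type and
# properties below are illustrative values only.
if __name__ == "__main__":
    knows = Relationship(
        start_id="person-1",
        end_id="person-2",
        type="KNOWS",
        properties={"since": 2019, "weight": 0.8},
    )
    print(knows.start_id, "-[{}]->".format(knows.type), knows.end_id, knows.properties)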
| python |
# Number of Islands
class Solution(object):
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if not any(grid): return 0
m, n = len(grid), len(grid[0])
count = 0
for i in range(m):
for j in range(n):
if grid[i][j] == '1':
count += 1
self.dfs(i, j, grid)
return count
def dfs(self, i, j, grid):
m, n = len(grid), len(grid[0])
grid[i][j] = '0'
for x, y in [(i - 1, j), (i, j - 1), (i + 1, j), (i, j + 1)]:
if 0 <= x < m and 0 <= y < n and grid[x][y] == '1':
self.dfs(x, y, grid)
# O(mn) time; the recursion stack can reach O(mn) in the worst case
# follow up: how to find the number of lakes?
# a lake is an area of water surrounded horizontally and vertically
# by the same island
# solution:
# 1. run a numIslands-style pass that marks each island with a distinct id
# 2. iterate through the grid; if a cell is water, dfs to see if
#    it's surrounded by land cells of the same island id
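# A sketch of the lake-counting follow-up described above (not part of the
# original solution). It assumes the same grid format (lists of '0'/'1'
# strings), labels every island with a distinct id on a copy of the grid, and
# then counts each water region that does not touch the border and borders
# exactly one island id.
def count_lakes(grid):
    if not any(grid):
        return 0
    m, n = len(grid), len(grid[0])
    labels = [[cell for cell in row] for row in grid]  # work on a copy
    def flood(i, j, target, value):
        # iterative DFS: relabel every cell equal to `target` with `value`
        # and return the list of visited cells
        stack, seen = [(i, j)], []
        labels[i][j] = value
        while stack:
            x, y = stack.pop()
            seen.append((x, y))
            for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
                if 0 <= nx < m and 0 <= ny < n and labels[nx][ny] == target:
                    labels[nx][ny] = value
                    stack.append((nx, ny))
        return seen
    # 1. label each island with a distinct id: '2', '3', '4', ...
    next_id = 2
    for i in range(m):
        for j in range(n):
            if labels[i][j] == '1':
                flood(i, j, '1', str(next_id))
                next_id += 1
    # 2. examine every water region
    lakes = 0
    for i in range(m):
        for j in range(n):
            if labels[i][j] == '0':
                cells = flood(i, j, '0', 'seen')
                touches_border = any(x in (0, m - 1) or y in (0, n - 1) for x, y in cells)
                neighbour_ids = set()
                for x, y in cells:
                    for nx, ny in ((x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)):
                        if 0 <= nx < m and 0 <= ny < n and labels[nx][ny] not in ('0', 'seen'):
                            neighbour_ids.add(labels[nx][ny])
                if not touches_border and len(neighbour_ids) == 1:
                    lakes += 1
    return lakes
if __name__ == '__main__':
    demo = [list("1111"),
            list("1001"),
            list("1001"),
            list("1111")]
    print(Solution().numIslands([row[:] for row in demo]))  # 1 island
    print(count_lakes(demo))                                # 1 enclosed lake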
| python |
from parameterized import parameterized
from combinatrix.testintegration import load_parameter_sets
from doajtest.helpers import DoajTestCase
from doajtest.fixtures import JournalFixtureFactory, ArticleFixtureFactory
from doajtest.mocks.store import StoreMockFactory
from doajtest.mocks.model_Cache import ModelCacheMockFactory
from portality.lib.paths import rel2abs
from portality.lib import dates
from portality.background import BackgroundApi
from portality.tasks.public_data_dump import PublicDataDumpBackgroundTask
from portality import models, store
from portality.core import app
import os, shutil, tarfile, json
from StringIO import StringIO
def load_cases():
return load_parameter_sets(rel2abs(__file__, "..", "matrices", "tasks.public_data_dump"), "data_dump", "test_id",
{"test_id" : []})
class TestPublicDataDumpTask(DoajTestCase):
def setUp(self):
super(TestPublicDataDumpTask, self).setUp()
self.store_tmp_imp = app.config.get("STORE_TMP_IMPL")
self.store_imp = app.config.get("STORE_IMPL")
self.discovery_records_per_file = app.config.get("DISCOVERY_RECORDS_PER_FILE")
self.store_local_dir = app.config["STORE_LOCAL_DIR"]
self.store_tmp_dir = app.config["STORE_TMP_DIR"]
self.cache = models.Cache
app.config["STORE_IMPL"] = "portality.store.StoreLocal"
app.config["STORE_LOCAL_DIR"] = rel2abs(__file__, "..", "tmp", "store", "main")
app.config["STORE_TMP_DIR"] = rel2abs(__file__, "..", "tmp", "store", "tmp")
os.makedirs(app.config["STORE_LOCAL_DIR"])
os.makedirs(app.config["STORE_TMP_DIR"])
models.cache.Cache = ModelCacheMockFactory.in_memory()
def tearDown(self):
app.config["STORE_TMP_IMPL"] = self.store_tmp_imp
app.config["STORE_IMPL"] = self.store_imp
app.config["DISCOVERY_RECORDS_PER_FILE"] = self.discovery_records_per_file
shutil.rmtree(rel2abs(__file__, "..", "tmp"))
app.config["STORE_LOCAL_DIR"] = self.store_local_dir
app.config["STORE_TMP_DIR"] = self.store_tmp_dir
models.cache.Cache = self.cache
super(TestPublicDataDumpTask, self).tearDown()
@parameterized.expand(load_cases)
def test_public_data_dump(self, name, kwargs):
clean_arg = kwargs.get("clean")
prune_arg = kwargs.get("prune")
types_arg = kwargs.get("types")
journals_arg = kwargs.get("journals")
articles_arg = kwargs.get("articles")
batch_size_arg = kwargs.get("batch_size")
tmp_write_arg = kwargs.get("tmp_write")
store_write_arg = kwargs.get("store_write")
status_arg = kwargs.get("status")
###############################################
## set up
clean = True if clean_arg == "yes" else False if clean_arg == "no" else None
prune = True if prune_arg == "yes" else False if prune_arg == "no" else None
types = types_arg if types_arg != "-" else None
journal_count = int(journals_arg)
article_count = int(articles_arg)
batch_size = int(batch_size_arg)
journal_file_count = 0 if journal_count == 0 else (journal_count / batch_size) + 1
article_file_count = 0 if article_count == 0 else (article_count / batch_size) + 1
first_article_file_records = 0 if article_count == 0 else batch_size if article_count > batch_size else article_count
first_journal_file_records = 0 if journal_count == 0 else batch_size if journal_count > batch_size else journal_count
# add the data to the index first, to maximise the time it has to become available for search
sources = JournalFixtureFactory.make_many_journal_sources(journal_count, in_doaj=True)
jids = []
for i in range(len(sources)):
source = sources[i]
journal = models.Journal(**source)
journal.save()
jids.append((journal.id, journal.last_updated))
aids = []
for i in range(article_count):
source = ArticleFixtureFactory.make_article_source(
eissn="{x}000-0000".format(x=i),
pissn="0000-{x}000".format(x=i),
with_id=False,
doi="10.123/{x}".format(x=i),
fulltext="http://example.com/{x}".format(x=i)
)
article = models.Article(**source)
article.save()
aids.append((article.id, article.last_updated))
# construct some test data in the local store
container_id = app.config["STORE_PUBLIC_DATA_DUMP_CONTAINER"]
localStore = store.StoreLocal(None)
localStoreFiles = []
if clean or prune:
for i in range(5):
localStore.store(container_id, "doaj_article_data_2018-01-0" + str(i) + ".tar.gz",
source_stream=StringIO("test"))
localStore.store(container_id, "doaj_journal_data_2018-01-0" + str(i) + ".tar.gz",
source_stream=StringIO("test"))
localStoreFiles = localStore.list(container_id)
app.config["DISCOVERY_RECORDS_PER_FILE"] = batch_size
# set the mocks for store write failures
if tmp_write_arg == "fail":
app.config["STORE_TMP_IMPL"] = StoreMockFactory.no_writes_classpath()
if store_write_arg == "fail":
app.config["STORE_IMPL"] = StoreMockFactory.no_writes_classpath()
# block until all the records are saved
for jid, lu in jids:
models.Journal.block(jid, lu, sleep=0.05)
for aid, lu in aids:
models.Article.block(aid, lu, sleep=0.05)
###########################################################
# Execution
job = PublicDataDumpBackgroundTask.prepare("testuser", clean=clean, prune=prune, types=types)
task = PublicDataDumpBackgroundTask(job)
BackgroundApi.execute(task)
# make sure we have a fresh copy of the job
job = task.background_job
assert job.status == status_arg
if job.status != "error":
article_url = models.cache.Cache.get_public_data_dump().get("article", {}).get("url")
if types_arg in ["-", "all", "article"]:
assert article_url is not None
else:
assert article_url is None
journal_url = models.cache.Cache.get_public_data_dump().get("journal", {}).get("url")
if types_arg in ["-", "all", "journal"]:
assert journal_url is not None
else:
assert journal_url is None
assert localStore.exists(container_id)
files = localStore.list(container_id)
if types_arg in ["-", "all"]:
assert len(files) == 2
else:
assert len(files) == 1
day_at_start = dates.today()
if types_arg in ["-", "all", "article"]:
article_file = "doaj_article_data_" + day_at_start + ".tar.gz"
assert article_file in files
stream = localStore.get(container_id, article_file)
tarball = tarfile.open(fileobj=stream, mode="r:gz")
members = tarball.getmembers()
assert len(members) == article_file_count
if len(members) > 0:
f = tarball.extractfile(members[0])
data = json.loads(f.read())
assert len(data) == first_article_file_records
record = data[0]
for key in record.keys():
assert key in ["admin", "bibjson", "id", "last_updated", "created_date"]
if "admin" in record:
for key in record["admin"].keys():
assert key in ["ticked", "seal"]
if types_arg in ["-", "all", "journal"]:
journal_file = "doaj_journal_data_" + day_at_start + ".tar.gz"
assert journal_file in files
stream = localStore.get(container_id, journal_file)
tarball = tarfile.open(fileobj=stream, mode="r:gz")
members = tarball.getmembers()
assert len(members) == journal_file_count
if len(members) > 0:
f = tarball.extractfile(members[0])
data = json.loads(f.read())
assert len(data) == first_journal_file_records
record = data[0]
for key in record.keys():
assert key in ["admin", "bibjson", "id", "last_updated", "created_date"]
if "admin" in record:
for key in record["admin"].keys():
assert key in ["ticked", "seal"]
else:
# in the case of an error, we expect the tmp store to have been cleaned up
tmpStore = store.TempStore()
assert not tmpStore.exists(container_id)
# in the case of an error, we expect the main store not to have been touched
# (for the errors that we are checking for)
if prune and not clean:
# no matter what the error, if we didn't specify clean then we expect everything
# to survive
survived = localStore.list(container_id)
assert localStoreFiles == survived
elif clean:
# if we specified clean, then it's possible the main store was cleaned before the
# error occurred, in which case it depends on the error. This reminds us that
# clean shouldn't be used in production
if tmp_write_arg == "fail":
assert not localStore.exists(container_id)
else:
survived = localStore.list(container_id)
assert localStoreFiles == survived
else:
# otherwise, we expect the main store to have survived
assert not localStore.exists(container_id) | python |
from flask import Flask, jsonify, request
app = Flask(__name__)
@app.route('/', methods=['GET'])
def hello_world():
    response = ""
    term = request.args.get('term', '')
    if term:
items = [ "c++", "java", "php", "coldfusion", "javascript", "asp", "ruby", "perl", "ocaml", "haskell", "rust", "go" ]
response = jsonify([item for item in items if item.startswith(term)])
response.headers.add('Access-Control-Allow-Origin', '*')
return response | python |
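# A minimal usage sketch (not part of the original file): exercises the endpoint
# with Flask's built-in test client instead of a running server; the query value
# "ja" is just an illustrative example.
if __name__ == '__main__':
    with app.test_client() as client:
        reply = client.get('/?term=ja')
        print(reply.status_code, reply.get_json())  # expected: 200 ['java', 'javascript']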
"""Flsqls module."""
from pineboolib.core import decorators
from pineboolib.core.utils import utils_base
from pineboolib.application.metadata import pntablemetadata
from pineboolib import logging
from pineboolib.fllegacy import flutil
from pineboolib.interfaces import isqldriver
from sqlalchemy.orm import sessionmaker # type: ignore [import] # noqa: F821
from typing import Optional, Union, List, Any, TYPE_CHECKING
if TYPE_CHECKING:
from sqlalchemy.engine import ( # type: ignore [import] # noqa: F401, F821
base, # noqa: F401
result, # noqa: F401
) # noqa: F401 # pragma: no cover
from pineboolib.interfaces import isession
LOGGER = logging.get_logger(__name__)
class FLPYMSSQL(isqldriver.ISqlDriver):
"""FLPYMSSQL class."""
def __init__(self):
"""Inicialize."""
super().__init__()
self.version_ = "0.9"
self.name_ = "FLPYMSSQL"
self.error_list = []
self.alias_ = "SQL Server (PYMSSQL)"
self.default_port = 1433
self.savepoint_command = "SAVE TRANSACTION"
self.rollback_savepoint_command = "ROLLBACK TRANSACTION"
self.commit_transaction_command = "COMMIT"
self._like_true = "1"
self._like_false = "0"
self._safe_load = {"pymssql": "pymssql", "sqlalchemy": "sqlAlchemy"}
self._database_not_found_keywords = ["20018"]
self._text_like = ""
self._sqlalchemy_name = "mssql+pymssql"
self._create_isolation = False
def getAlternativeConn(self, name: str, host: str, port: int, usern: str, passw_: str) -> Any:
"""Return connection."""
self._queqe_params["connect_args"] = {"autocommit": True}
conn_ = self.getConn("master", host, port, usern, passw_)
del self._queqe_params["connect_args"]
# conn_.execute("set transaction isolation level read uncommitted;")
return conn_
def session(self) -> "isession.PinebooSession":
"""Create a sqlAlchemy session."""
while True:
session_class = sessionmaker(bind=self.connection(), autoflush=False, autocommit=True)
new_session = session_class()
if new_session.connection().connection is not None:
break
else:
                LOGGER.warning("Invalid connection caught. Requesting a new one")
setattr(new_session, "_conn_name", self.db_._name)
session_key = utils_base.session_id(self.db_._name, True)
self.db_._conn_manager._thread_sessions[session_key] = new_session
return new_session
def existsTable(self, table_name: str) -> bool:
"""Return if exists a table specified by name."""
sql = (
"SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE "
+ "TABLE_NAME = N'%s' AND TABLE_CATALOG = '%s'" % (table_name, self._dbname)
)
cur = self.execute_query(sql)
return True if cur and cur.fetchone() else False
def nextSerialVal(self, table_name: str, field_name: str) -> int:
"""Return next serial value."""
if self.is_open():
cur = self.execute_query("SELECT NEXT VALUE FOR %s_%s_seq" % (table_name, field_name))
if cur and cur.returns_rows:
return cur.fetchone()[0] # type: ignore [index] # noqa: F821
LOGGER.warning("not exec sequence")
return 0
def releaseSavePoint(self, num: int) -> bool:
"""Set release savepoint."""
return True
def setType(self, type_: str, leng: int = 0) -> str:
"""Return type definition."""
type_ = type_.lower()
res_ = ""
if type_ in ("int", "serial"):
res_ = "INT"
elif type_ == "uint":
res_ = "BIGINT"
elif type_ in ("bool", "unlock"):
res_ = "BIT"
elif type_ == "double":
res_ = "DECIMAL"
elif type_ == "time":
res_ = "TIME"
elif type_ == "date":
res_ = "DATE"
elif type_ in ("pixmap", "stringlist"):
res_ = "TEXT"
elif type_ == "string":
res_ = "VARCHAR"
elif type_ == "bytearray":
res_ = "NVARCHAR"
elif type_ == "timestamp":
res_ = "DATETIME2"
elif type_ == "json":
res_ = "NVARCHAR"
else:
            LOGGER.warning("setType: unknown type %s", type_)
leng = 0
return "%s(%s)" % (res_, leng) if leng else res_
def sqlCreateTable(
self, tmd: "pntablemetadata.PNTableMetaData", create_index: bool = True
) -> Optional[str]:
"""Return a create table query."""
if tmd.isQuery():
return self.sqlCreateView(tmd)
util = flutil.FLUtil()
primary_key = ""
sql = "CREATE TABLE %s (" % tmd.name()
seq = None
field_list = tmd.fieldList()
unlocks = 0
for number, field in enumerate(field_list):
sql += field.name()
type_ = field.type()
if type_ == "serial":
seq = "%s_%s_seq" % (tmd.name(), field.name())
if self.is_open() and create_index:
try:
self.execute_query("CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1" % seq)
except Exception as error:
LOGGER.error("%s::sqlCreateTable:%s", __name__, str(error))
sql += " INT"
elif type_ == "double":
sql += " DECIMAL(%s,%s)" % (
int(field.partInteger()) + int(field.partDecimal()),
int(field.partDecimal()),
)
else:
if type_ == "unlock":
unlocks += 1
if unlocks > 1:
                        LOGGER.warning(
                            u"FLManager : Could not create table %s ", tmd.name()
                        )
                        LOGGER.warning(
                            u"FLManager : There is more than one unlock-type field. Only one is allowed."
                        )
return None
sql += " %s" % self.setType(type_, field.length())
if field.isPrimaryKey():
if not primary_key:
sql = sql + " PRIMARY KEY"
primary_key = field.name()
else:
                    LOGGER.warning(
                        util.translate(
                            "application",
                            "FLManager : Table-> %s ." % tmd.name()
                            + "A second primary key was attempted for field %s, but field %s is already the primary key."
                            % (field.name(), primary_key)
                            + "Only one primary key may exist in FLTableMetaData; use FLCompoundKey to create compound keys.",
                        )
                    )
raise Exception(
"A primary key (%s) has been defined before the field %s.%s -> %s"
% (primary_key, tmd.name(), field.name(), sql)
)
else:
sql += " UNIQUE" if field.isUnique() else ""
sql += " NULL" if field.allowNull() else " NOT NULL"
if number != len(field_list) - 1:
sql += ","
sql += ")"
return sql
def decodeSqlType(self, type_: Union[int, str]) -> str:
"""Return the specific field type."""
ret = str(type_).lower()
if type_ == "bit":
ret = "bool"
elif type_ == "bigint":
ret = "uint"
elif type_ == "decimal":
ret = "double"
elif type_ == "date":
ret = "date"
elif type_ == "time":
ret = "time"
elif type_ == "varchar":
ret = "string"
elif type_ == "nvarchar":
ret = "bytearray"
elif type_ == "text":
ret = "stringlist"
elif type_ == "datetime2":
ret = "timestamp"
elif type_ == "json":
ret = "json"
return ret
def tables(self, type_name: str = "", table_name: str = "") -> List[str]:
"""Return a tables list specified by type."""
table_list: List[str] = []
result_list: List[Any] = []
if self.is_open():
where: List[str] = []
if type_name in ("Tables", ""):
where.append("xtype ='U'")
if type_name in ("Views", ""):
where.append("xtype ='V'")
if type_name in ("SystemTables", ""):
where.append("xtype ='S'")
if where:
and_name = " AND name ='%s'" % (table_name) if table_name else ""
cursor = self.execute_query(
"SELECT name FROM SYSOBJECTS where %s%s ORDER BY name ASC"
% (" OR ".join(where), and_name)
)
result_list += cursor.fetchall() if cursor else []
table_list = [item[0] for item in result_list]
return table_list
def declareCursor(
self, curname: str, fields: str, table: str, where: str, conn_db: "base.Connection"
) -> Optional["result.ResultProxy"]:
"""Set a refresh query for database."""
if not self.is_open():
raise Exception("declareCursor: Database not open")
sql = "DECLARE %s CURSOR STATIC FOR SELECT %s FROM %s WHERE %s " % (
curname,
fields,
table,
where,
)
try:
conn_db.execute(sql)
conn_db.execute("OPEN %s" % curname)
except Exception as error:
LOGGER.error("refreshQuery: %s", error)
LOGGER.info("SQL: %s", sql)
LOGGER.trace("Detalle:", stack_info=True)
return None
def deleteCursor(self, cursor_name: str, cursor: Any) -> None:
"""Delete cursor."""
if not self.is_open():
raise Exception("deleteCursor: Database not open")
try:
sql_exists = "SELECT CURSOR_STATUS('global','%s')" % cursor_name
cursor.execute(sql_exists)
if cursor.fetchone()[0] < 1:
return
cursor.execute("CLOSE %s" % cursor_name)
except Exception as exception:
LOGGER.error("finRow: %s", exception)
LOGGER.warning("Detalle:", stack_info=True)
# def fix_query(self, query: str) -> str:
# """Fix string."""
# # ret_ = query.replace(";", "")
# return query
@decorators.not_implemented_warn
def alterTable(self, new_metadata: "pntablemetadata.PNTableMetaData") -> bool:
"""Modify a table structure."""
return True
def recordInfo2(self, tablename: str) -> List[List[Any]]:
"""Return info from a database table."""
info = []
sql = (
"SELECT COLUMN_NAME, DATA_TYPE, IS_NULLABLE, COLUMN_DEFAULT, NUMERIC_PRECISION_RADIX,"
+ " CHARACTER_MAXIMUM_LENGTH FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = '%s'"
% tablename.lower()
)
data = self.execute_query(sql)
res = data.fetchall() if data else []
for columns in res:
field_size = int(columns[5]) if columns[5] else 0
# field_precision = columns[4] or 0
field_name = columns[0]
field_type = self.decodeSqlType(columns[1])
field_allow_null = columns[2] == "YES"
field_default_value = columns[3]
info.append(
[
field_name,
field_type,
not field_allow_null,
field_size,
None,
field_default_value,
None, # field_pk
]
)
return info
def vacuum(self) -> None:
"""Vacuum tables."""
return
def sqlLength(self, field_name: str, size: int) -> str:
"""Return length formated."""
return "LEN(%s)=%s" % (field_name, size)
| python |
"""
Copyright 2021 Gabriele Pisciotta - [email protected]
Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted,
provided that the above copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
OF THIS SOFTWARE.
"""
__author__ = "Gabriele Pisciotta"
import networkx as nx
from oc_ocdm import Storer
from oc_ocdm.graph import GraphSet
from oc_ocdm.graph.entities.bibliographic.agent_role import AgentRole
from oc_ocdm.graph.entities.bibliographic.bibliographic_resource import BibliographicResource
from oc_ocdm.graph.entities.bibliographic.responsible_agent import ResponsibleAgent
from oc_ocdm.graph.graph_entity import GraphEntity
from oc_ocdm.prov import ProvSet
from rdflib import URIRef
class InstanceMatching:
def __init__(self, g_set: GraphSet,
graph_filename="matched.rdf",
provenance_filename="provenance.rdf",
resp_agent='https://w3id.org/oc/meta/prov/pa/4',
debug=False):
self.g_set = g_set
self.graph_filename = graph_filename
self.provenance_filename = provenance_filename
self.debug = debug
self.resp_agent = resp_agent
self.prov = ProvSet(self.g_set, self.resp_agent)
def match(self):
""" Start the matching process that will do, in sequence:
- match the ARs
- match the BRs
- match the IDs
In the end, this process will produce:
- `matched.rdf` that will contain the graph set specified previously without the duplicates.
- `provenance.rdf` that will contain the provenance, tracking record of all the changes done.
"""
self.instance_matching_ar()
self.instance_matching_br()
self.instance_matching_ids()
self.save()
return self.g_set
def save(self):
""" Serialize the graph set into the specified RDF file,
and the provenance in another specified RDF file.
"""
gs_storer = Storer(self.g_set, output_format="nt11")
gs_storer.store_graphs_in_file(self.graph_filename, "")
prov_storer = Storer(self.prov, output_format="nquads")
prov_storer.store_graphs_in_file(self.provenance_filename, "")
def instance_matching_ar(self):
""" Discover all the ARs that share the same identifier's literal, creating a graph of them.
Then merge each connected component (cluster of ARs linked by the same identifier) into one.
        For each pair of ARs that are going to be merged, substitute the references to the AR that
        will no longer exist by removing it from each BR that refers to it and adding, instead, the merged one.
If the RA linked by the AR that will no longer exist is not linked by any other AR, then
it will be marked as to be deleted, otherwise not.
In the end, generate the provenance and commit pending changes in the graph set"""
merge_graph: nx.Graph = nx.Graph()
associated_ar_ra = self.__get_association_ar_ra()
associated_ar_br = self.__get_association_ar_br()
identifiers = {}
for ar in self.g_set.get_ar():
role = ar.get_role_type()
# Extract Authors and Publishers, with their info and their identifiers
if role == GraphEntity.iri_author or role == GraphEntity.iri_publisher:
for i in ar.get_identifiers():
if identifiers.get(i.get_scheme()) is None:
identifiers[i.get_scheme()] = {}
                    ra_first: AgentRole = identifiers[i.get_scheme()].get(i.get_literal_value())
if ra_first is None:
identifiers[i.get_scheme()][i.get_literal_value()] = ar
else:
merge_graph.add_edge(ra_first, ar)
if self.debug:
print("[IM-RA] Will merge {} and {} due to {}:{} in common".format(ar.res,
ra_first.res,
i.get_scheme().split(
"/")[-1],
i.get_literal_value()))
# Get the connected components of the graph (clusters of "to-be-merged"):
clusters = sorted(nx.connected_components(merge_graph), key=len, reverse=True)
print("[IM-RA] N° of clusters: {}".format(len(clusters)))
for n, cluster in enumerate(clusters):
clusters_dict = {}
clusters_str_list = []
for k in cluster:
clusters_dict[str(k)] = k
clusters_str_list.append(str(k))
clusters_str_list.sort()
entity_first: AgentRole = clusters_dict[clusters_str_list[0]]
if self.debug:
print("[IM-RA] Merging cluster #{}, with {} entities".format(n, len(cluster)))
for entity in clusters_str_list[1:]:
other_entity = clusters_dict[entity]
if self.debug:
print(f"\tMerging agent role {entity} in agent role {entity_first}")
# The other entity has been merged in the first entity: at this point we need to change all the
# occurrencies of the other entity with the first entity by looking at all the BRs referred
if associated_ar_br.get(other_entity) is not None:
for other_br in associated_ar_br.get(other_entity):
other_br.remove_contributor(other_entity)
other_br.has_contributor(entity_first)
if self.debug:
print(f"\tUnset {other_entity} as contributor of {other_br}")
print(f"\tSet {entity_first} as contributor of {other_br} ")
ra_to_delete = entity_first.get_is_held_by()
entity_first.merge(other_entity)
if entity_first.get_is_held_by() != ra_to_delete:
if associated_ar_ra.get(ra_to_delete) is not None and len(associated_ar_ra.get(ra_to_delete)) == 1:
ra_to_delete.mark_as_to_be_deleted()
else:
other_entity.mark_as_to_be_deleted(False)
other_entity.mark_as_to_be_deleted()
if self.debug:
print(f"\tMarking to delete: {other_entity} ")
self.prov.generate_provenance()
self.g_set.commit_changes()
def instance_matching_br(self):
""" Discover all the BRs that share the same identifier's literal, creating a graph of them.
        Then merge each connected component (cluster of BRs linked by the same identifier) into one.
        For each pair of BRs that are going to be merged, merge also:
- their containers by matching the proper type (issue of BR1 -> issue of BR2)
- their publisher
NB: when two BRs are merged, you'll have the union of their ARs. You could have duplicates if the duplicates
don't have any ID in common or if the method `instance_matching_ar` wasn't called before.
In the end, generate the provenance and commit pending changes in the graph set"""
merge_graph: nx.Graph = nx.Graph()
identifiers = {}
for br in self.g_set.get_br():
for i in br.get_identifiers():
if identifiers.get(i.get_scheme()) is None:
identifiers[i.get_scheme()] = {}
br_first: BibliographicResource = identifiers[i.get_scheme()].get(i.get_literal_value())
if br_first is None:
identifiers[i.get_scheme()][i.get_literal_value()] = br
else:
merge_graph.add_edge(br_first, br)
if self.debug:
print("[IM-BR] Will merge {} into {} due to {}:{} in common".format(br.res,
br_first.res,
i.get_scheme().split("/")[
-1],
i.get_literal_value()))
        # Get the connected components of the graph (clusters of "to-be-merged"):
clusters = sorted(nx.connected_components(merge_graph), key=len, reverse=True)
print("[IM-BR] N° of clusters: {}".format(len(clusters)))
for n, cluster in enumerate(clusters):
clusters_dict = {}
clusters_str_list = []
for k in cluster:
clusters_dict[str(k)] = k
clusters_str_list.append(str(k))
clusters_str_list.sort()
entity_first: BibliographicResource = clusters_dict[clusters_str_list[0]]
publisher_first: ResponsibleAgent = self.__get_publisher(entity_first)
entity_first_partofs = self.__get_part_of(entity_first)
if self.debug:
print("[IM-BR] Merging cluster #{}, with {} entities".format(n, len(cluster)))
entity: BibliographicResource
for entity in clusters_str_list[1:]:
entity = clusters_dict[entity]
# Merge containers
partofs = self.__get_part_of(entity)
                p1: BibliographicResource
                p2: BibliographicResource
for p1 in entity_first_partofs:
p1types = p1.get_types()
p1types.remove(URIRef('http://purl.org/spar/fabio/Expression'))
for p2 in partofs:
p2types = p2.get_types()
p2types.remove(URIRef('http://purl.org/spar/fabio/Expression'))
intersection_of_types = set(p2types).intersection(set(p1types))
if intersection_of_types is not None and len(intersection_of_types) != 0:
p1.merge(p2)
if self.debug:
print(f"\tMerging container {p2} in container {p1} ({intersection_of_types})")
# Merge publisher
publisher = self.__get_publisher(entity)
if publisher is not None and publisher_first is not None and publisher != publisher_first:
publisher_first.merge(publisher)
if self.debug:
print(f"\tMerging publisher {publisher} in publisher {publisher_first}")
# Merge authors
# contributors = entity.get_contributors()
# Merging the two BRs
entity_first.merge(entity)
# for ar in contributors:
# print(f"\tRemoving agent role {ar} from bibliographic resource {entity_first}")
# entity_first.remove_contributor(ar)
self.prov.generate_provenance()
self.g_set.commit_changes()
def instance_matching_ids(self):
""" Discover all the IDs that share the same schema and literal, then merge all into one
and substitute all the reference with the merged one.
In the end, generate the provenance and commit pending changes in the graph set"""
literal_to_id = {}
id_to_resources = {}
entities = list(self.g_set.get_br())
entities.extend(list(self.g_set.get_ar()))
for e in entities:
for i in e.get_identifiers():
literal = i.get_scheme() + "#" + i.get_literal_value()
if i in id_to_resources:
id_to_resources[i].append(e)
else:
id_to_resources[i] = [e]
if literal in literal_to_id:
literal_to_id[literal].append(i)
else:
literal_to_id[literal] = [i]
for k, v in literal_to_id.items():
if len(v) > 1:
schema, lit = k.split('#')
print(
f"[IM-ID] Will merge {len(v) - 1} identifiers into {v[0]} because they share literal {lit} and schema {schema}")
for actual_id in v[1:]:
v[0].merge(actual_id)
entities = id_to_resources[actual_id]
# Remove, from all the entities, the ID that has been merged
# Setting, instead, the merged one as new ID
for e in entities:
e.remove_identifier(actual_id)
if v[0] not in e.get_identifiers():
e.has_identifier(v[0])
actual_id.mark_as_to_be_deleted()
self.prov.generate_provenance()
self.g_set.commit_changes()
@staticmethod
def __get_part_of(br):
""" Given a BR in input (e.g.: a journal article), walk the full 'part-of' chain.
        Returns a list of BRs that form the hierarchy of containers (e.g.: given an article -> [issue, journal])"""
partofs = []
e = br
ended = False
while not ended:
partof = e.get_is_part_of()
if partof is not None:
partofs.append(partof)
e = partof
else:
ended = True
return partofs
@staticmethod
def __get_publisher(br):
""" Given a BR as input, returns the AR that is a publisher """
for ar in br.get_contributors():
role = ar.get_role_type()
if role == GraphEntity.iri_publisher:
return ar
def __get_association_ar_ra(self):
""" Returns the dictionary:
key-> RA
value-> list of AR
This let you take all the ARs associated to the same RA
"""
association = {}
for ar in self.g_set.get_ar():
if ar.get_is_held_by() is not None and ar.get_is_held_by() not in association:
association[ar.get_is_held_by()] = [ar]
elif ar.get_is_held_by() is not None and ar.get_is_held_by() in association:
association[ar.get_is_held_by()].append(ar)
return association
def __get_association_ar_br(self):
""" Returns the dictionary:
key-> AR
value-> list of BR
This let you take all the BRs associated to the same AR
"""
association = {}
for br in self.g_set.get_br():
for ar in br.get_contributors():
if ar.get_is_held_by() is not None and ar not in association:
association[ar] = [br]
elif ar.get_is_held_by() is not None and ar in association:
association[ar].append(br)
return association
| python |
import os
import re
from .models import Profile, Link
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils.module_loading import import_string
from django.contrib.sites.shortcuts import get_current_site
from django.contrib.auth.models import User
from django.contrib.auth import get_user_model
from django.template.defaultfilters import filesizeformat
from rest_framework import serializers, exceptions
from rest_auth.registration.serializers import RegisterSerializer as RS
from rest_auth.serializers import LoginSerializer as LS
from rest_auth.models import TokenModel
from avatar.models import Avatar
from avatar.signals import avatar_updated
from allauth.account.forms import ResetPasswordForm, default_token_generator
from allauth.account.utils import send_email_confirmation, user_pk_to_url_str
from allauth.account.forms import UserTokenForm
from allauth.account.adapter import get_adapter
from allauth.utils import email_address_exists
from allauth.account.models import EmailAddress
from allauth.account import app_settings as allauth_settings
from allauth.account.utils import setup_user_email
UserModel = get_user_model()
class UserSocialLinksSerializer(serializers.ModelSerializer):
class Meta:
model = Link
fields = ('facebook', 'twitter', 'youtube', 'instagram')
class ProfileSerializer(serializers.ModelSerializer):
"""a serializer for our user profile objects"""
link = UserSocialLinksSerializer(read_only=True)
class Meta:
model = Profile
fields = ( 'first_name','last_name','displayed_name','bio', 'location', 'birth_date','link')
extra_kwargs = {
'first_name':{'write_only':True},
'last_name':{'write_only':True},
'displayed_name':{'read_only':True}}
def validate(self, data):
pattern = "^[a-zA-ZàáâäãåąčćęèéêëėįìíîïłńòóôöõøùúûüųūÿýżźñçčšžÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ðء-ي]+[a-zA-ZàáâäãåąčćęèéêëėįìíîïłńòóôöõøùúûüųūÿýżźñçčšžÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ðء-ي]+[a-zA-ZàáâäãåąčćęèéêëėįìíîïłńòóôöõøùúûüųūÿýżźñçčšžÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ðء-ي]*$"
compiler = re.compile(pattern)
if not compiler.match(data["first_name"]):
raise serializers.ValidationError(
_("Make sure it contains only letters."))
if not compiler.match(data["last_name"]):
raise serializers.ValidationError(
_("Make sure it contains only letters."))
return data
class DisplayUserName(serializers.ModelSerializer):
display_name = serializers.ReadOnlyField(source='displayed_name')
class Meta:
model = Profile
fields = ('display_name',)
class UserSerializer(serializers.ModelSerializer):
displayed_name = serializers.ReadOnlyField(source='profile.displayed_name')
avatar_url = serializers.SerializerMethodField()
class Meta:
model = User
fields = ('username', 'email', 'displayed_name', 'avatar_url')
#extra_kwargs = {'password': {'write_only': True}}
def get_avatar_url(self, obj, size=settings.AVATAR_DEFAULT_SIZE):
for provider_path in settings.AVATAR_PROVIDERS:
provider = import_string(provider_path)
avatar_url = provider.get_avatar_url(obj, size)
if avatar_url:
return self.context['request'].build_absolute_uri(avatar_url)
class RegisterSerializer(serializers.Serializer):
email = serializers.EmailField(required=allauth_settings.EMAIL_REQUIRED)
password1 = serializers.CharField(required=True, write_only=True)
password2 = serializers.CharField(required=True, write_only=True)
def validate_email(self, email):
email = get_adapter().clean_email(email)
if allauth_settings.UNIQUE_EMAIL:
if email and email_address_exists(email):
raise serializers.ValidationError(
_("A user is already registered with this e-mail address."))
return email
def validate_password1(self, password):
return get_adapter().clean_password(password)
def validate(self, data):
if data['password1'] != data['password2']:
raise serializers.ValidationError(
_("The two password fields didn't match."))
return data
def get_cleaned_data(self):
return {
'password1': self.validated_data.get('password1', ''),
'email': self.validated_data.get('email', ''),
}
def save(self, request):
adapter = get_adapter()
user = adapter.new_user(request)
self.cleaned_data = self.get_cleaned_data()
adapter.save_user(request, user, self)
setup_user_email(request, user, [])
user.profile.save()
return user
class LoginSerializer(LS):
def validate(self, attrs):
username = attrs.get('username')
email = attrs.get('email')
password = attrs.get('password')
user = self._validate_username_email(username, email, password)
# Did we get back an active user?
if user:
if not user.is_active:
msg = _('User account is disabled.')
raise exceptions.ValidationError(msg)
else:
msg = _('Unable to log in with provided credentials.')
raise exceptions.ValidationError(msg)
# If required, is the email verified?
email_address = user.emailaddress_set.get(email=user.email)
if not email_address.verified:
pass
#raise exceptions.PermissionDenied('not verified')
attrs['user'] = user
return attrs
class PasswordResetSerializer(serializers.Serializer):
email = serializers.EmailField()
def validate_email(self, email):
email = get_adapter().clean_email(email)
if not email_address_exists(email):
raise serializers.ValidationError(_("The e-mail address is not assigned "
"to any user account"))
return email
def save(self, *args, **kwargs):
request = self.context.get('request')
current_site = get_current_site(request)
email = self.validated_data["email"]
user = UserModel.objects.get(email__iexact=email)
token_generator = kwargs.get("token_generator", default_token_generator)
temp_key = token_generator.make_token(user)
path = "/reset-password/{}/{}".format(user_pk_to_url_str(user), temp_key)
url = request.build_absolute_uri(path)
context = {"current_site": current_site,
"user": user,
"password_reset_url": url,
"request": request}
get_adapter().send_mail(
'account/email/password_reset_key',
email,
context)
return email
class PasswordResetConfirmSerializer(serializers.Serializer):
new_password1 = serializers.CharField(max_length=128)
new_password2 = serializers.CharField(max_length=128)
uid = serializers.CharField()
key = serializers.CharField()
def validate_new_password1(self, password):
return get_adapter().clean_password(password)
def validate(self, attrs):
self.user_token_form = UserTokenForm(data={'uidb36': attrs['uid'], 'key': attrs['key']})
if not self.user_token_form.is_valid():
raise serializers.ValidationError(_("Invalid Token"))
if attrs['new_password1'] != attrs['new_password2']:
raise serializers.ValidationError(_("The two password fields didn't match."))
self.password = attrs['new_password1']
return attrs
def save(self):
user = self.user_token_form.reset_user
get_adapter().set_password(user, self.password)
return user
class ResendConfirmSerializer(serializers.Serializer):
email = serializers.EmailField()
password_reset_form_class = ResetPasswordForm
def validate(self, attrs):
self.reset_form = self.password_reset_form_class(
data=self.initial_data)
if not self.reset_form.is_valid():
raise serializers.ValidationError(self.reset_form.errors)
return attrs
def save(self):
request = self.context.get('request')
User = get_user_model()
email = self.reset_form.cleaned_data["email"]
user = User.objects.get(email__iexact=email)
send_email_confirmation(request, user, True)
return email
from posts.serializers import PostSerializer
class UserDetailsSerializer(serializers.ModelSerializer):
email_status = serializers.SerializerMethodField()
avatar_url = serializers.SerializerMethodField()
profile = ProfileSerializer()
avatar = serializers.ImageField(write_only=True, required=False)
class Meta:
model = UserModel
fields = ('username', 'email', 'email_status', 'profile', 'avatar', 'avatar_url')
def get_email_status(self, obj):
email_address = EmailAddress.objects.get(user=obj)
return email_address.verified
def get_avatar_url(self, obj, size=settings.AVATAR_DEFAULT_SIZE):
for provider_path in settings.AVATAR_PROVIDERS:
provider = import_string(provider_path)
avatar_url = provider.get_avatar_url(obj, size)
if avatar_url:
return avatar_url
def validate_name(self, name):
pattern = "^[a-zA-ZàáâäãåąčćęèéêëėįìíîïłńòóôöõøùúûüųūÿýżźñçčšžÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ðء-ي]+[a-zA-ZàáâäãåąčćęèéêëėįìíîïłńòóôöõøùúûüųūÿýżźñçčšžÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ðء-ي]+[a-zA-ZàáâäãåąčćęèéêëėįìíîïłńòóôöõøùúûüųūÿýżźñçčšžÀÁÂÄÃÅĄĆČĖĘÈÉÊËÌÍÎÏĮŁŃÒÓÔÖÕØÙÚÛÜŲŪŸÝŻŹÑßÇŒÆČŠŽ∂ðء-ي]*$"
compiler = re.compile(pattern)
if not compiler.match(name):
raise serializers.ValidationError(
_("Make sure it contains only letters and spaces."))
return name
def validate_avatar(self, avatar):
if settings.AVATAR_ALLOWED_FILE_EXTS:
root, ext = os.path.splitext(avatar.name.lower())
if ext not in settings.AVATAR_ALLOWED_FILE_EXTS:
valid_exts = ", ".join(settings.AVATAR_ALLOWED_FILE_EXTS)
error = _("%(ext)s is an invalid file extension. "
"Authorized extensions are : %(valid_exts_list)s")
raise serializers.ValidationError(error %
{'ext': ext,
'valid_exts_list': valid_exts})
if avatar.size > settings.AVATAR_MAX_SIZE:
error = _("Your file is too big: %(size)s, "
"the maximum allowed size is: %(max_valid_size)s")
raise serializers.ValidationError(error % {
'size': filesizeformat(avatar.size),
'max_valid_size': filesizeformat(settings.AVATAR_MAX_SIZE)
            })
        return avatar
def validate_email(self, email):
email = get_adapter().clean_email(email)
if email and email_address_exists(email, exclude_user=self.context.get('request').user):
raise serializers.ValidationError(_("A user is already registered with this e-mail address."))
return email
def update(self, instance, validated_data):
request = self.context.get('request')
profile = validated_data.get('profile', None)
instance.username = validated_data.get('username', instance.username)
instance.first_name = validated_data.get(
'first_name', instance.first_name)
if profile :
bio = profile.get("bio")
location = profile.get("location")
birth_date = profile.get("birth_date")
first_name = profile.get("first_name")
last_name = profile.get("last_name")
if bio and bio != instance.profile.bio :
instance.profile.bio = bio
if location and location != instance.profile.location:
instance.profile.location = location
if birth_date and birth_date != instance.profile.birth_date:
instance.profile.birth_date = birth_date
if first_name and first_name != instance.profile.first_name:
instance.profile.first_name = first_name
if last_name and last_name != instance.profile.last_name:
instance.profile.last_name = last_name
email = validated_data.get('email', None)
if email and email != instance.email:
adapter = get_adapter()
adapter.send_mail('account/email/email_change', instance.email, {})
email_address = EmailAddress.objects.get(user=instance, verified=True)
email_address.change(request, email, True)
instance.email = email
if 'avatar' in request.FILES:
avatar = Avatar(user=instance, primary=True)
image_file = request.FILES['avatar']
avatar.avatar.save(image_file.name, image_file)
avatar.save()
avatar_updated.send(sender=Avatar, user=instance, avatar=avatar)
instance.save()
# sync_sso(instance)
return instance
class TokenSerializer(serializers.ModelSerializer):
user = UserDetailsSerializer()
class Meta:
model = TokenModel
fields = ('key', 'user') | python |
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import os
from pathlib import Path
from subprocess import run
import nltk
def rebuild_custom_ops():
    """The objective of this script is to:
    1.) Delete the existing custom_ops.so if it exists
    2.) Perform the make command
    3.) Validate that a custom_ops.so now exists"""
model_path = Path(__file__).resolve().parent
custom_ops_path = Path(model_path, "custom_ops.so")
if custom_ops_path.exists():
print(f"\nDeleting: {custom_ops_path}")
os.remove(custom_ops_path)
print("\nBuilding Custom Ops")
run(["make"], cwd=custom_ops_path.parent)
assert custom_ops_path.exists()
def get_nltk_data():
"""Gets the NLTK data using the NLTK python module."""
nltk.download("cmudict")
def pytest_sessionstart(session):
get_nltk_data()
rebuild_custom_ops()
| python |
from tortoise import Tortoise
from tortoise.contrib import test
from tortoise.exceptions import OperationalError, ParamsError
from tortoise.tests.testmodels import Event, EventTwo, TeamTwo, Tournament
from tortoise.transactions import in_transaction, start_transaction
class TestTwoDatabases(test.SimpleTestCase):
async def setUp(self):
if Tortoise._inited:
await self._tearDownDB()
first_db_config = test.getDBConfig(
app_label="models", modules=["tortoise.tests.testmodels"]
)
second_db_config = test.getDBConfig(
app_label="events", modules=["tortoise.tests.testmodels"]
)
merged_config = {
"connections": {**first_db_config["connections"], **second_db_config["connections"]},
"apps": {**first_db_config["apps"], **second_db_config["apps"]},
}
await Tortoise.init(merged_config, _create_db=True)
await Tortoise.generate_schemas()
self.db = Tortoise.get_connection("models")
self.second_db = Tortoise.get_connection("events")
async def tearDown(self):
await Tortoise._drop_databases()
async def test_two_databases(self):
tournament = await Tournament.create(name="Tournament")
await EventTwo.create(name="Event", tournament_id=tournament.id)
with self.assertRaises(OperationalError):
await self.db.execute_query("SELECT * FROM eventtwo")
results = await self.second_db.execute_query("SELECT * FROM eventtwo")
self.assertEqual(dict(results[0].items()), {"id": 1, "name": "Event", "tournament_id": 1})
async def test_two_databases_relation(self):
tournament = await Tournament.create(name="Tournament")
event = await EventTwo.create(name="Event", tournament_id=tournament.id)
with self.assertRaises(OperationalError):
await self.db.execute_query("SELECT * FROM eventtwo")
results = await self.second_db.execute_query("SELECT * FROM eventtwo")
self.assertEqual(dict(results[0].items()), {"id": 1, "name": "Event", "tournament_id": 1})
teams = []
for i in range(2):
team = await TeamTwo.create(name="Team {}".format(i + 1))
teams.append(team)
await event.participants.add(team)
self.assertEqual(await TeamTwo.all().order_by("name"), teams)
self.assertEqual(await event.participants.all().order_by("name"), teams)
self.assertEqual(
await TeamTwo.all().order_by("name").values("id", "name"),
[{"id": 1, "name": "Team 1"}, {"id": 2, "name": "Team 2"}],
)
self.assertEqual(
await event.participants.all().order_by("name").values("id", "name"),
[{"id": 1, "name": "Team 1"}, {"id": 2, "name": "Team 2"}],
)
async def test_two_databases_transactions_switch_db(self):
async with in_transaction("models"):
tournament = await Tournament.create(name="Tournament")
await Event.create(name="Event1", tournament=tournament)
async with in_transaction("events"):
event = await EventTwo.create(name="Event2", tournament_id=tournament.id)
team = await TeamTwo.create(name="Team 1")
await event.participants.add(team)
saved_tournament = await Tournament.filter(name="Tournament").first()
self.assertEqual(tournament.id, saved_tournament.id)
saved_event = await EventTwo.filter(tournament_id=tournament.id).first()
self.assertEqual(event.id, saved_event.id)
async def test_two_databases_transaction_paramerror(self):
with self.assertRaisesRegex(
ParamsError,
"You are running with multiple databases, so you should specify connection_name",
):
await start_transaction()
| python |