from Instrucciones.Instruccion import Instruccion
from Instrucciones.Declaracion import Declaracion
from Expresion.Terminal import Terminal
from Tipo import Tipo
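# Note, inferred from the code below: Procedure stores a procedure declaration
# (name, parameters, body). ejecutar() is a no-op, while traducir() emits
# three-address code: jump over the body, emit a 'p_<name>' entry label, declare
# each parameter from the runtime 'stack' list, translate the body, clear the
# stack and jump to the 'temp' return label.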
class Procedure(Instruccion):
def __init__(self,nombre,params,instrucciones):
self.nombre=nombre
self.params=params
self.instrucciones=instrucciones
def ejecutar(self, ent):
''
def traducir(self,ent):
'traduccion proc'
nl=ent.newlabel()
cad='goto ' + nl+'\n'
cad+='label '+ent.newlabel('p_'+self.nombre)+'\n'
cont=0
lenparams=0
if self.params != None:
lenparams=len(self.params)
for i in range(0,lenparams):
val='stack['+str(i)+']'
            term=Terminal(Tipo('stack',None,-1,-1),val)
d=Declaracion(self.params[i].nombre,False,self.params[i].tipo,term)
c3d=d.traducir(ent).codigo3d
cad+=c3d
cont=i
if self.instrucciones!=None:
for inst in self.instrucciones:
if inst !=None:
c3d= inst.traducir(ent).codigo3d
cad+=c3d
cad+='stack=[]\n'
cad+='goto temp\n'
cad+='label ' +nl+'\n'
self.codigo3d=cad
return self
class Parametro():
def __init__(self,nombre,modo,tipo):
self.nombre=nombre
self.modo=modo
self.tipo=tipo
| python |
"""
This example shows how to upload a model with customized CSV schedules.
Put all the relevant schedules under one folder,
then pass the folder directory to the add_files parameter.
"""
import BuildSimHubAPI as bshapi
import BuildSimHubAPI.postprocess as pp
bsh = bshapi.BuildSimHubAPIClient()
project_api_key = 'f98aadb3-254f-428d-a321-82a6e4b9424c'
# 1. define the absolute directory of your energy model
file_dir = '/Users/weilixu/Desktop/data/schedule/5ZoneTDV.idf'
wea_dir = "/Users/weilixu/Desktop/data/jsontest/in.epw"
new_sj = bsh.new_simulation_job(project_api_key)
results = new_sj.run(file_dir=file_dir, epw_dir=wea_dir,
add_files='/Users/weilixu/Desktop/data/schedule/csv', track=True)
if results:
load_data = results.zone_load()
print(load_data)
zl = pp.ZoneLoad(load_data)
print(zl.pandas_df())
| python |
# Generated by Django 3.2.11 on 2022-01-12 08:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('index', '0004_auto_20201221_1213'),
]
operations = [
migrations.AlterField(
model_name='broadcast',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='feedbackreport',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='term',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='track',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='useracceptedterms',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='usermeta',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| python |
from pyrosm.data_manager import get_osm_data
from pyrosm.frames import prepare_geodataframe
import warnings
def get_network_data(node_coordinates, way_records, tags_as_columns,
network_filter, bounding_box):
# Tags to keep as separate columns
tags_as_columns += ["id", "nodes", "timestamp", "changeset", "version"]
# Call signature for fetching network data
nodes, ways, relation_ways, relations = get_osm_data(node_arrays=None,
way_records=way_records,
relations=None,
tags_as_columns=tags_as_columns,
data_filter=network_filter,
filter_type="exclude",
# Keep only records having 'highway' tag
osm_keys="highway",
)
    # If there wasn't any data, return None
    if ways is None:
        warnings.warn("Could not find any network data (highways) for the given area.",
                      UserWarning,
                      stacklevel=2)
        return None
# Prepare GeoDataFrame
gdf = prepare_geodataframe(nodes, node_coordinates, ways,
relations, relation_ways,
tags_as_columns, bounding_box)
return gdf
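# Usage sketch (not part of the original module): get_network_data() is an internal
# helper; the public pyrosm API reaches it through the OSM class. The .osm.pbf path
# below is a placeholder assumption.
if __name__ == "__main__":
    from pyrosm import OSM
    osm = OSM("region-extract.osm.pbf")  # any local OpenStreetMap PBF extract
    drive_net = osm.get_network(network_type="driving")  # road network as a GeoDataFrame
    print(drive_net.head())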
| python |
class UnionFind(object):
def __init__(self, n):
self.u = list(range(n))
def union(self, a, b):
ra, rb = self.find(a), self.find(b)
if ra != rb:
self.u[ra] = rb
def find(self, a):
while self.u[a] != a:
a = self.u[a]
return a
class Solution(object):
def findCircleNum(self, M):
if not M:
return 0
s = len(M)
uf = UnionFind(s)
for r in range(s):
for c in range(r, s):
if M[r][c] == 1:
uf.union(r, c)
return len(set([uf.find(i) for i in range(s)]))
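# Minimal usage sketch (added for illustration): M is the friendship adjacency
# matrix from the "Friend Circles" problem; persons 0 and 1 are friends and
# person 2 is alone, so two friend circles are expected.
if __name__ == '__main__':
    M = [[1, 1, 0],
         [1, 1, 0],
         [0, 0, 1]]
    print(Solution().findCircleNum(M))  # expected output: 2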
'''
Ideas/thoughts:
Sanity check: if the matrix is empty, return 0 friend groups.
Iterate over each person's friend list; M is a square matrix, so only the upper
triangle (c >= r) needs to be scanned.
The two key operations are union and find: union merges two groups, and find
returns the representative (root) of the group an element belongs to.
''' | python |
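# Problem, inferred from the code below: for each test case, pick one character
# and delete some of its occurrences so that the string becomes a palindrome;
# print the minimum number of deletions over all choices, or -1 if no single
# character works. The two-pointer scan counts deletions for a fixed character,
# and the replace()/reverse check marks characters that can never work.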
for _ in range(int(input())):
n = int(input())
s = input()
alpha = set(s)
ans = n
countImpossible = 0
for i in alpha:
curr = 0
lb, ub = 0, n - 1
while lb < ub:
if s[lb] == s[ub]:
lb += 1
ub -= 1
continue
else:
if s[lb] == i:
lb += 1
curr += 1
continue
elif s[ub] == i:
ub -= 1
curr += 1
continue
else:
curr = n + 1
lb += 1
ub -= 1
continue
dup = s
dup = dup.replace(i, '')
if dup != dup[::-1]:
countImpossible += 1
ans = min(ans, curr)
if countImpossible == len(alpha):
ans = -1
print(ans) | python |
#!/usr/bin/env python
# Copyright (c) 2020, Xiaotian Derrick Yang
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Package build and install script."""
from setuptools import find_packages, setup
def get_readme():
"""Load README.rst for display on PyPI."""
with open("README.md") as f:
return f.read()
setup(
name="quanbit",
version="0.0.1",
description="Python library for simulating quantum computor and algorithm.",
long_description=get_readme(),
long_description_content_type="text/markdown",
author="Xiaotian Derrick Yang",
author_email="[email protected]",
url="https://github.com/tczorro/quanbit",
package_dir={"": "src"},
packages=find_packages(where="src"),
zip_safe=False,
python_requires=">=3.6",
install_requires=["numpy>=1.16",],
keywords=["Quantum Computing", "Quantum Algorithm"],
classifiers=[
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Description
"""
import torch
from scipy import stats
from ptranking.metric.adhoc_metric import torch_ap_at_ks, torch_nDCG_at_ks, torch_kendall_tau, torch_nerr_at_ks
def test_ap():
    ''' todo-as-note: the denominator should be carefully checked when using AP@k '''
    # here we assume that there are five relevant documents, but the system retrieves only three of them
sys_sorted_labels = torch.Tensor([1.0, 0.0, 1.0, 0.0, 1.0])
std_sorted_labels = torch.Tensor([1.0, 1.0, 1.0, 1.0, 1.0])
ap_at_ks = torch_ap_at_ks(sys_sorted_labels, std_sorted_labels, ks=[1, 3, 5])
print(ap_at_ks) # tensor([1.0000, 0.5556, 0.4533])
sys_sorted_labels = torch.Tensor([1.0, 0.0, 1.0, 0.0, 1.0])
std_sorted_labels = torch.Tensor([1.0, 1.0, 1.0, 0.0, 0.0])
ap_at_ks = torch_ap_at_ks(sys_sorted_labels, std_sorted_labels, ks=[1, 3, 5])
print(ap_at_ks) # tensor([1.0000, 0.5556, 0.7556])
    # here we assume that there are four relevant documents, and the system retrieves all four of them
sys_sorted_labels = torch.Tensor([1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0])
std_sorted_labels = torch.Tensor([1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
ap_at_ks = torch_ap_at_ks(sys_sorted_labels, std_sorted_labels, ks=[1, 2, 3, 5, 7])
print(ap_at_ks) # tensor([1.0000, 1.0000, 0.6667, 0.6875, 0.8304])
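def test_ap_manual_check():
    ''' Added sketch, not part of the original suite: recompute AP@k by hand for the
    first case above so the 0.5556 / 0.4533 values are easy to verify. The
    min(k, total_relevant) denominator is an assumption consistent with the printed
    tensors. '''
    sys_sorted_labels = [1.0, 0.0, 1.0, 0.0, 1.0]
    total_relevant = 5  # five relevant documents are assumed to exist in total
    for k in (1, 3, 5):
        hits, precisions = 0, []
        for rank, label in enumerate(sys_sorted_labels[:k], start=1):
            if label > 0:
                hits += 1
                precisions.append(hits / rank)  # precision at each relevant rank
        ap_at_k = sum(precisions) / min(k, total_relevant)
        print(k, round(ap_at_k, 4))  # 1 -> 1.0, 3 -> 0.5556, 5 -> 0.4533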
def test_ndcg():
sys_sorted_labels = torch.Tensor([1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 1.0])
std_sorted_labels = torch.Tensor([1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0])
ndcg_at_ks = torch_nDCG_at_ks(sys_sorted_labels, std_sorted_labels, ks=[1, 2, 3, 4, 5, 6, 7])
print(ndcg_at_ks) # tensor([1.0000, 1.0000, 0.7654, 0.8048, 0.8048, 0.8048, 0.9349])
def test_nerr():
sys_sorted_labels = torch.Tensor([3.0, 2.0, 4.0])
std_sorted_labels = torch.Tensor([4.0, 3.0, 2.0])
nerr_at_ks = torch_nerr_at_ks(sys_sorted_labels, std_sorted_labels, ks=[1, 2, 3])
print(nerr_at_ks) # tensor([0.4667, 0.5154, 0.6640])
def test_kendall_tau():
reference = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0])
sys_1 = torch.Tensor([2.0, 1.0, 5.0, 3.0, 4.0, 6.0, 7.0, 9.0, 8.0, 10.0])
sys_2 = torch.Tensor([10.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 1.0])
tau_1 = torch_kendall_tau(sys_1, natural_ascending_as_reference=True)
print('tau_1', tau_1)
tau_2 = torch_kendall_tau(sys_2, natural_ascending_as_reference=True)
print('tau_2', tau_2)
    tau, p = stats.kendalltau(reference.data.numpy(), sys_1)
print('scipy-1', tau, p)
tau, p = stats.kendalltau(reference.data.numpy(), sys_2)
print('scipy-2', tau, p)
print()
print('-----------------------')
res_reference, _ = torch.sort(reference, dim=0, descending=True)
tau_1 = torch_kendall_tau(sys_1, natural_ascending_as_reference=False)
print('tau_1', tau_1)
tau_2 = torch_kendall_tau(sys_2, natural_ascending_as_reference=False)
print('tau_2', tau_2)
tau, p = stats.kendalltau(res_reference.data.numpy(), sys_1)
print('scipy-1', tau, p)
tau, p = stats.kendalltau(res_reference.data.numpy(), sys_2)
print('scipy-2', tau, p)
if __name__ == '__main__':
#1
#test_ap()
#2
test_nerr()
#3
#test_kendall_tau()
| python |
import logging
from kubernetes import client
from kubernetes.client import V1beta1CustomResourceDefinition, V1ObjectMeta, V1beta1CustomResourceDefinitionSpec, \
V1Deployment, V1DeploymentSpec, V1LabelSelector, V1PodTemplateSpec, V1PodSpec, V1Service, V1ServiceSpec, \
V1ServicePort, V1DeleteOptions, V1PersistentVolumeClaim, V1PersistentVolumeClaimSpec, V1ResourceRequirements
from nifi_web.models import K8sCluster
logger = logging.getLogger(__name__)
def auth_gcloud_k8s(credentials):
c = K8sCluster.objects.get(id=1)
configuration = client.Configuration()
configuration.host = f"https://{c.endpoint}:443"
configuration.verify_ssl = False
configuration.api_key = {"authorization": "Bearer " + credentials.token}
client.Configuration.set_default(configuration)
def ensure_custom_object(api: client.CustomObjectsApi, custom_object, group, plural, version, namespace, name):
if len(api.list_namespaced_custom_object(namespace=namespace,
field_selector=f'metadata.name={name}', group=group,
plural=plural, version=version)['items']) == 0:
logger.info(f'creating custom object: {namespace}/{name}')
api.create_namespaced_custom_object(
body=custom_object,
namespace=namespace,
group=group,
plural=plural,
version=version
)
else:
logger.info(f'custom object exists: {namespace}/{name}')
def destroy_custom_object(api: client.CustomObjectsApi, group, plural, version, namespace, name):
if len(api.list_namespaced_custom_object(namespace=namespace,
field_selector=f'metadata.name={name}', group=group,
plural=plural, version=version)['items']) == 1:
logger.info(f'destroying custom object: {namespace}/{name}')
api.delete_namespaced_custom_object(
namespace=namespace,
group=group,
plural=plural,
version=version,
name=name,
body=V1DeleteOptions()
)
else:
logger.info(f'cannot find custom object to destroy: {namespace}/{name}')
def ensure_deployment(api: client.AppsV1Api, deployment, namespace, name):
if len(api.list_namespaced_deployment(namespace=namespace,
field_selector=f'metadata.name={name}').items) == 0:
logger.info(f'creating Deployment: {namespace}/{name}')
api.create_namespaced_deployment(
body=deployment,
namespace=namespace
)
else:
logger.info(f'Deployment exists: {namespace}/{name}')
def ensure_namespace(api: client.CoreV1Api, namespace):
if len(api.list_namespace(field_selector=f'metadata.name={namespace}').items) == 0:
logger.info(f'creating namespace: {namespace}')
body = client.V1Namespace(
metadata=V1ObjectMeta(name=namespace)
)
api.create_namespace(
body=body
)
else:
logger.info(f'namespace exists: {namespace}')
def ensure_statefulset(api: client.AppsV1Api, stateful_set, namespace, name):
if len(api.list_namespaced_stateful_set(namespace=namespace,
field_selector=f'metadata.name={name}').items) == 0:
logger.info(f'creating StatefulSet: {namespace}/{name}')
api.create_namespaced_stateful_set(
body=stateful_set,
namespace=namespace
)
else:
logger.info(f'StatefulSet exists: {namespace}/{name}')
def destroy_deployment(api: client.AppsV1Api, namespace, name):
if len(api.list_namespaced_deployment(namespace=namespace,
field_selector=f'metadata.name={name}').items) == 1:
logger.info(f'destroying Deployment: {namespace}/{name}')
api.delete_namespaced_deployment(
name=name,
namespace=namespace
)
else:
logger.info(f'cannot find Deployment to destroy: {namespace}/{name}')
def destroy_statefulset(api: client.AppsV1Api, core_api: client.CoreV1Api, namespace, name):
for pvc in core_api.list_namespaced_persistent_volume_claim(namespace=namespace,
label_selector=f'app={name}').items:
core_api.delete_namespaced_persistent_volume_claim(
name=pvc.metadata.name,
namespace=namespace
)
if len(api.list_namespaced_stateful_set(namespace=namespace,
field_selector=f'metadata.name={name}').items) == 1:
logger.info(f'destroying StatefulSet: {namespace}/{name}')
api.delete_namespaced_stateful_set(
name=name,
namespace=namespace
)
else:
logger.info(f'cannot find StatefulSet to destroy: {namespace}/{name}')
def ensure_service(api: client.CoreV1Api, service, namespace, name):
if len(api.list_namespaced_service(namespace=namespace,
field_selector=f'metadata.name={name}').items) == 0:
logger.info(f'creating Service: {namespace}/{name}')
api.create_namespaced_service(
body=service,
namespace=namespace
)
else:
logger.info(f'Service exists: {namespace}/{name}')
def destroy_service(api: client.CoreV1Api, namespace, name):
if len(api.list_namespaced_service(namespace=namespace,
field_selector=f'metadata.name={name}').items) == 1:
logger.info(f'destroying Service: {namespace}/{name}')
api.delete_namespaced_service(
name=name,
namespace=namespace
)
else:
logger.info(f'cannot find Service to destroy: {namespace}/{name}')
def destroy_namespace(api: client.CoreV1Api, name):
if len(api.list_namespace(field_selector=f'metadata.name={name}').items) == 1:
logger.info(f'destroying namespace: {name}')
api.delete_namespace(
name=name
)
else:
logger.info(f'cannot find namespace to destroy: {name}')
def ensure_service_account(api: client.CoreV1Api, account, name, namespace):
if len(api.list_namespaced_service_account(namespace=namespace,
field_selector=f'metadata.name={name}').items) == 0:
logger.info(f'creating ServiceAccount: {name}')
api.create_namespaced_service_account(
namespace=namespace,
body=account
)
else:
logger.info(f'ServiceAccount exists: {name}')
def ensure_secret(api: client.CoreV1Api, secret, name, namespace):
if len(api.list_namespaced_secret(namespace=namespace,
field_selector=f'metadata.name={name}').items) == 0:
logger.info(f'creating secret: {name}')
api.create_namespaced_secret(
namespace=namespace,
body=secret
)
else:
logger.info(f'secret exists: {name}')
def ensure_role(api: client.RbacAuthorizationV1beta1Api, role, name):
if len(api.list_cluster_role(field_selector=f'metadata.name={name}').items) == 0:
logger.info(f'creating ClusterRole: {name}')
api.create_cluster_role(role)
else:
logger.info(f'ClusterRole exists: {name}')
def ensure_role_binding(api: client.RbacAuthorizationV1beta1Api, role_binding, name):
if len(api.list_cluster_role_binding(field_selector=f'metadata.name={name}').items) == 0:
logger.info(f'creating ClusterRoleBinding: {name}')
api.create_cluster_role_binding(role_binding)
else:
logger.info(f'ClusterRoleBinding exists: {name}')
def ensure_storage_class(api: client.StorageV1Api, cls, name):
if len(api.list_storage_class(field_selector=f'metadata.name={name}').items) == 0:
logger.info(f'creating StorageClass: {name}')
api.create_storage_class(cls)
else:
logger.info(f'StorageClass exists: {name}')
def ensure_crd(api, name, group, kind, plural, singular, scope):
if len(api.list_custom_resource_definition(field_selector=f'metadata.name={name}').items) == 0:
logger.info(f'creating CustomResourceDefinition: {name}')
try:
api.create_custom_resource_definition(V1beta1CustomResourceDefinition(
api_version='apiextensions.k8s.io/v1beta1',
kind='CustomResourceDefinition',
metadata=V1ObjectMeta(name=name),
spec=V1beta1CustomResourceDefinitionSpec(
group=group,
version='v1alpha1',
names={
'kind': kind,
'plural': plural,
'singular': singular
},
scope=scope
),
))
except ValueError:
            # unfortunate workaround due to client library bug
# https://github.com/kubernetes-client/python/issues/415
logger.warning(f'swallowed ValueError when creating CRD {name} to workaround API client issue')
pass
else:
logger.info(f'CustomResourceDefinition exists: {name}')
def ensure_single_container_deployment(api_apps_v1, container, name, namespace, replicas=1):
ensure_deployment(
api=api_apps_v1,
deployment=V1Deployment(
api_version="apps/v1",
metadata=V1ObjectMeta(
name=name,
labels={'app': name}
),
spec=V1DeploymentSpec(
replicas=replicas,
selector=V1LabelSelector(
match_labels={'app': name}
),
template=V1PodTemplateSpec(
metadata=V1ObjectMeta(
name=name,
labels={'app': name}
),
spec=V1PodSpec(
containers=[
container
]
)
)
)
),
name=name,
namespace=namespace
)
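def example_single_container_deployment(api_apps_v1):
    """
    Illustrative sketch only (not called anywhere in this module): shows how
    ensure_single_container_deployment() is meant to be used. The image, port and
    namespace below are placeholder assumptions.
    """
    ensure_single_container_deployment(
        api_apps_v1=api_apps_v1,
        container=client.V1Container(
            name='echo',
            image='hashicorp/http-echo:latest',
            args=['-listen=:8080', '-text=hello'],
            ports=[client.V1ContainerPort(container_port=8080)]
        ),
        name='echo',
        namespace='default'
    )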
def ensure_ingress_routed_svc(api_core_v1: client.CoreV1Api,
api_custom: client.CustomObjectsApi,
domain,
hostname,
name,
target_name,
namespace,
port_name,
svc_port,
target_port):
ensure_service(
api=api_core_v1,
service=V1Service(
api_version="v1",
metadata=V1ObjectMeta(
name=name
),
spec=V1ServiceSpec(
type='ClusterIP',
ports=[
V1ServicePort(
protocol='TCP',
port=svc_port,
name=port_name,
target_port=target_port
),
],
selector={
'app': target_name
}
)
),
name=name,
namespace=namespace
)
ensure_custom_object(
api=api_custom,
custom_object={
'apiVersion': 'traefik.containo.us/v1alpha1',
'kind': 'IngressRoute',
'metadata': {
'name': name,
},
'spec': {
'entryPoints': [
'websecure'
],
'routes': [
{
'match': f'Host(`{hostname}.{domain}`)',
'kind': 'Rule',
'services': [
{
'name': name,
'port': svc_port
}
],
'middlewares': [
{
'name': 'traefik-forward-auth',
'namespace': 'default'
}
]
}
],
'tls': {
'certResolver': 'default'
}
}
},
group='traefik.containo.us',
plural='ingressroutes',
version='v1alpha1',
name=hostname,
namespace=namespace
)
def destroy_ingress_routed_svc(api_core_v1, api_custom, name, namespace):
destroy_service(
api=api_core_v1,
name=name,
namespace=namespace
)
destroy_custom_object(
api=api_custom,
group='traefik.containo.us',
plural='ingressroutes',
version='v1alpha1',
name=name,
namespace=namespace
)
def ensure_statefulset_with_containers(api_apps_v1,
name,
namespace,
containers,
volume_paths,
replicas=1,
init_containers=None,
volumes=None):
if volumes is None:
volumes = []
if init_containers is None:
init_containers = []
volume_claim_templates = [V1PersistentVolumeClaim(
metadata=V1ObjectMeta(
name=path[0]
),
spec=V1PersistentVolumeClaimSpec(
access_modes=['ReadWriteOnce'],
resources=V1ResourceRequirements(
requests={
'storage': path[2]
}
),
storage_class_name=path[3]
)
) for path in volume_paths]
ss = client.V1StatefulSet(
api_version="apps/v1",
kind="StatefulSet",
metadata=client.V1ObjectMeta(
name=name,
labels={'app': name}
),
spec=client.V1StatefulSetSpec(
replicas=replicas,
service_name=name,
template=V1PodTemplateSpec(
metadata=V1ObjectMeta(labels={"app": name}),
spec=V1PodSpec(
containers=containers,
volumes=volumes,
init_containers=init_containers
)
),
selector={'matchLabels': {'app': name}},
volume_claim_templates=volume_claim_templates
)
)
ensure_statefulset(
api_apps_v1,
stateful_set=ss,
namespace=namespace,
name=name
)
| python |
# Generated by Django 2.1.5 on 2019-01-28 03:31
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('configurations', '0017_d3mconfiguration_description'),
]
operations = [
migrations.AddField(
model_name='d3mconfiguration',
name='env_values',
field=jsonfield.fields.JSONField(blank=True, help_text='D3M env values for running Docker TA2s'),
),
]
| python |
from agrirouter.auth.enums import BaseEnum
class CertificateTypes(BaseEnum):
PEM = "PEM"
P12 = "P12"
class GateWays(BaseEnum):
MQTT = "2"
REST = "3"
| python |
"""yaml templates for DataFrame plotting."""
from os.path import (join, dirname)
import yaml
_filename = join(dirname(__file__), 'palette.yaml')
with open(_filename, 'r') as f:
lineplot_dict = yaml.load(f, Loader=yaml.SafeLoader)
style_overide = lineplot_dict.pop('style_overide', {})
__all__ = ['lineplot_dict', 'style_overide']
| python |
#!/usr/bin/env python3
# Import the TM1637 and time libraries
from tm1637 import TM1637
from time import sleep
# Store the timer duration in variables
print("- Duree du minuteur -")
minutes = int(input("Minutes : "))
secondes = int(input("Secondes : "))
print("- Demarage du minuteur : " + str(minutes) + ":" + str(secondes) + " -")
# Initialise the display
afficheur = TM1637(clk=23, dio=24)
# Set the brightness (0-7)
afficheur.brightness(1)
# Show the timer value on the module before starting
# .numbers(x, y): displays x on the first two 7-segment digits and y on the next two
# -10 < x (resp. y) < 100
afficheur.numbers(minutes, secondes)
# Timer loop
i = minutes
j = secondes
while i >= 0:
while j >= 0:
afficheur.numbers(i, j)
sleep(1)
j -= 1
i -= 1
j = 59
print("- Temps ecoule ! -")
# End animation: blink 00:00
for n in range(0, 20):
afficheur.brightness(0)
sleep(0.25)
afficheur.brightness(7)
sleep(0.25)
| python |
from unittest import TestCase
from daily_solutions.year_2020.day_5 import parse_seat_id
class Day5TestCase(TestCase):
    def test_parse_seat_id(self) -> None:
self.assertEqual(567, parse_seat_id("BFFFBBFRRR"))
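# Reference sketch of the function under test (an assumption, not the actual
# daily_solutions implementation): the boarding pass is read as a 10-bit binary
# number where B/R mean 1 and F/L mean 0, so seat id = row * 8 + column.
def parse_seat_id_sketch(boarding_pass: str) -> int:
    bits = boarding_pass.translate(str.maketrans("BFRL", "1010"))
    return int(bits, 2)  # "BFFFBBFRRR" -> 0b1000110111 -> 567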
| python |
from flask_wtf import FlaskForm
from wtforms import (
widgets,
HiddenField,
BooleanField,
TextField,
PasswordField,
SubmitField,
SelectField,
SelectMultipleField,
DateTimeField,
)
from wtforms.validators import Email, Length, Required, EqualTo, Optional
day_map = {
"0": "Mon",
"1": "Tue",
"2": "Wed",
"3": "Thu",
"4": "Fri",
"5": "Sat",
"6": "Sun",
}
class Login(FlaskForm):
# form to login users; subclass of base form class
email = TextField("Email", [Required(), Email(), Length(min=4, max=50)])
pwd = PasswordField("Password", [Required(), Length(min=6, max=25)])
remember_me = BooleanField(default=True)
submit = SubmitField("Login")
class Register(Login):
# form to register users; subclass of login plus confirm
confirm = PasswordField(
"Confirm Password",
[
Required(),
Length(min=6, max=25),
EqualTo("pwd", message="Passwords must match"),
],
)
submit = SubmitField("Register")
class MultiCheckbox(SelectMultipleField):
widget = widgets.ListWidget(prefix_label=False)
option_widget = widgets.CheckboxInput()
class Pattern(FlaskForm):
# required fields
path = SelectField("Path")
pattern = TextField("Pattern", [Required(), Length(min=1, max=255)])
name = TextField("Name", [Required(), Length(min=1, max=255)])
# scheduling fields: recipients, time, and days
recipients = TextField("Recipients", [Optional(), Length(max=255)])
time = DateTimeField("Time", [Optional()], format="%H:%M")
# create sorted list of days to choose
choices = [(k, v) for k, v in sorted(day_map.items())]
days = MultiCheckbox("Days", [Optional()], choices=choices)
# hidden field for pattern_id
pattern_id = HiddenField("pattern_id", [Optional()])
# create two submit fields
save = SubmitField("Save")
delete = SubmitField("Delete")
| python |
# File: __init__.py
# Aim: Package initial
# Package version: 1.0
# %%
from .defines import Config
CONFIG = Config()
# CONFIG.reload_logger(name='develop')
# %%
| python |
from dataclasses import dataclass
from enum import Enum
class TokenEnum(Enum):
LPAREN = 0
RPAREN = 1
NUMBER = 2
PLUS = 3
MINUS = 4
MULTIPLY = 5
DIVIDE = 6
INTEGRAL_DIVIDE = 7
EXPONENTIAL = 8
@dataclass
class Token:
type: TokenEnum
val: any = None
def __repr__(self):
        if self.val is not None:
            return self.type.name + f":{self.val}"
        else:
            return self.type.name
| python |
#-------------------------------------------------------------------------------
import collections
import copy
import warnings
import inspect
import logging
import math
#-------------------------------------------------------------------------------
class MintError(Exception): pass
class MintIndexError(MintError): pass
class MintValueError(MintError): pass
class MintConnectionError(MintError): pass
class MintModelDoesNotExist(MintError): pass
#-------------------------------------------------------------------------------
class Dir:
I = 'input'
O = 'output'
IO = 'inout'
ANY = '_any_dir_'
class Default:
port_dir = Dir.ANY
scalar_port_template = '{I}_{n}'
vector_port_template = '{i}_{n}'
net_template = '{I}_{n}'
#-------------------------------------------------------------------------------
class Net(object):
""" Base class for net types. """
def _handle_cmp_ops(self, other, op, dir):
if isinstance(other, ModInstBase):
other.bind_net(self, dir=dir)
return True
raise TypeError("unsupported operand type(s) for %s: '%s' and '%s'" %
(op, type(self), type(other)))
def __ne__(self, other):
return self._handle_cmp_ops(other, '<>', Dir.IO)
def __gt__(self, other):
return self._handle_cmp_ops(other, '>', Dir.I)
def __lt__(self, other):
return self._handle_cmp_ops(other, '<', Dir.O)
def __mul__(self, other):
if isinstance(other, int):
clones = []
for i in range(other):
clone = copy.copy(self)
clone.parent = clone
clones.append(clone)
return clones
else:
return NotImplemented
def __rmul__(self, other):
return self.__mul__(other)
class Wire(Net):
def __init__(self, name=None, size=None, indices=None, parent=None):
"""
Initialize the Wire instance.
- name = base name for the wire
- size = None for scalar, int for vector.
- indices = tuple of indices, but size takes precedence if defined.
- parent points to parent wire for slices.
"""
self._name = name
if size is not None:
self.indices = tuple(range(size))
else:
self.indices = indices # 'None' for scalar
self.parent = parent or self
# Template used for full/formatted name
self.template = "{name}"
def __call__(self, name=None):
"""
Additional initializations for the Wire instance.
- name = base name for the wire
"""
self.name = name or self.name
return self
@property
def name(self):
return self._name or self.parent._name
@name.setter
def name(self, val):
self._name = val
@property
def fname(self):
""" Return full/formatted name """
return self.template.format(name=self.name)
def formatted_repr(self, fmt0="{name}",
fmt1="{name}[{index}]",
fmt2="{name}[{index}]"):
""" Return formatted representation
- fmt0 : format for scalars
- fmt1 : format for 1 bit vectors
- fmt2 : format for >= 2 bit vectors
Following replacement strings can be specified:
- name, index, msb, lsb
"""
name = self.fname
#name = self.name.format(**kwargs)
if self.indices is None:
index = msb = lsb = ''
return fmt0.format(name=name, index=index, msb=msb, lsb=lsb)
elif len(self.indices) == 1:
index = self.indices[0]
msb = lsb = index
return fmt1.format(name=name, index=index, msb=msb, lsb=lsb)
else:
lsb = self.indices[0]
msb = self.indices[-1]
index = "%s:%s" % (msb, lsb)
return fmt2.format(name=name, index=index, msb=msb, lsb=lsb)
def __getitem__(self, key):
""" Verilog like indexing syntax is used:
[index] => python [index]
[msb:lsb] => python [lsb:msb+1]
"""
if self.indices is None:
raise MintIndexError("scalar wire is not indexable")
valid_range = range(len(self.indices))
if isinstance(key, int):
if key not in valid_range:
raise MintIndexError("wire index out of range")
indices = (self.indices[key],)
elif isinstance(key, slice):
msb, lsb, step = key.start, key.stop, key.step
if msb is None: msb = valid_range[-1]
if lsb is None: lsb = valid_range[0]
if msb not in valid_range or lsb not in valid_range:
raise MintIndexError("wire index out of range")
if msb < lsb:
raise MintIndexError("msb less than lsb")
indices = self.indices[lsb : msb + 1 : step]
return Wire(indices=indices, parent=self.parent)
def __len__(self):
if self.indices is None:
return 1
else:
return len(self.indices)
def __repr__(self):
return "Wire(%s)" % self.formatted_repr()
class Const(Net):
def __init__(self, size, val, fmt='hex'):
self.size = size
if val < 0 or val >= 2**size:
raise MintValueError("constant value out of range")
self.val = val
self.fmt = fmt
#@property
#def name(self):
# return self.formatted_repr()
def formatted_repr(self, fmt=None):
fmt = fmt or self.fmt
if fmt == 'bin':
return "{size}'b{0:0>{width}b}".format(self.val, size=self.size,
width=self.size)
elif fmt == 'hex':
            width = int(math.ceil(self.size / 4.0))
return "{size}'h{0:0>{width}x}".format(self.val, size=self.size,
width=width)
else:
return "{size}'d{0}".format(self.val, size=self.size)
def __len__(self):
return self.size
def __repr__(self):
return "Const(%s)" % self.formatted_repr()
class Concat(Net):
def __init__(self, nets):
self.nets = nets
#@property
#def name(self):
# return self.formatted_repr()
@property
def wires(self):
return [wire for wire in self.nets if isinstance(wire, Wire)]
def formatted_repr(self):
return "{%s}" % ', '.join([net.formatted_repr() for net in self.nets])
def __len__(self):
size = 0
for net in self.nets:
size += len(net)
return size
def __repr__(self):
return "Concat(%s)" % self.formatted_repr()
#-------------------------------------------------------------------------------
class InstBase(object):
def __div__(self, other):
" Supports inst_exp/template expressions "
if isinstance(other, str):
templatized = self.templatize(other)
else:
raise TypeError('unsupported operand type(s) for /: %s and %s' %
(type(self), type(other)))
return templatized
class InstScalar(InstBase):
def __init__(self, name=None, index=None):
self.name = name
# This would be set if part of a vector
self.index = index
# Set by obj/template expression.
self.template = None
# Which model to build
self.model = None
# Set to True if this instance is a port
self.isport = False
def formatted_repr(self, fmt0="{name}",
fmt1="{name}[{index}]"):
""" Return formatted representation
- fmt0 : format for scalars
- fmt1 : format for 1 bit vectors (part of vector)
Following replacement strings can be specified:
- name, index
"""
if self.index is None:
return fmt0.format(name=self.name, index=self.index)
else:
return fmt1.format(name=self.name, index=self.index)
def __iter__(self):
return iter([self])
def __len__(self):
return 1
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__, self.formatted_repr(),
self.template)
class InstList(InstBase):
def __init__(self, inst_scalars, name=None):
self.scalars = []
index = 0
for inst_scalar in inst_scalars:
inst_scalar.index = index
index += 1
self.scalars.append(inst_scalar)
self._name = name
# Set by obj/template expression.
self.template = None
# Which model to build
self._model = None
# Set to True if this instance is a port
self.isport = False
@property
def name(self):
# Confirm all scalars have same name
assert all(self._name == scalar.name for scalar in self),\
"all scalars should have same name: %s" % self
return self._name
@name.setter
def name(self, value):
self._name = value
for scalar in self.scalars:
scalar.name = value
#@property
#def template(self):
# return self._template
@property
def model(self):
# Confirm all scalars have same model
assert all(self._model == scalar.model for scalar in self),\
"all scalars should have same model: %s" % self
return self._model
@model.setter
def model(self, value):
for scalar in self.scalars:
scalar.model = value
self._model = value
def make(self, model=None):
self.model = model or self.model
for scalar in self:
scalar.make(self.model)
def __getitem__(self, key):
""" Verilog like indexing syntax is used:
[index] => python [index]
[msb:lsb] => python [lsb:msb+1]
"""
valid_range = range(len(self.scalars))
if isinstance(key, int):
if key not in valid_range:
raise MintIndexError("inst index out of range")
return self.scalars[key]
elif isinstance(key, slice):
msb, lsb, step = key.start, key.stop, key.step
if msb is None: msb = valid_range[-1]
if lsb is None: lsb = valid_range[0]
if msb not in valid_range or lsb not in valid_range:
raise MintIndexError("inst index out of range")
if msb < lsb:
raise MintIndexError("msb less than lsb")
sliced = copy.copy(self)
sliced.scalars = self.scalars[lsb : msb + 1 : step]
return sliced
def __iter__(self):
return iter(self.scalars)
def __len__(self):
return len(self.scalars)
def __contains__(self, value):
return value in self.scalars
def __repr__(self):
#r = "InstList("
r = "%s(%s)[" % (self.__class__.__name__, self.name)
for i, e in enumerate(self.scalars):
if i: r += ', ' + str(e)
else: r += str(e)
r += "]"
return r
#-------------------------------------------------------------------------------
class ModInstBase(object):
def _handle_cmp_ops(self, other, op, dir):
if isinstance(other, IntfInstBase):
self.bind_intf(other, modport=0, dir_filter=dir)
return True
if isinstance(other, Net):
self.bind_net(other, dir=dir)
return True
raise TypeError("unsupported operand type(s) for %s: '%s' and '%s'" %
(op, type(self), type(other)))
def __eq__(self, other):
return self._handle_cmp_ops(other, '==', Dir.ANY)
def __ne__(self, other):
return self._handle_cmp_ops(other, '<>', Dir.IO)
def __gt__(self, other):
return self._handle_cmp_ops(other, '>', Dir.O)
def __lt__(self, other):
return self._handle_cmp_ops(other, '<', Dir.I)
class ModInstScalar(InstScalar, ModInstBase):
# InsGen.__getattr__ expects "obj" (module in this case) as first arg
def __init__(self, module, name=None, index=None):
super(ModInstScalar, self).__init__(name, index)
self.module = module
# Bind relationships with interfaces represented as Interface Pins
self.intfpins = []
# Bind relationships with wires represented as Pins
self.pins = []
def templatize(self, template):
# Important - we make a copy, not a deepcopy. This ensures that the
# copy's instance variables point to the same object as the original
templatized = copy.copy(self)
templatized.template = template
return templatized
def bind_intf(self, intfinst, modport, dir_filter):
for intfinst_scalar in intfinst:
intfpin = IntfPin(modinst=self, intfinst=intfinst_scalar,
modport=modport, dir_filter=dir_filter,
template=self.template)
#print 'IntfPin:', intfpin
self.intfpins.append(intfpin)
def bind_net(self, net, dir):
pin = Pin(dir=dir, inst=self, net=net, name=self.template,
intfinst='_IF_')
self.pins.append(pin)
def make(self, model=None):
self.model = model or self.model
self.module.make(self.model)
def get_pins(self):
pins = []
for intfpin in self.intfpins:
pins += intfpin.get_pins()
pins += self.pins
return pins
def __repr__(self):
return "ModInstScalar(%s, %s, %s)" % (self.formatted_repr(),
self.module.name, self.template)
class ModInstList(InstList, ModInstBase):
def templatize(self, template):
scalars = []
for scalar in self:
scalars += [scalar.templatize(template)]
templatized = copy.copy(self)
templatized.scalars = scalars
templatized.template = template
return templatized
def bind_intf(self, intfinst, modport, dir_filter):
#if len(intfinst) == 1:
if isinstance(intfinst, IntfInstScalar):
# v - s
for modinst_scalar in self:
intfpin = IntfPin(modinst=modinst_scalar, intfinst=intfinst,
modport=modport, dir_filter=dir_filter,
template=self.template)
#print 'IntfPin:', intfpin
modinst_scalar.intfpins.append(intfpin)
else:
# v - v
if len(self) != len(intfinst):
raise MintConnectionError("vector sizes differ: %s(%s), %s(%s)" %
(self, len(self), intfinst, len(intfinst)))
for modinst_scalar, intfinst_scalar in zip(self, intfinst):
intfpin = IntfPin(modinst=modinst_scalar,
intfinst=intfinst_scalar,
modport=modport, dir_filter=dir_filter,
template=self.template)
#print 'IntfPin:', intfpin
modinst_scalar.intfpins.append(intfpin)
def bind_net(self, net, dir):
for modinst_scalar in self:
pin = Pin(dir=dir, inst=modinst_scalar, net=net, name=self.template)
modinst_scalar.pins.append(pin)
#-------------------------------------------------------------------------------
class IntfInstBase(object):
def _handle_cmp_ops(self, other, op, dir_filter):
if isinstance(other, ModInstBase):
other.bind_intf(self, modport=1, dir_filter=dir_filter)
return True
raise TypeError("unsupported operand type(s) for %s: '%s' and '%s'" %
(op, type(self), type(other)))
def __eq__(self, other):
return self._handle_cmp_ops(other, '==', Dir.ANY)
def __ne__(self, other):
return self._handle_cmp_ops(other, '<>', Dir.IO)
def __gt__(self, other):
return self._handle_cmp_ops(other, '>', Dir.I)
def __lt__(self, other):
return self._handle_cmp_ops(other, '<', Dir.O)
class IntfInstScalar(InstScalar, IntfInstBase):
# InsGen.__getattr__ expects "obj" (interface in this case) as first arg
def __init__(self, interface, name=None, index=None):
super(IntfInstScalar, self).__init__(name, index)
self.interface = interface
def templatize(self, template):
self.template = template
return self
def make(self, model=None):
self.model = model or self.model
self.interface.make(self.model)
def __repr__(self):
return "IntfInstScalar(%s, %s, %s)" % (self.formatted_repr(),
self.interface.name, self.template)
class IntfInstList(InstList, IntfInstBase):
def templatize(self, template):
for scalar in self:
scalar.template = template
return self
#-------------------------------------------------------------------------------
class Pin(object):
"""
P = port name, dir
I = inst/modport
N = net
PIN = I.P(N) = inst I has port P that connects to net N
"""
def __init__(self, dir, inst, net, name=None, intfinst=None):
self.dir = dir
self.modinst = inst
self.net = net
# This may be defined by "inst/'name'" expression, else net name
self._name = name
self.intfinst = intfinst
# Template used for full/formatted name
self.template = "{name}"
@property
def name(self):
if self._name:
return self._name
try:
return self.net.name
except AttributeError:
# This will happen if net is a Const or Concat and port name is not
# specified
raise MintConnectionError("port name not specified for '%s' and '%s'" %
(self.inst, self.net))
@name.setter
def name(self, value):
self._name = value
@property
def fname(self):
""" Return full/formatted name """
return self.template.format(name=self.name)
def __repr__(self):
r = '{self.dir}: {self.modinst.name}.{self.fname}({self.net.fname})'
return r.format(self=self)
class IntfPin(object):
"""
Interface Pin binds a modinst to a view/filter of the interface instance
P = port template, dir
I = inst
N = interface inst, modport
PIN = I.P(N) = inst I has port P that connects to net N
"""
def __init__(self, modinst, intfinst, modport, dir_filter, template=None):
self.modinst = modinst
self.intfinst = intfinst
self.modport = modport # this could int(position) or str(name)
self.dir_filter = dir_filter
# This may be defined by "inst/template" expression, else default
self._template = template
#@property
#def name(self):
# return self.intfinst.name # ???
@property
def template(self):
if self._template is not None:
return self._template
else:
if self.modinst.index is None:
return Default.scalar_port_template
else:
return Default.vector_port_template
#@template.setter
#def template(self, value):
# self._template = value
def get_pins(self):
interface = self.intfinst.interface
# TODO: consider replacing with named tuple
if isinstance(self.modport, int):
modport_name = interface.port_at_pos[self.modport]
else:
modport_name = self.modport
modport = interface.module_instances[modport_name]
# Get the pins form the modport that match the direction criteria and
# compute the port and wire names based on naming rules
pins = []
#for pin in modport.pins:
for pin in modport.get_pins():
if self.dir_filter in (Dir.ANY, pin.dir):
i = self.intfinst.name
k = self.intfinst.formatted_repr(fmt0="", fmt1="{index}")
I = self.intfinst.formatted_repr(fmt0="{name}",
fmt1="{name}{index}")
# Inplace pin template change
pin_template = self.template
pin.template = pin_template.format(i=i, k=k, I=I, n='{name}')
# Inplace wire template change
net_template = self.intfinst.template or Default.net_template
if hasattr(pin.net, 'template'):
pin.net.template = net_template.format(i=i, k=k, I=I, n='{name}')
pin.intfinst = I
pins.append(pin)
return pins
def __repr__(self):
        r = '{self.dir_filter}: {self.modinst.name}.{self.template}'
r += '({self.intfinst.name}.{self.modport})'
return r.format(self=self)
#-------------------------------------------------------------------------------
class MintObject(object):
def __init__(self, name=None, model=None):
self._name = name or self.__class__.__name__
self.model = model
self.module_instances = collections.OrderedDict()
self.interface_instances = collections.OrderedDict()
self.port_at_pos = []
# TODO add shadow dict for self.intstances
if model:
self.make(model)
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
def add(self, obj):
if obj.name is None:
raise MintValueError, "obj %s has no name" % obj
if isinstance(obj, ModInstBase):
self.module_instances[obj.name] = obj
elif isinstance(obj, IntfInstBase):
self.interface_instances[obj.name] = obj
def make(self, model):
try:
model_method = getattr(self, model)
except AttributeError:
raise MintModelDoesNotExist("'%s' of '%s'" % (model, self.name))
model_method(self)
def get_module_instances(self, flatten=False):
mod_insts = []
for mod_inst in self.module_instances.values():
if isinstance(mod_inst, ModInstList):
if flatten == True:
for mod_inst_scalar in mod_inst:
mod_insts += [mod_inst_scalar]
else:
mod_insts += [mod_inst]
else:
mod_insts += [mod_inst]
return mod_insts
def get_interface_instances(self, flatten=False):
intf_insts = []
for intf_inst in self.interface_instances.values():
if isinstance(intf_inst, IntfInstList):
if flatten == True:
for intf_inst_scalar in intf_inst:
intf_insts += [intf_inst_scalar]
else:
intf_insts += [intf_inst]
else:
intf_insts += [intf_inst]
return intf_insts
class Module(MintObject): pass
class Interface(MintObject): pass
| python |
"""Remote"""
from os import path
import uuid
import time
import json
import tornado.ioloop
import tornado.websocket
import tornado.web
from models.led_strip import LedStrip
from models.color import Color
strip = LedStrip(14)
def start():
"""animation"""
strip.stop_animation()
print("start_animation")
strip.start_animation()
def stop():
"""stop"""
print("stop animation")
strip.stop_animation()
def change(effects):
"""change"""
strip.remove_all_effects()
for effect in effects:
strip.add_effect_by_name(effect['name'], options=effect['options'])
for key in clients:
print(clients[key].uuid)
clients[key].send_led_strip_info()
json.dump(effects, open("./effect.store", "w"))
clients = {}
class MainHandler(tornado.web.RequestHandler): # pylint: disable=W0223
"""MainHandler"""
def get(self):
"""get"""
file = open("{}/index.html".format(path.dirname(path.abspath(__file__))), "r")
self.write(file.read())
file.close()
class LedStripWebsocket(tornado.websocket.WebSocketHandler): # pylint: disable=W0223
"""LedStripWebsocket"""
def simple_init(self):
""" Initialize Socket """
self.last = time.time()
self.stop = False
self.uuid = uuid.uuid1()
def check_origin(self, origin):
"""check_origin"""
return True
def send_led_strip_info(self):
"""check_origin"""
result = {}
result['ledstrip'] = strip.to_json()
effects = strip.get_effects()
result['effects'] = []
for effect in effects:
result['effects'].append(effect.to_json())
result_json = "{}"
try:
result_json = json.dumps(result)
except Exception as error:
print(error)
self.write_message(u"{}".format(result_json))
def open(self): # pylint: disable=W0221
"""open"""
print("Websocket Opened")
self.simple_init()
clients[self.uuid] = self
self.send_led_strip_info()
self.loop = tornado.ioloop.PeriodicCallback(self.keep_alive, 1000)
self.loop.start()
def keep_alive(self):
"""Keep alive"""
if time.time() - self.last > 10:
self.write_message(u'{"message":"keep Alive"}')
self.last = time.time()
def on_message(self, message):
"""on_message"""
print("LedStripWebsocket")
print(message)
data = json.loads(message)
if data['action'] == 'stop':
stop()
if data['action'] == 'start':
start()
if data['action'] == 'change':
if 'effects' in data:
change(data['effects'])
self.write_message(u'{"message":"Changes done!"}')
def on_close(self):
"""on_close"""
print("Websocket Closed")
try:
self.loop.stop()
del clients[self.uuid]
except KeyError:
print("Could not remove {}".format(self.uuid))
except Exception:
print("Exception {}".format(self.uuid))
def make_app():
"""Make App"""
return tornado.web.Application([
(r"/", MainHandler),
(r"/index", MainHandler),
(r"/index.html", MainHandler),
(r"/ledstrip", LedStripWebsocket),
])
if __name__ == "__main__":
app = make_app()
app.listen(8888)
try:
effects = json.load(open("./effect.store", "r"))
change(effects)
except Exception as error:
        print('Could not load from file, error: {}'.format(error))
strip.add_effect_by_name("rainbow", options={"hue_end": 60})
strip.set_background_color(Color(0,0,0))
start()
try:
tornado.ioloop.IOLoop.current().start()
finally:
stop()
| python |
# -*- coding: utf-8 -*-
from unittest import TestCase
class DataTest(TestCase):
"""Obey the testing goat."""
def test_something(self):
"""
        A testing template -- make sure to update tests.yml if you change the
        test name.
"""
matches = True
expected_matches = True
self.assertEqual(matches, expected_matches)
| python |
from selenium.webdriver.common.by import By
from seleniumpm.webpage import Webpage
from seleniumpm.webelements.textfield import TextField
from seleniumpm.locator import Locator
class GooglePage(Webpage):
"""
    This is a Google page that extends the SeleniumPM Webpage class. This class acts as a container for the different
WebElements on the page that an engineer may want to interact with.
"""
def __init__(self, driver, url=None):
super(GooglePage, self).__init__(driver, url)
self.search_field = TextField(driver, Locator.by_name('q'))
def get_result_links(self):
"""
Returns a list of links from a Google search.
:return: Returns a list of links from a Google search.
"""
links = []
elements = self.driver.find_elements(By.XPATH, "//h3[contains(@class, 'r')]/a")
for element in elements:
links.append(element.get_attribute("href"))
return links
| python |
import unittest
from ArrayQueue import ArrayQueue, Empty
class TestArrayQueue(unittest.TestCase):
def setUp(self):
self.q = ArrayQueue()
self.q.enqueue(1)
self.q.enqueue(2)
self.q.enqueue(3)
def test_instantiation(self):
print('Can create an instance')
self.assertIsInstance(self.q, ArrayQueue)
def test_length_checking(self):
print('Can check the length of the queue')
self.assertEqual(len(self.q), 3)
def test_first_method(self):
print('Can return the first element of the queue')
self.assertEqual(self.q.first(), 1)
def test_enqueue_method(self):
print('Can add elements to the queue')
self.q.enqueue(4)
self.q.enqueue(5)
self.assertEqual(len(self.q), 5)
self.assertEqual(self.q.first(), 1)
def test_dequeue_method(self):
print('Can remove elements from the front of the queue')
self.q.enqueue(4)
self.q.enqueue(5)
self.q.dequeue()
self.assertEqual(self.q.dequeue(), 2)
self.assertEqual(len(self.q), 3)
self.assertEqual(self.q.first(), 3)
def test_is_empty_method(self):
print('Can check if the queue is empty')
self.q.dequeue()
self.q.dequeue()
self.q.dequeue()
self.assertEqual(self.q.is_empty(), True)
def test_exception_raising(self):
print('Can raise exception while performing action(s) on an empty queue')
self.q.dequeue()
self.q.dequeue()
self.q.dequeue()
with self.assertRaises(Empty):
self.q.first()
self.q.dequeue()
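# Reference sketch (an assumption, not the ArrayQueue module the tests import):
# a circular-buffer queue exposing the interface exercised above -- enqueue,
# dequeue, first, is_empty, len() and the Empty exception.
class ArrayQueueSketch:
    DEFAULT_CAPACITY = 10
    def __init__(self):
        self._data = [None] * self.DEFAULT_CAPACITY
        self._size = 0
        self._front = 0
    def __len__(self):
        return self._size
    def is_empty(self):
        return self._size == 0
    def first(self):
        if self.is_empty():
            raise Empty('Queue is empty')
        return self._data[self._front]
    def dequeue(self):
        value = self.first()  # raises Empty when the queue is empty
        self._data[self._front] = None
        self._front = (self._front + 1) % len(self._data)
        self._size -= 1
        return value
    def enqueue(self, value):
        if self._size == len(self._data):
            self._resize(2 * len(self._data))
        self._data[(self._front + self._size) % len(self._data)] = value
        self._size += 1
    def _resize(self, capacity):
        old = self._data
        self._data = [None] * capacity
        for k in range(self._size):
            self._data[k] = old[(self._front + k) % len(old)]
        self._front = 0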
if __name__ == '__main__':
unittest.main()
| python |
"""Unit tests for memory-based file-like objects.
StringIO -- for unicode strings
BytesIO -- for bytes
"""
import unittest
from test import support
import io
import _pyio as pyio
import pickle
class MemorySeekTestMixin:
def testInit(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
def testRead(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
self.assertEqual(buf[:1], bytesIo.read(1))
self.assertEqual(buf[1:5], bytesIo.read(4))
self.assertEqual(buf[5:], bytesIo.read(900))
self.assertEqual(self.EOF, bytesIo.read())
def testReadNoArgs(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
self.assertEqual(buf, bytesIo.read())
self.assertEqual(self.EOF, bytesIo.read())
def testSeek(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
bytesIo.read(5)
bytesIo.seek(0)
self.assertEqual(buf, bytesIo.read())
bytesIo.seek(3)
self.assertEqual(buf[3:], bytesIo.read())
self.assertRaises(TypeError, bytesIo.seek, 0.0)
def testTell(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
self.assertEqual(0, bytesIo.tell())
bytesIo.seek(5)
self.assertEqual(5, bytesIo.tell())
bytesIo.seek(10000)
self.assertEqual(10000, bytesIo.tell())
class MemoryTestMixin:
def test_detach(self):
buf = self.ioclass()
self.assertRaises(self.UnsupportedOperation, buf.detach)
def write_ops(self, f, t):
self.assertEqual(f.write(t("blah.")), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(t("Hello.")), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(5), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(t(" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(t("h")), 1)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 1)
def test_write(self):
buf = self.buftype("hello world\n")
memio = self.ioclass(buf)
self.write_ops(memio, self.buftype)
self.assertEqual(memio.getvalue(), buf)
memio = self.ioclass()
self.write_ops(memio, self.buftype)
self.assertEqual(memio.getvalue(), buf)
self.assertRaises(TypeError, memio.write, None)
memio.close()
self.assertRaises(ValueError, memio.write, self.buftype(""))
def test_writelines(self):
buf = self.buftype("1234567890")
memio = self.ioclass()
self.assertEqual(memio.writelines([buf] * 100), None)
self.assertEqual(memio.getvalue(), buf * 100)
memio.writelines([])
self.assertEqual(memio.getvalue(), buf * 100)
memio = self.ioclass()
self.assertRaises(TypeError, memio.writelines, [buf] + [1])
self.assertEqual(memio.getvalue(), buf)
self.assertRaises(TypeError, memio.writelines, None)
memio.close()
self.assertRaises(ValueError, memio.writelines, [])
def test_writelines_error(self):
memio = self.ioclass()
def error_gen():
yield self.buftype('spam')
raise KeyboardInterrupt
self.assertRaises(KeyboardInterrupt, memio.writelines, error_gen())
def test_truncate(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertRaises(ValueError, memio.truncate, -1)
memio.seek(6)
self.assertEqual(memio.truncate(), 6)
self.assertEqual(memio.getvalue(), buf[:6])
self.assertEqual(memio.truncate(4), 4)
self.assertEqual(memio.getvalue(), buf[:4])
self.assertEqual(memio.tell(), 6)
memio.seek(0, 2)
memio.write(buf)
self.assertEqual(memio.getvalue(), buf[:4] + buf)
pos = memio.tell()
self.assertEqual(memio.truncate(None), pos)
self.assertEqual(memio.tell(), pos)
self.assertRaises(TypeError, memio.truncate, '0')
memio.close()
self.assertRaises(ValueError, memio.truncate, 0)
def test_init(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.getvalue(), buf)
memio = self.ioclass(None)
self.assertEqual(memio.getvalue(), self.EOF)
memio.__init__(buf * 2)
self.assertEqual(memio.getvalue(), buf * 2)
memio.__init__(buf)
self.assertEqual(memio.getvalue(), buf)
self.assertRaises(TypeError, memio.__init__, [])
def test_read(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.read(0), self.EOF)
self.assertEqual(memio.read(1), buf[:1])
self.assertEqual(memio.read(4), buf[1:5])
self.assertEqual(memio.read(900), buf[5:])
self.assertEqual(memio.read(), self.EOF)
memio.seek(0)
self.assertEqual(memio.read(), buf)
self.assertEqual(memio.read(), self.EOF)
self.assertEqual(memio.tell(), 10)
memio.seek(0)
self.assertEqual(memio.read(-1), buf)
memio.seek(0)
self.assertEqual(type(memio.read()), type(buf))
memio.seek(100)
self.assertEqual(type(memio.read()), type(buf))
memio.seek(0)
self.assertEqual(memio.read(None), buf)
self.assertRaises(TypeError, memio.read, '')
memio.close()
self.assertRaises(ValueError, memio.read)
def test_readline(self):
buf = self.buftype("1234567890\n")
memio = self.ioclass(buf * 2)
self.assertEqual(memio.readline(0), self.EOF)
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), self.EOF)
memio.seek(0)
self.assertEqual(memio.readline(5), buf[:5])
self.assertEqual(memio.readline(5), buf[5:10])
self.assertEqual(memio.readline(5), buf[10:15])
memio.seek(0)
self.assertEqual(memio.readline(-1), buf)
memio.seek(0)
self.assertEqual(memio.readline(0), self.EOF)
buf = self.buftype("1234567890\n")
memio = self.ioclass((buf * 3)[:-1])
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), buf[:-1])
self.assertEqual(memio.readline(), self.EOF)
memio.seek(0)
self.assertEqual(type(memio.readline()), type(buf))
self.assertEqual(memio.readline(), buf)
self.assertRaises(TypeError, memio.readline, '')
memio.close()
self.assertRaises(ValueError, memio.readline)
def test_readlines(self):
buf = self.buftype("1234567890\n")
memio = self.ioclass(buf * 10)
self.assertEqual(memio.readlines(), [buf] * 10)
memio.seek(5)
self.assertEqual(memio.readlines(), [buf[5:]] + [buf] * 9)
memio.seek(0)
self.assertEqual(memio.readlines(15), [buf] * 2)
memio.seek(0)
self.assertEqual(memio.readlines(-1), [buf] * 10)
memio.seek(0)
self.assertEqual(memio.readlines(0), [buf] * 10)
memio.seek(0)
self.assertEqual(type(memio.readlines()[0]), type(buf))
memio.seek(0)
self.assertEqual(memio.readlines(None), [buf] * 10)
self.assertRaises(TypeError, memio.readlines, '')
memio.close()
self.assertRaises(ValueError, memio.readlines)
def test_iterator(self):
buf = self.buftype("1234567890\n")
memio = self.ioclass(buf * 10)
self.assertEqual(iter(memio), memio)
self.assertTrue(hasattr(memio, '__iter__'))
self.assertTrue(hasattr(memio, '__next__'))
i = 0
for line in memio:
self.assertEqual(line, buf)
i += 1
self.assertEqual(i, 10)
memio.seek(0)
i = 0
for line in memio:
self.assertEqual(line, buf)
i += 1
self.assertEqual(i, 10)
memio = self.ioclass(buf * 2)
memio.close()
self.assertRaises(ValueError, memio.__next__)
def test_getvalue(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.getvalue(), buf)
memio.read()
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(type(memio.getvalue()), type(buf))
memio = self.ioclass(buf * 1000)
self.assertEqual(memio.getvalue()[-3:], self.buftype("890"))
memio = self.ioclass(buf)
memio.close()
self.assertRaises(ValueError, memio.getvalue)
def test_seek(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
memio.read(5)
self.assertRaises(ValueError, memio.seek, -1)
self.assertRaises(ValueError, memio.seek, 1, -1)
self.assertRaises(ValueError, memio.seek, 1, 3)
self.assertEqual(memio.seek(0), 0)
self.assertEqual(memio.seek(0, 0), 0)
self.assertEqual(memio.read(), buf)
self.assertEqual(memio.seek(3), 3)
self.assertEqual(memio.seek(0, 1), 3)
self.assertEqual(memio.read(), buf[3:])
self.assertEqual(memio.seek(len(buf)), len(buf))
self.assertEqual(memio.read(), self.EOF)
memio.seek(len(buf) + 1)
self.assertEqual(memio.read(), self.EOF)
self.assertEqual(memio.seek(0, 2), len(buf))
self.assertEqual(memio.read(), self.EOF)
memio.close()
self.assertRaises(ValueError, memio.seek, 0)
def test_overseek(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.seek(len(buf) + 1), 11)
self.assertEqual(memio.read(), self.EOF)
self.assertEqual(memio.tell(), 11)
self.assertEqual(memio.getvalue(), buf)
memio.write(self.EOF)
self.assertEqual(memio.getvalue(), buf)
memio.write(buf)
self.assertEqual(memio.getvalue(), buf + self.buftype('\0') + buf)
def test_tell(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.tell(), 0)
memio.seek(5)
self.assertEqual(memio.tell(), 5)
memio.seek(10000)
self.assertEqual(memio.tell(), 10000)
memio.close()
self.assertRaises(ValueError, memio.tell)
def test_flush(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.flush(), None)
def test_flags(self):
memio = self.ioclass()
self.assertEqual(memio.writable(), True)
self.assertEqual(memio.readable(), True)
self.assertEqual(memio.seekable(), True)
self.assertEqual(memio.isatty(), False)
self.assertEqual(memio.closed, False)
memio.close()
self.assertRaises(ValueError, memio.writable)
self.assertRaises(ValueError, memio.readable)
self.assertRaises(ValueError, memio.seekable)
self.assertRaises(ValueError, memio.isatty)
self.assertEqual(memio.closed, True)
def test_subclassing(self):
buf = self.buftype("1234567890")
def test1():
class MemIO(self.ioclass):
pass
m = MemIO(buf)
return m.getvalue()
def test2():
class MemIO(self.ioclass):
def __init__(me, a, b):
self.ioclass.__init__(me, a)
m = MemIO(buf, None)
return m.getvalue()
self.assertEqual(test1(), buf)
self.assertEqual(test2(), buf)
def test_instance_dict_leak(self):
# Test case for issue #6242.
# This will be caught by regrtest.py -R if this leak.
for _ in range(100):
memio = self.ioclass()
memio.foo = 1
def test_pickling(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
memio.foo = 42
memio.seek(2)
class PickleTestMemIO(self.ioclass):
def __init__(me, initvalue, foo):
self.ioclass.__init__(me, initvalue)
me.foo = foo
# __getnewargs__ is undefined on purpose. This checks that PEP 307
# is used to provide pickling support.
# Pickle expects the class to be on the module level. Here we use a
# little hack to allow the PickleTestMemIO class to derive from
        # self.ioclass without having to define all combinations explicitly on
# the module-level.
import __main__
PickleTestMemIO.__module__ = '__main__'
PickleTestMemIO.__qualname__ = PickleTestMemIO.__name__
__main__.PickleTestMemIO = PickleTestMemIO
submemio = PickleTestMemIO(buf, 80)
submemio.seek(2)
# We only support pickle protocol 2 and onward since we use extended
# __reduce__ API of PEP 307 to provide pickling support.
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for obj in (memio, submemio):
obj2 = pickle.loads(pickle.dumps(obj, protocol=proto))
self.assertEqual(obj.getvalue(), obj2.getvalue())
self.assertEqual(obj.__class__, obj2.__class__)
self.assertEqual(obj.foo, obj2.foo)
self.assertEqual(obj.tell(), obj2.tell())
obj2.close()
self.assertRaises(ValueError, pickle.dumps, obj2, proto)
del __main__.PickleTestMemIO
class BytesIOMixin:
def test_getbuffer(self):
memio = self.ioclass(b"1234567890")
buf = memio.getbuffer()
self.assertEqual(bytes(buf), b"1234567890")
memio.seek(5)
buf = memio.getbuffer()
self.assertEqual(bytes(buf), b"1234567890")
# Trying to change the size of the BytesIO while a buffer is exported
# raises a BufferError.
self.assertRaises(BufferError, memio.write, b'x' * 100)
self.assertRaises(BufferError, memio.truncate)
self.assertRaises(BufferError, memio.close)
self.assertFalse(memio.closed)
# Mutating the buffer updates the BytesIO
buf[3:6] = b"abc"
self.assertEqual(bytes(buf), b"123abc7890")
self.assertEqual(memio.getvalue(), b"123abc7890")
# After the buffer gets released, we can resize and close the BytesIO
# again
del buf
support.gc_collect()
memio.truncate()
memio.close()
self.assertRaises(ValueError, memio.getbuffer)
class PyBytesIOTest(MemoryTestMixin, MemorySeekTestMixin,
BytesIOMixin, unittest.TestCase):
UnsupportedOperation = pyio.UnsupportedOperation
@staticmethod
def buftype(s):
return s.encode("ascii")
ioclass = pyio.BytesIO
EOF = b""
def test_read1(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertRaises(TypeError, memio.read1)
self.assertEqual(memio.read(), buf)
def test_readinto(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
b = bytearray(b"hello")
self.assertEqual(memio.readinto(b), 5)
self.assertEqual(b, b"12345")
self.assertEqual(memio.readinto(b), 5)
self.assertEqual(b, b"67890")
self.assertEqual(memio.readinto(b), 0)
self.assertEqual(b, b"67890")
b = bytearray(b"hello world")
memio.seek(0)
self.assertEqual(memio.readinto(b), 10)
self.assertEqual(b, b"1234567890d")
b = bytearray(b"")
memio.seek(0)
self.assertEqual(memio.readinto(b), 0)
self.assertEqual(b, b"")
self.assertRaises(TypeError, memio.readinto, '')
import array
a = array.array('b', b"hello world")
memio = self.ioclass(buf)
memio.readinto(a)
self.assertEqual(a.tobytes(), b"1234567890d")
memio.close()
self.assertRaises(ValueError, memio.readinto, b)
memio = self.ioclass(b"123")
b = bytearray()
memio.seek(42)
memio.readinto(b)
self.assertEqual(b, b"")
def test_relative_seek(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.seek(-1, 1), 0)
self.assertEqual(memio.seek(3, 1), 3)
self.assertEqual(memio.seek(-4, 1), 0)
self.assertEqual(memio.seek(-1, 2), 9)
self.assertEqual(memio.seek(1, 1), 10)
self.assertEqual(memio.seek(1, 2), 11)
memio.seek(-3, 2)
self.assertEqual(memio.read(), buf[-3:])
memio.seek(0)
memio.seek(1, 1)
self.assertEqual(memio.read(), buf[1:])
def test_unicode(self):
memio = self.ioclass()
self.assertRaises(TypeError, self.ioclass, "1234567890")
self.assertRaises(TypeError, memio.write, "1234567890")
self.assertRaises(TypeError, memio.writelines, ["1234567890"])
def test_bytes_array(self):
buf = b"1234567890"
import array
a = array.array('b', list(buf))
memio = self.ioclass(a)
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(memio.write(a), 10)
self.assertEqual(memio.getvalue(), buf)
def test_issue5449(self):
buf = self.buftype("1234567890")
self.ioclass(initial_bytes=buf)
self.assertRaises(TypeError, self.ioclass, buf, foo=None)
class TextIOTestMixin:
def test_newlines_property(self):
memio = self.ioclass(newline=None)
# The C StringIO decodes newlines in write() calls, but the Python
# implementation only does when reading. This function forces them to
# be decoded for testing.
def force_decode():
memio.seek(0)
memio.read()
self.assertEqual(memio.newlines, None)
memio.write("a\n")
force_decode()
self.assertEqual(memio.newlines, "\n")
memio.write("b\r\n")
force_decode()
self.assertEqual(memio.newlines, ("\n", "\r\n"))
memio.write("c\rd")
force_decode()
self.assertEqual(memio.newlines, ("\r", "\n", "\r\n"))
def test_relative_seek(self):
memio = self.ioclass()
self.assertRaises(OSError, memio.seek, -1, 1)
self.assertRaises(OSError, memio.seek, 3, 1)
self.assertRaises(OSError, memio.seek, -3, 1)
self.assertRaises(OSError, memio.seek, -1, 2)
self.assertRaises(OSError, memio.seek, 1, 1)
self.assertRaises(OSError, memio.seek, 1, 2)
def test_textio_properties(self):
memio = self.ioclass()
# These are just dummy values but we nevertheless check them for fear
# of unexpected breakage.
self.assertIsNone(memio.encoding)
self.assertIsNone(memio.errors)
self.assertFalse(memio.line_buffering)
def test_newline_default(self):
memio = self.ioclass("a\nb\r\nc\rd")
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"])
self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd")
memio = self.ioclass()
self.assertEqual(memio.write("a\nb\r\nc\rd"), 8)
memio.seek(0)
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"])
self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd")
def test_newline_none(self):
# newline=None
memio = self.ioclass("a\nb\r\nc\rd", newline=None)
self.assertEqual(list(memio), ["a\n", "b\n", "c\n", "d"])
memio.seek(0)
self.assertEqual(memio.read(1), "a")
self.assertEqual(memio.read(2), "\nb")
self.assertEqual(memio.read(2), "\nc")
self.assertEqual(memio.read(1), "\n")
self.assertEqual(memio.getvalue(), "a\nb\nc\nd")
memio = self.ioclass(newline=None)
self.assertEqual(2, memio.write("a\n"))
self.assertEqual(3, memio.write("b\r\n"))
self.assertEqual(3, memio.write("c\rd"))
memio.seek(0)
self.assertEqual(memio.read(), "a\nb\nc\nd")
self.assertEqual(memio.getvalue(), "a\nb\nc\nd")
memio = self.ioclass("a\r\nb", newline=None)
self.assertEqual(memio.read(3), "a\nb")
def test_newline_empty(self):
# newline=""
memio = self.ioclass("a\nb\r\nc\rd", newline="")
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"])
memio.seek(0)
self.assertEqual(memio.read(4), "a\nb\r")
self.assertEqual(memio.read(2), "\nc")
self.assertEqual(memio.read(1), "\r")
self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd")
memio = self.ioclass(newline="")
self.assertEqual(2, memio.write("a\n"))
self.assertEqual(2, memio.write("b\r"))
self.assertEqual(2, memio.write("\nc"))
self.assertEqual(2, memio.write("\rd"))
memio.seek(0)
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"])
self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd")
def test_newline_lf(self):
# newline="\n"
memio = self.ioclass("a\nb\r\nc\rd", newline="\n")
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"])
self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd")
memio = self.ioclass(newline="\n")
self.assertEqual(memio.write("a\nb\r\nc\rd"), 8)
memio.seek(0)
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"])
self.assertEqual(memio.getvalue(), "a\nb\r\nc\rd")
def test_newline_cr(self):
# newline="\r"
memio = self.ioclass("a\nb\r\nc\rd", newline="\r")
self.assertEqual(memio.read(), "a\rb\r\rc\rd")
memio.seek(0)
self.assertEqual(list(memio), ["a\r", "b\r", "\r", "c\r", "d"])
self.assertEqual(memio.getvalue(), "a\rb\r\rc\rd")
memio = self.ioclass(newline="\r")
self.assertEqual(memio.write("a\nb\r\nc\rd"), 8)
memio.seek(0)
self.assertEqual(list(memio), ["a\r", "b\r", "\r", "c\r", "d"])
memio.seek(0)
self.assertEqual(memio.readlines(), ["a\r", "b\r", "\r", "c\r", "d"])
self.assertEqual(memio.getvalue(), "a\rb\r\rc\rd")
def test_newline_crlf(self):
# newline="\r\n"
memio = self.ioclass("a\nb\r\nc\rd", newline="\r\n")
self.assertEqual(memio.read(), "a\r\nb\r\r\nc\rd")
memio.seek(0)
self.assertEqual(list(memio), ["a\r\n", "b\r\r\n", "c\rd"])
memio.seek(0)
self.assertEqual(memio.readlines(), ["a\r\n", "b\r\r\n", "c\rd"])
self.assertEqual(memio.getvalue(), "a\r\nb\r\r\nc\rd")
memio = self.ioclass(newline="\r\n")
self.assertEqual(memio.write("a\nb\r\nc\rd"), 8)
memio.seek(0)
self.assertEqual(list(memio), ["a\r\n", "b\r\r\n", "c\rd"])
self.assertEqual(memio.getvalue(), "a\r\nb\r\r\nc\rd")
def test_issue5265(self):
# StringIO can duplicate newlines in universal newlines mode
memio = self.ioclass("a\r\nb\r\n", newline=None)
self.assertEqual(memio.read(5), "a\nb\n")
self.assertEqual(memio.getvalue(), "a\nb\n")
def test_newline_argument(self):
self.assertRaises(TypeError, self.ioclass, newline=b"\n")
self.assertRaises(ValueError, self.ioclass, newline="error")
# These should not raise an error
for newline in (None, "", "\n", "\r", "\r\n"):
self.ioclass(newline=newline)
class PyStringIOTest(MemoryTestMixin, MemorySeekTestMixin,
TextIOTestMixin, unittest.TestCase):
buftype = str
ioclass = pyio.StringIO
UnsupportedOperation = pyio.UnsupportedOperation
EOF = ""
def test_lone_surrogates(self):
# Issue #20424
memio = self.ioclass('\ud800')
self.assertEqual(memio.read(), '\ud800')
memio = self.ioclass()
memio.write('\ud800')
self.assertEqual(memio.getvalue(), '\ud800')
class PyStringIOPickleTest(TextIOTestMixin, unittest.TestCase):
"""Test if pickle restores properly the internal state of StringIO.
"""
buftype = str
UnsupportedOperation = pyio.UnsupportedOperation
EOF = ""
class ioclass(pyio.StringIO):
def __new__(cls, *args, **kwargs):
return pickle.loads(pickle.dumps(pyio.StringIO(*args, **kwargs)))
def __init__(self, *args, **kwargs):
pass
class CBytesIOTest(PyBytesIOTest):
ioclass = io.BytesIO
UnsupportedOperation = io.UnsupportedOperation
def test_getstate(self):
memio = self.ioclass()
state = memio.__getstate__()
self.assertEqual(len(state), 3)
bytearray(state[0]) # Check if state[0] supports the buffer interface.
self.assertIsInstance(state[1], int)
if state[2] is not None:
self.assertIsInstance(state[2], dict)
memio.close()
self.assertRaises(ValueError, memio.__getstate__)
def test_setstate(self):
# This checks whether __setstate__ does proper input validation.
memio = self.ioclass()
memio.__setstate__((b"no error", 0, None))
memio.__setstate__((bytearray(b"no error"), 0, None))
memio.__setstate__((b"no error", 0, {'spam': 3}))
self.assertRaises(ValueError, memio.__setstate__, (b"", -1, None))
self.assertRaises(TypeError, memio.__setstate__, ("unicode", 0, None))
self.assertRaises(TypeError, memio.__setstate__, (b"", 0.0, None))
self.assertRaises(TypeError, memio.__setstate__, (b"", 0, 0))
self.assertRaises(TypeError, memio.__setstate__, (b"len-test", 0))
self.assertRaises(TypeError, memio.__setstate__)
self.assertRaises(TypeError, memio.__setstate__, 0)
memio.close()
self.assertRaises(ValueError, memio.__setstate__, (b"closed", 0, None))
check_sizeof = support.check_sizeof
@support.cpython_only
def test_sizeof(self):
basesize = support.calcobjsize('P2nN2Pn')
check = self.check_sizeof
self.assertEqual(object.__sizeof__(io.BytesIO()), basesize)
check(io.BytesIO(), basesize )
check(io.BytesIO(b'a'), basesize + 1 + 1 )
check(io.BytesIO(b'a' * 1000), basesize + 1000 + 1 )
class CStringIOTest(PyStringIOTest):
ioclass = io.StringIO
UnsupportedOperation = io.UnsupportedOperation
# XXX: For the Python version of io.StringIO, this is highly
# dependent on the encoding used for the underlying buffer.
def test_widechar(self):
buf = self.buftype("\U0002030a\U00020347")
memio = self.ioclass(buf)
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(memio.write(buf), len(buf))
self.assertEqual(memio.tell(), len(buf))
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(memio.write(buf), len(buf))
self.assertEqual(memio.tell(), len(buf) * 2)
self.assertEqual(memio.getvalue(), buf + buf)
def test_getstate(self):
memio = self.ioclass()
state = memio.__getstate__()
self.assertEqual(len(state), 4)
self.assertIsInstance(state[0], str)
self.assertIsInstance(state[1], str)
self.assertIsInstance(state[2], int)
if state[3] is not None:
self.assertIsInstance(state[3], dict)
memio.close()
self.assertRaises(ValueError, memio.__getstate__)
def test_setstate(self):
# This checks whether __setstate__ does proper input validation.
memio = self.ioclass()
memio.__setstate__(("no error", "\n", 0, None))
memio.__setstate__(("no error", "", 0, {'spam': 3}))
self.assertRaises(ValueError, memio.__setstate__, ("", "f", 0, None))
self.assertRaises(ValueError, memio.__setstate__, ("", "", -1, None))
self.assertRaises(TypeError, memio.__setstate__, (b"", "", 0, None))
self.assertRaises(TypeError, memio.__setstate__, ("", b"", 0, None))
self.assertRaises(TypeError, memio.__setstate__, ("", "", 0.0, None))
self.assertRaises(TypeError, memio.__setstate__, ("", "", 0, 0))
self.assertRaises(TypeError, memio.__setstate__, ("len-test", 0))
self.assertRaises(TypeError, memio.__setstate__)
self.assertRaises(TypeError, memio.__setstate__, 0)
memio.close()
self.assertRaises(ValueError, memio.__setstate__, ("closed", "", 0, None))
class CStringIOPickleTest(PyStringIOPickleTest):
UnsupportedOperation = io.UnsupportedOperation
class ioclass(io.StringIO):
def __new__(cls, *args, **kwargs):
return pickle.loads(pickle.dumps(io.StringIO(*args, **kwargs)))
def __init__(self, *args, **kwargs):
pass
def test_main():
tests = [PyBytesIOTest, PyStringIOTest, CBytesIOTest, CStringIOTest,
PyStringIOPickleTest, CStringIOPickleTest]
support.run_unittest(*tests)
if __name__ == '__main__':
test_main()
| python |
# Generated by Django 3.1.3 on 2022-01-18 13:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('onlinecourse', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='choice',
name='question_id',
),
migrations.AddField(
model_name='choice',
name='question_id',
field=models.ManyToManyField(related_name='choices', to='onlinecourse.Question'),
),
]
| python |
"""Tools to turn atmospheric profiles into their record representation.
MonoRTM takes gas amounts either as column density in molecules/cm² or
as molecular/volume mixing ratios in molecules/molecules. Internally the
two are separated by checking if the given value is smaller or larger than
one (monortm.f90, lines 421-422). Mixing ratios of all constituents are
relative to dry air.
Conversion between column density and mixing ratio is given by
column density = mixing ratio · dz · p / k / T
The broadening gases in element 8 of record 2.1.2 must always be given as
a column density. I cannot find anywhere in the documentation what these
broadening gases are but it seems that they are the noble gases since the
example profiles have mixing ratios of about 0.009 that are fairly constant
with height.
"""
from monortm.records import (Record21, Record211_IFORM0, Record211_IFORM1,
Record212_first, Record212_other)
# Molecular/Volume mixing ratios
# Source: https://en.wikipedia.org/wiki/Atmosphere_of_Earth#Composition
mixing_ratio_N2 = 0.78084
mixing_ratio_O2 = 0.20946
mixing_ratio_Ar = 0.00934
mixing_ratio_CO2 = 0.00036 # Remaining parts
boltzmann = 1.3806485e-23
avogadro = 6.02214e23
Rdry = 287.
Rwat = 461.5
def layer(zs, ps, Ts, qvap, qliq, IFORM=1):
"""Create the records for an atmospheric layer.
Contains only a minimal set of species. Make sure to set NMOL to 22.
"""
assert IFORM == 0 or IFORM == 1
assert len(zs) == 2
assert len(ps) == 2
assert len(Ts) == 2
dz = zs[1] - zs[0]
assert dz > 0
pave = 0.5 * sum(ps)
Tave = 0.5 * sum(Ts)
Rave = (1-qvap)*Rdry + qvap*Rwat
ρave = 100*pave / Tave / Rave
# Calculate column number density of water from specific humidity
H2O = (qvap # Specific humidity [kg/kg]
* ρave # Density of water vapor → [kg/m³]
/ 0.018 # 0.018 kg of water is 1 mol → [mol/m³]
* avogadro # Number density → [molecules/m³]
* dz # Column number density → [molecules/m²]
* 1.0e-4 # MonoRTM wants cm² → [molecules/cm²]
)
    # Cloud amount in mm contained in column
CLW = (qliq # Specific CLW [kg/kg]
* ρave # Density of CLW [kg/m³]
* dz # Column CLW [kg/m²], corresponds to [mm]
)
if CLW == 0: CLW = None
# Broadening gas amount must be given as column density (see __doc__) ↓cm²
broadening = mixing_ratio_Ar * dz * (pave*100) / Tave / boltzmann * 1.0e-4
# Give species 1 (H2O), 2 (CO2), 7 (O2) and 22 (N2)
row1 = [H2O, mixing_ratio_CO2, 0., 0., 0., 0., mixing_ratio_O2]
row2 = [ 0., 0., 0., 0., 0., 0., 0., 0.]
row3 = [ 0., 0., 0., 0., 0., 0., mixing_ratio_N2, None]
# Select Record matching IFORM parameter
Record211 = Record211_IFORM0 if IFORM == 0 else Record211_IFORM1
return [Record211(PAVE=pave, TAVE=Tave, ALTZB=zs[0]/1000, PZB=ps[0],
TZB=Ts[0], ALTZT=zs[1]/1000, PZT=ps[1], TZT=Ts[1],
CLW=CLW), # z in km
Record212_first(WKL=row1, WBROADL=broadening),
Record212_other(WKL=row2),
Record212_other(WKL=row3)
]
def from_mwrt_profile(z, p, T, lnq):
"""Output records for building MONORTM_PROF.IN from z, p, T, lnq.
Uses the partioning scheme from mwrt.
"""
from mwrt.fap import partition_lnq
qvap, qliq = partition_lnq(p, T, lnq)
zs = [(float(zb), float(zt)) for zb, zt in zip(z[:-1], z[1:])]
ps = [(float(pb), float(pt)) for pb, pt in zip(p[:-1], p[1:])]
Ts = [(float(Tb), float(Tt)) for Tb, Tt in zip(T[:-1], T[1:])]
qvaps = [0.5*(qb + qt) for qb, qt in zip(qvap[:-1], qvap[1:])]
qliqs = [0.5*(qb + qt) for qb, qt in zip(qliq[:-1], qliq[1:])]
out = []
H1 = z[0] / 1000.
H2 = z[-1] / 1000.
out.append(Record21(IFORM=1, NLAYRS=len(zs), NMOL=22, SECNTO=1.,
H1=H1, H2=H2, ANGLE=0., LEN=0))
for z, p, T, qvap, qliq in zip(zs, ps, Ts, qvaps, qliqs):
out.extend(layer(z, p, T, qvap, qliq, IFORM=1))
return out
| python |
import numpy as np
import argparse
import cv2
from cnn.neural_network import CNN
from keras.utils import np_utils
from keras.optimizers import SGD
from sklearn.datasets import fetch_mldata
from sklearn.model_selection import train_test_split
# Parse the Arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--save_model", type=int, default=-1)
ap.add_argument("-l", "--load_model", type=int, default=-1)
ap.add_argument("-w", "--save_weights", type=str)
args = vars(ap.parse_args())
# Read/Download MNIST Dataset
print('Loading MNIST Dataset...')
dataset = fetch_mldata('MNIST Original')
# Read the MNIST data as array of 784 pixels and convert to 28x28 image matrix
mnist_data = dataset.data.reshape((dataset.data.shape[0], 28, 28))
mnist_data = mnist_data[:, np.newaxis, :, :]
# Divide data into testing and training sets.
train_img, test_img, train_labels, test_labels = train_test_split(mnist_data/255.0, dataset.target.astype("int"), test_size=0.1)
# Each image is now a 28x28 matrix of rows and columns.
img_rows, img_columns = 28, 28
# Transform training and testing data to 10 classes in range [0,classes] ; num. of classes = 0 to 9 = 10 classes
total_classes = 10 # 0 to 9 labels
train_labels = np_utils.to_categorical(train_labels, 10)
test_labels = np_utils.to_categorical(test_labels, 10)
# Define and compile the SGD optimizer and CNN model
print('\n Compiling model...')
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
clf = CNN.build(width=28, height=28, depth=1, total_classes=10, Saved_Weights_Path=args["save_weights"] if args["load_model"] > 0 else None)
clf.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])
# Initially train and test the model; if weights were saved already, load them via the command-line arguments.
b_size = 128 # Batch size
num_epoch = 20 # Number of epochs
verb = 1 # Verbose
# Train the model from scratch unless a pre-trained model was loaded via the load_model argument.
if args["load_model"] < 0:
print('\nTraining the Model...')
clf.fit(train_img, train_labels, batch_size=b_size, epochs=num_epoch,verbose=verb)
# Evaluate accuracy and loss function of test data
print('Evaluating Accuracy and Loss Function...')
loss, accuracy = clf.evaluate(test_img, test_labels, batch_size=128, verbose=1)
print('Accuracy of Model: {:.2f}%'.format(accuracy * 100))
# Save the pre-trained model.
if args["save_model"] > 0:
print('Saving weights to file...')
clf.save_weights(args["save_weights"], overwrite=True)
# Show the images using OpenCV, making random selections.
for num in np.random.choice(np.arange(0, len(test_labels)), size=(5,)):
# Predict the label of digit using CNN.
probs = clf.predict(test_img[np.newaxis, num])
prediction = probs.argmax(axis=1)
# Resize the Image to 100x100 from 28x28 for better view.
image = (test_img[num][0] * 255).astype("uint8")
image = cv2.merge([image] * 3)
image = cv2.resize(image, (100, 100), interpolation=cv2.INTER_LINEAR)
cv2.putText(image, str(prediction[0]), (5, 20),cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
# Show and print the Actual Image and Predicted Label Value
print('Predicted Label: {}, Actual Value: {}'.format(prediction[0],np.argmax(test_labels[num])))
cv2.imshow('Digits', image)
cv2.waitKey(0)
#---------------------- EOC ---------------------
| python |
# -*- coding: utf-8 -*-
"""
Integrate with Google using openid
:copyright: (c) 2014 by Pradip Caulagi.
:license: MIT, see LICENSE for more details.
"""
import logging
from flask import Flask, render_template, request, g, session, flash, \
redirect, url_for, abort
from flask import Blueprint
from flask_oauth import OAuth
from app.project import config
from app.users.models import User
from app.bets.models import Bet
oauth = OAuth()
facebook = oauth.remote_app('facebook',
base_url='https://graph.facebook.com/',
request_token_url=None,
access_token_url='/oauth/access_token',
authorize_url='https://www.facebook.com/dialog/oauth',
consumer_key=config.FACEBOOK_APP_ID,
consumer_secret=config.FACEBOOK_APP_SECRET,
request_token_params={'scope': 'email'}
)
# setup logger
logger = logging.getLogger('shakuni-users')
# set up blueprint
users_blueprint = Blueprint('users_blueprint', __name__)
def get_or_create_user(data):
"""Store this user"""
try:
u = User.objects.get(email = data.get('email'))
u.access_token = session['oauth_token'][0]
return u.save()
except User.DoesNotExist:
return User.objects.create(
facebook_id = data.get('id'),
name = data.get('name'),
first_name = data.get('first_name'),
            last_name = data.get('last_name'),
email = data.get('email'),
gender = data.get('gender'),
provider = "facebook",
access_token = session['oauth_token'][0],
)
def init(application):
@application.before_request
def before_request():
g.user = None
if 'oauth_token' in session:
g.user = User.objects(access_token = session['oauth_token'][0]).first()
@users_blueprint.route('/login')
def login():
return render_template("users/login.html")
@users_blueprint.route('/fb-login')
def fb_login():
return facebook.authorize(callback=url_for('users_blueprint.facebook_authorized',
next=request.args.get('next') or request.referrer or None,
_external=True))
@users_blueprint.route('/fb-login/authorized')
@facebook.authorized_handler
def facebook_authorized(resp):
if resp is None:
return 'Access denied: reason=%s error=%s' % (
request.args['error_reason'],
request.args['error_description']
)
session['oauth_token'] = (resp['access_token'], '')
me = facebook.get('/me')
        print(me.data)
g.user = get_or_create_user(me.data)
return redirect(url_for("users_blueprint.me"))
@facebook.tokengetter
def get_facebook_oauth_token():
return session.get('oauth_token')
@users_blueprint.route('/logout')
def logout():
session.pop('oauth_token', None)
flash(u'You have been signed out')
return redirect(url_for("users_blueprint.login"))
@users_blueprint.route('/me')
def me():
if g.user is None:
abort(401)
bets = Bet.objects(user = g.user)
return render_template('users/me.html', user=g.user, bets=bets)
| python |
#
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""PyPI Package definition for greentea-host (htrun)."""
import os
from io import open
from distutils.core import setup
from setuptools import find_packages
DESCRIPTION = (
"greentea-host (htrun) is a command line tool "
"that enables automated testing on embedded platforms."
)
OWNER_NAMES = "Mbed team"
OWNER_EMAILS = "[email protected]"
repository_dir = os.path.dirname(__file__)
def read(fname):
"""Read the string content of a file.
Args:
        fname: the name of the file to read relative to this file's directory.
Returns:
String content of the opened file.
"""
with open(os.path.join(repository_dir, fname), mode="r") as f:
return f.read()
with open(os.path.join(repository_dir, "requirements.txt")) as fh:
requirements = fh.readlines()
with open(os.path.join(repository_dir, "test_requirements.txt")) as fh:
test_requirements = fh.readlines()
python_requires = ">=3.5.*,<4"
setup(
name="greentea-host",
description=DESCRIPTION,
long_description=read("README.md"),
long_description_content_type="text/markdown",
author=OWNER_NAMES,
author_email=OWNER_EMAILS,
maintainer=OWNER_NAMES,
maintainer_email=OWNER_EMAILS,
url="https://github.com/ARMmbed/greentea",
packages=find_packages("src"),
package_dir={"": "src"},
license="Apache-2.0",
test_suite="test",
entry_points={
"console_scripts": ["htrun=htrun.htrun:main"],
},
classifiers=(
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python",
"Topic :: Software Development :: Build Tools",
"Topic :: Software Development :: Embedded Systems",
"Topic :: Software Development :: Testing",
),
include_package_data=True,
use_scm_version=True,
python_requires=python_requires,
install_requires=requirements,
tests_require=test_requirements,
extras_require={"pyocd": ["pyocd>=0.32.0"]},
)
| python |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""create_gt_txt_from_mat.py"""
import os
import argparse
import tqdm
import numpy as np
from scipy.io import loadmat
from cython_bbox import bbox_overlaps
_MAP = {
'0': '0--Parade',
'1': '1--Handshaking',
'2': '2--Demonstration',
'3': '3--Riot',
'4': '4--Dancing',
'5': '5--Car_Accident',
'6': '6--Funeral',
'7': '7--Cheering',
'8': '8--Election_Campain',
'9': '9--Press_Conference',
'10': '10--People_Marching',
'11': '11--Meeting',
'12': '12--Group',
'13': '13--Interview',
'14': '14--Traffic',
'15': '15--Stock_Market',
'16': '16--Award_Ceremony',
'17': '17--Ceremony',
'18': '18--Concerts',
'19': '19--Couple',
'20': '20--Family_Group',
'21': '21--Festival',
'22': '22--Picnic',
'23': '23--Shoppers',
'24': '24--Soldier_Firing',
'25': '25--Soldier_Patrol',
'26': '26--Soldier_Drilling',
'27': '27--Spa',
'28': '28--Sports_Fan',
'29': '29--Students_Schoolkids',
'30': '30--Surgeons',
'31': '31--Waiter_Waitress',
'32': '32--Worker_Laborer',
'33': '33--Running',
'34': '34--Baseball',
'35': '35--Basketball',
'36': '36--Football',
'37': '37--Soccer',
'38': '38--Tennis',
'39': '39--Ice_Skating',
'40': '40--Gymnastics',
'41': '41--Swimming',
'42': '42--Car_Racing',
'43': '43--Row_Boat',
'44': '44--Aerobics',
'45': '45--Balloonist',
'46': '46--Jockey',
'47': '47--Matador_Bullfighter',
'48': '48--Parachutist_Paratrooper',
'49': '49--Greeting',
'50': '50--Celebration_Or_Party',
'51': '51--Dresses',
'52': '52--Photographers',
'53': '53--Raid',
'54': '54--Rescue',
'55': '55--Sports_Coach_Trainer',
'56': '56--Voter',
'57': '57--Angler',
'58': '58--Hockey',
'59': '59--people--driving--car',
'61': '61--Street_Battle'
}
def get_gt_boxes(gt_dir):
""" gt dir: (wider_face_val.mat, wider_easy_val.mat, wider_medium_val.mat, wider_hard_val.mat)"""
gt_mat = loadmat(os.path.join(gt_dir, 'wider_face_val.mat'))
hard_mat = loadmat(os.path.join(gt_dir, 'wider_hard_val.mat'))
medium_mat = loadmat(os.path.join(gt_dir, 'wider_medium_val.mat'))
easy_mat = loadmat(os.path.join(gt_dir, 'wider_easy_val.mat'))
facebox_list = gt_mat['face_bbx_list']
event_list = gt_mat['event_list']
file_list = gt_mat['file_list']
hard_gt_list = hard_mat['gt_list']
medium_gt_list = medium_mat['gt_list']
easy_gt_list = easy_mat['gt_list']
return facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list
def norm_score(pred):
""" norm score
pred {key: [[x1,y1,x2,y2,s]]}
"""
max_score = 0
min_score = 1
for _, k in pred.items():
for _, v in k.items():
            if len(v):
_min = np.min(v[:, -1])
_max = np.max(v[:, -1])
max_score = max(_max, max_score)
min_score = min(_min, min_score)
else:
continue
diff = max_score - min_score
for _, k in pred.items():
for _, v in k.items():
            if len(v):
v[:, -1] = (v[:, -1] - min_score) / diff
else:
continue
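# norm_score is a min-max rescaling of the confidence column across all predictions,
# e.g. scores [0.2, 0.5, 0.8] become [0.0, 0.5, 1.0].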
def image_eval(pred, gt, ignore, iou_thresh):
""" single image evaluation
pred: Nx5
gt: Nx4
ignore:
"""
_pred = pred.copy()
_gt = gt.copy()
pred_recall = np.zeros(_pred.shape[0])
recall_list = np.zeros(_gt.shape[0])
proposal_list = np.ones(_pred.shape[0])
_pred[:, 2] = _pred[:, 2] + _pred[:, 0]
_pred[:, 3] = _pred[:, 3] + _pred[:, 1]
_gt[:, 2] = _gt[:, 2] + _gt[:, 0]
_gt[:, 3] = _gt[:, 3] + _gt[:, 1]
overlaps = bbox_overlaps(_pred[:, :4], _gt)
for h in range(_pred.shape[0]):
gt_overlap = overlaps[h]
max_overlap, max_idx = gt_overlap.max(), gt_overlap.argmax()
if max_overlap >= iou_thresh:
if ignore[max_idx] == 0:
recall_list[max_idx] = -1
proposal_list[h] = -1
elif recall_list[max_idx] == 0:
recall_list[max_idx] = 1
r_keep_index = np.where(recall_list == 1)[0]
pred_recall[h] = len(r_keep_index)
return pred_recall, proposal_list
def img_pr_info(thresh_num, pred_info, proposal_list, pred_recall):
"""
img_pr_info
"""
pr_info = np.zeros((thresh_num, 2)).astype('float')
for t in range(thresh_num):
thresh = 1 - (t + 1) / thresh_num
r_index = np.where(pred_info[:, 4] >= thresh)[0]
        if len(r_index):
r_index = r_index[-1]
p_index = np.where(proposal_list[:r_index + 1] == 1)[0]
pr_info[t, 0] = len(p_index)
pr_info[t, 1] = pred_recall[r_index]
else:
pr_info[t, 0] = 0
pr_info[t, 1] = 0
return pr_info
def dataset_pr_info(thresh_num, pr_curve, count_face):
_pr_curve = np.zeros((thresh_num, 2))
for i in range(thresh_num):
_pr_curve[i, 0] = pr_curve[i, 1] / pr_curve[i, 0]
_pr_curve[i, 1] = pr_curve[i, 1] / count_face
return _pr_curve
def voc_ap(rec, prec):
"""
voc_ap
"""
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
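# Example of the AP integration above: voc_ap(rec=[0.5, 1.0], prec=[1.0, 0.5])
# builds the precision envelope [1.0, 1.0, 0.5, 0.0] over mrec = [0.0, 0.5, 1.0, 1.0]
# and returns 0.5 * 1.0 + 0.5 * 0.5 = 0.75.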
def evaluation(pred, gt_path, iou_thresh=0.5):
"""
evaluation
"""
facebox_list, event_list, file_list, hard_gt_list, medium_gt_list, easy_gt_list = get_gt_boxes(gt_path)
event_num = len(event_list)
settings = ['easy', 'medium', 'hard']
setting_gts = [easy_gt_list, medium_gt_list, hard_gt_list]
for setting_id in range(3):
# different setting
gt_list = setting_gts[setting_id]
# [hard, medium, easy]
pbar = tqdm.tqdm(range(event_num))
outputTxtDir = './bbx_gt_txt/'
if not os.path.exists(outputTxtDir):
os.makedirs(outputTxtDir)
outputTxtFile = outputTxtDir + settings[setting_id] + '.txt'
if os.path.exists(outputTxtFile):
os.remove(outputTxtFile)
for i in pbar:
pbar.set_description('Processing {}'.format(settings[setting_id]))
img_list = file_list[i][0]
sub_gt_list = gt_list[i][0]
gt_bbx_list = facebox_list[i][0]
for j in range(len(img_list)):
gt_boxes = gt_bbx_list[j][0]
keep_index = sub_gt_list[j][0]
imgName = img_list[j][0][0]
imgPath = _MAP[imgName.split('_')[0]] + '/' + imgName + '.jpg'
faceNum = len(keep_index)
with open(outputTxtFile, 'a') as txtFile:
txtFile.write(imgPath + '\n')
txtFile.write(str(faceNum) + '\n')
if faceNum == 0:
txtFile.write(str(faceNum) + '\n')
for index in keep_index:
curI = index[0] - 1
bbox = gt_boxes[curI]
txtFile.write('%d %d %d %d\n' % (bbox[0], bbox[1], bbox[2], bbox[3]))
txtFile.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--pred')
parser.add_argument('-g', '--gt', default='./eval_tools/ground_truth/')
args = parser.parse_args()
evaluation(args.pred, args.gt)
| python |
import protocol
import helpers
import hashes as h
import bloom_filter as bf
import garbled_bloom_filter as gbf
import PySimpleGUI as sg
sg.change_look_and_feel('DarkBlue2')
perform_protocol = sg.ReadButton('Start Simulation', font=('Segoe UI', 12), key='-RUN-')
stepTracker = 0
Protocol = None
disableChecks = False
layout = [
[sg.Text('Efficient Multi-Party PSI', size=(50,1), justification='left', font=('Segoe UI', 30))],
[sg.Text('By Malia Kency and John Owens', font=('Segoe UI', 13))],
[sg.Text('These parameters are meant for illustration and fast execution, they are not considered secure or optimal', font=('Segoe UI', 12, 'italic'))],
[
sg.Frame('', [
[
sg.Checkbox('Let me break stuff', font=('Segoe UI', 10), key='-DISABLECHECKS-', enable_events=True)
],
[
sg.Text('Number of players: ', font=('Segoe UI', 10)),
sg.Input('3', key='-NUMPLAYERS-', font=('Segoe UI', 10), disabled=True),
sg.Text(' Player input size:', font=('Segoe UI', 10)),
sg.Input('20', key='-INPUTSIZE-', font=('Segoe UI', 10), disabled=True)],
[
sg.Text('Weight of chosen 1s: ', font=('Segoe UI', 10)),
sg.Input('0.27', key='-A-', font=('Segoe UI', 10), disabled=True),
sg.Text('Cut-and-Choose Prob:', font=('Segoe UI', 10)),
sg.Input('0.3', key='-C-', font=('Segoe UI', 10), disabled=True)],
[
sg.Text('Number of max ones: ', font=('Segoe UI', 10)),
sg.Input('80', key='-NMAXONES-', font=('Segoe UI', 10), disabled=True)
],
]),
],
[
sg.Text('Constant protocol parameters that will be used:', font=('Segoe UI', 12), size=(72,1),),
sg.Text('Parameters that will be calculated:', font=('Segoe UI', 12)),
],
[ sg.Listbox(
values = [
'NumPlayers = Total number of players, P\N{LATIN SUBSCRIPT SMALL LETTER I}',
'PlayerInputSize = Size of the players input sets',
'SecParam (kappa) = 40 = Security Parameter',
'bitLength = 128 = length of random generated strings',
'Nmaxones = Max number of ones a player is allowed after cut-and-choose',
'p = 0.3 = Percentage of total messages to be used for cut-and-choose',
'a = 0.27 = Sampling weight of 1s vs. 0s for every P\N{LATIN SUBSCRIPT SMALL LETTER I}'],
size=(85,8), font=('Consolas', 10)),
sg.Listbox(
values = [
'Not = Total number of Random Oblivious Transfer',
'Nbf = Size of the player\'s bloom_filter. Calculated on initalization',
'k = Number of hash functions to use. Calculated on initalization',
'm\N{LATIN SUBSCRIPT SMALL LETTER h} = The number of 1s a player chooses',
'gamma = Verifies the correct relationship between p, k, m\N{LATIN SUBSCRIPT SMALL LETTER h}',
'gammaStar = Verifies the correct relationship between p, k, Not'],
size=(85,8), font=('Consolas', 10))
],
[sg.Multiline(key='-OUTPUT-', size=(200, 20), font=('Consolas', 10), autoscroll=True, text_color='white')],
[sg.Button('Reset', font=('Segoe UI', 12)), perform_protocol, sg.Button('Exit', font=('Segoe UI', 12))],
]
window = sg.Window('Private Set Intersection', layout, location=(100,40), resizable=True)
while True:
# Read the event that happened and the values dictionary
event, values = window.read()
# print(event, values)
if event in (None, 'Exit'):
break
if event == 'Reset':
window['-OUTPUT-'].Update('')
perform_protocol.Update("Start Simulation")
stepTracker = 0
if event == '-DISABLECHECKS-':
if values['-DISABLECHECKS-']:
window['-NUMPLAYERS-'].update(disabled=False)
window['-INPUTSIZE-'].update(disabled=False)
window['-A-'].update(disabled=False)
window['-C-'].update(disabled=False)
window['-NMAXONES-'].update(disabled=False)
disableChecks = True
else:
window['-NUMPLAYERS-'].update(disabled=True)
window['-INPUTSIZE-'].update(disabled=True)
window['-A-'].update(disabled=True)
window['-C-'].update(disabled=True)
window['-NMAXONES-'].update(disabled=True)
disableChecks = False
if event == '-RUN-':
NumPlayers = 3
PlayerInputSize = 30 # 10
SecParam = 40
bitLength = 128
Nmaxones = 80 # 40
p = 0.3
a = 0.27
if disableChecks:
PlayerInputSize = int(values['-INPUTSIZE-'])
NumPlayers = int(values['-NUMPLAYERS-'])
Nmaxones = int(values['-NMAXONES-'])
p = float(values['-C-'])
a = float(values['-A-'])
wOut = window['-OUTPUT-']
if stepTracker == 0:
window['-OUTPUT-'].update('')
stepTracker += 1
if stepTracker == 1:
# Initialize the protocol by calculating parameters,
# creating the players, and generating random inputs
# Note: at least 1 shared value is guaranteed
# PlayerInputSize = int(values['-INPUTSIZE-'])
Protocol = protocol.new(NumPlayers, Nmaxones, PlayerInputSize, SecParam, bitLength, p, a, disableChecks)
wOut.print("\nStarting protocol...")
wOut.print("k = {}".format(Protocol.params.k))
wOut.print("Not = {}".format(Protocol.params.Not))
wOut.print("gamma = {}".format(Protocol.params.gamma))
wOut.print("gammaStar = {} \n".format(Protocol.params.gammaStar))
wOut.print("\nSimulating players joining protocol. Total: {}".format(Protocol.params.NumPlayers), background_color='#284050', text_color='white')
wOut.print("At least one intersection will occur at the value: {}".format(Protocol.params.shared_random), background_color="red", text_color="white")
wOut.print("\nStep " + str(stepTracker-1) +" finished\n", background_color='#284050', text_color='white')
perform_protocol.Update("Step {}: Perform Random Oblivious Transfers".format(stepTracker))
if stepTracker == 2:
# Perform the random oblivious transfer simulation for P0...Pt
wOut.print("\nPerforming Random Oblivious Transfer simulation. {} transfers in total:".format(Protocol.params.Not))
Protocol.perform_RandomOT()
output = Protocol.print_PlayerROTTable()
wOut.print(output)
wOut.print("\nCounting each player's \"1s\":")
output = Protocol.print_PlayerMessageStats()
wOut.print(output + "\n\nStep " + str(stepTracker-1) +" finished\n")
perform_protocol.Update("Step {}: Perform Cut-and-Choose".format(stepTracker))
elif stepTracker == 3:
# Perform cut-and-choose simulation for P0...Pt
wOut.print("\nPerforming Cut and Choose simulation. Size of c: {}. Size of j: {}".format(Protocol.params.C, Protocol.params.Not - Protocol.params.C), background_color='#284050', text_color='white')
wOut.print("\nStep " + str(stepTracker-1) +" finished\n", background_color='#284050', text_color='white')
Protocol.perform_CutandChoose()
perform_protocol.Update("Step {}: Create Bloom Filters".format(stepTracker))
elif stepTracker == 4:
# Create bloom filters using j messages for P1...Pt
wOut.print("\nCreating Bloom Filters. BF length: {}".format(Protocol.params.Nbf))
output = Protocol.create_BloomFilters()
wOut.print(output)
wOut.print("\nStep " + str(stepTracker-1) +" finished\n")
perform_protocol.Update("Step {}: Create Injective functions".format(stepTracker))
elif stepTracker == 5:
# Create P1...Pt's injective functions
wOut.print("\nCreating injective functions for every Pi:", background_color='#284050', text_color='white')
output = Protocol.create_InjectiveFunctions()
wOut.print(output, background_color='#284050', text_color='white')
wOut.print("\nStep " + str(stepTracker-1) +" finished\n", background_color='#284050', text_color='white')
perform_protocol.Update("Step {}: Perform XOR sums and RGBF".format(stepTracker))
elif stepTracker == 6:
# Instantiate P0's and P1's rGBF objects
wOut.print("\nCreating randomized GBF for every Pi")
Protocol.create_RandomizedGBFs()
# P0 performs XOR summation on its own j_messages[injective_func] where bit=1
# P1 performs XOR summation on all P1...Pt's j_messages[injective_func] where bit = P1...Pt's choice
output = Protocol.perform_XORsummation()
            wOut.print(output)
# P0 calculates summary values for all elements of its input set
# P1 calculates summary values for all elements of its input set (Every P1...Pt input values)
Protocol.perform_SummaryValues()
wOut.print("\nStep " + str(stepTracker-1) +" finished\n")
perform_protocol.Update("Step {}: Finish protocol".format(stepTracker))
elif stepTracker == 7:
# P1 receives P0s summary values, compares them to its own
# Intersections are recorded and output
output, intersections = Protocol.perform_Output()
wOut.print(output, background_color='#284050', text_color='white')
wOut.print(intersections, background_color="red", text_color="white")
wOut.print("\nStep " + str(stepTracker-1) +" finished\n", background_color='#284050', text_color='white')
perform_protocol.Update("Restart Simulation")
stepTracker = 0
window.close() | python |
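# Rotate a list to the right by k positions using three in-place reversals.
# Worked example with nums = [1, 2, 3, 4, 5, 6, 7] and k = 3:
#   reverse nums[0:n-k] -> [4, 3, 2, 1, 5, 6, 7]
#   reverse nums[n-k:n] -> [4, 3, 2, 1, 7, 6, 5]
#   reverse the whole list -> [5, 6, 7, 1, 2, 3, 4]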
def reverses(array, a, b):
while a < b:
array[a], array[b] = array[b], array[a]
a += 1
b -= 1
def rotate(nums, k):
n = len(nums)
k = k % n
reverses(nums, 0, n-k-1)
reverses(nums, n-k, n-1)
reverses(nums, 0, n-1)
return nums
if __name__ == '__main__':
nums = [i for i in range(1, 8)]
k = 3
print(rotate(nums, k)) | python |
""" XVM (c) www.modxvm.com 2013-2017 """
# PUBLIC
def getAvgStat(key):
return _data.get(key, {})
# PRIVATE
_data = {}
| python |
import logging
logger = logging.getLogger(__name__)
import click, sys
from threatspec import app
def validate_logging(ctx, param, value):
levels = {
"none": 100,
"crit": logging.CRITICAL,
"error": logging.ERROR,
"warn": logging.WARNING,
"info": logging.INFO,
"debug": logging.DEBUG
}
if value.lower() in levels:
return levels[value.lower()]
raise click.BadParameter("Log level must be one of: {}".format(", ".join(levels.keys())))
def configure_logger(level, verbose):
if verbose:
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', level=level)
else:
logging.basicConfig(format='%(message)s', level=level)
@click.group()
@click.option("--log-level", "-l", callback=validate_logging, default="info", help="Set the log level. Must be one of: crit, error, warn, info, debug, none.")
@click.option("--verbose/--no-verbose", default=False, help="Makes logging more verbose.")
@click.version_option()
def cli(log_level, verbose):
"""
threatspec - threat modeling as code
threatspec is an open source project that aims to close the gap between
development and security by bringing the threat modelling process further
into the development process. This is achieved by having developers and
security engineers write threat specifications alongside code, then
dynamically generating reports and data-flow diagrams from the code. This
allows engineers to capture the security context of the code they write,
as they write it.
Usage:
# Initialise threatspec in the current directory
$ threatspec init
# Configure the source code paths
$ $EDITOR threatspec.yaml
# Run threatspec against the source code paths
$ threatspec run
# Generate the threat mode report
$ threatspec report
For more information for each subcommand use --help. For everything else,
visit the website at https://threatspec.org
"""
configure_logger(log_level, verbose)
@cli.command()
def init():
"""
Initialise threatspec in the current directory.
This will create a project configuration file called threatspec.yaml. Edit
this file to configure the project name and description as well the source
code paths for threatspec to scan.
This command will also create the threatmodel directory in the current
path. This directory contains the json output files from threatspec run.
The following file contains the collection of mitigations, acceptances,
connections etc identified as annotations in code:
threatmodel/threatmodel.json
The following three threat model library files are loaded each time threatspec
is run. If new threats, controls or components are found, they are added to these
files.
This allows threats, controls and components to be used across projects
and allows you to create threat library files, for example from OWASP or CWE
data. When threatspec loads paths configured in threatspec.yaml, it checks
each path to see if a threatspec.yaml file exists. If so, it attempts to load the
below files.
threatmodel/threats.json threatmodel/controls.json threatmodel/components.json
"""
threatspec = app.ThreatSpecApp()
threatspec.init()
@cli.command()
def run():
"""
Run threatspec against source code files.
This command loads the configuration file and for each configured path it first
checks to see if a threatspec.yaml file exists in the path. If it does, it loads
the three library json files.
Once all the library files have been loaded from the paths, threatspec run will
recursively parse each file in the path, looking for threatspec annotations.
You can exclude patterns from being searched (for example 'node_modules') using the
'ignore' key for the paths in the configuration file. See the documentation for
more information.
After all the source files have parsed, threatspec run will generate the
threatmodel/threatmodel.json file as well as the three library files:
threatmodel/threats.json threatmodel/controls.json threatmodel/components.json
"""
threatspec = app.ThreatSpecApp()
threatspec.run()
@cli.command()
def report():
"""
Generate the threatspec threat model report.
This will use Graphviz to generate a visualisation of the threat model, and
embed it in a threat model markdown document in the current directory:
ThreatModel.md
This document contains tables of mitigations etc (including any tests), as
well as connections and reviews.
"""
threatspec = app.ThreatSpecApp()
threatspec.report()
if __name__ == '__main__':
cli(None, None)
| python |
import torch
import numpy as np
import re
from collections import Counter
import string
import pickle
import random
from torch.autograd import Variable
import copy
import ujson as json
import traceback
import bisect
from torch.utils.data import Dataset, DataLoader
IGNORE_INDEX = -100
NUM_OF_PARAGRAPHS = 10
MAX_PARAGRAPH_LEN = 400
RE_D = re.compile(r'\d')
def has_digit(string):
return RE_D.search(string)
def prepro(token):
return token if not has_digit(token) else 'N'
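# pad_data copies (possibly ragged) nested sequences into a fixed-size zero array,
# truncating anything longer than the target sizes, e.g.
#   pad_data([[1, 2], [3]], (2, 3)) -> [[1, 2, 0], [3, 0, 0]]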
def pad_data(data, sizes, dtype=np.int64, out=None):
res = np.zeros(sizes, dtype=dtype) if out is None else out
if len(sizes) == 1:
res[:min(len(data), sizes[0])] = data[:sizes[0]]
elif len(sizes) == 2:
for i, x in enumerate(data):
if i >= sizes[0]: break
res[i, :min(len(x), sizes[1])] = data[i][:sizes[1]]
elif len(sizes) == 3:
for i, x in enumerate(data):
if i >= sizes[0]: break
for j, y in enumerate(x):
if j >= sizes[1]: break
res[i, j, :min(len(y), sizes[2])] = data[i][j][:sizes[2]]
    return res  # torch.from_numpy(res)
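# HotpotDataset flattens a list of buckets behind a single index: cumlens stores the
# running bucket sizes, so with bucket sizes [4, 6], index 7 falls in bucket 1 at
# offset 7 - 4 = 3.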
class HotpotDataset(Dataset):
def __init__(self, buckets):
self.buckets = buckets
self.cumlens = []
for i, b in enumerate(self.buckets):
last = 0 if i == 0 else self.cumlens[-1]
self.cumlens.append(last + len(b))
def __len__(self):
return self.cumlens[-1]
def __getitem__(self, i):
bucket_id = bisect.bisect_right(self.cumlens, i)
offset = 0 if bucket_id == 0 else self.cumlens[bucket_id-1]
return self.buckets[bucket_id][i - offset]
class DataIterator(DataLoader):
def __init__(self, dataset, para_limit, ques_limit, char_limit, sent_limit, **kwargs):
if kwargs.get('collate_fn', None) is None:
kwargs['collate_fn'] = self._collate_fn
if para_limit is not None and ques_limit is not None:
self.para_limit = para_limit
self.ques_limit = ques_limit
else:
para_limit, ques_limit = 0, 0
            for bucket in dataset.buckets:
for dp in bucket:
para_limit = max(para_limit, dp['context_idxs'].size(0))
ques_limit = max(ques_limit, dp['ques_idxs'].size(0))
self.para_limit, self.ques_limit = para_limit, ques_limit
self.char_limit = char_limit
self.sent_limit = sent_limit
super().__init__(dataset, **kwargs)
def _collate_fn(self, batch_data):
# Change: changing the dimensions of context_idxs
batch_size = len(batch_data)
max_sent_cnt = max(len([y for x in batch_data[i]['start_end_facts'] for y in x]) for i in range(len(batch_data)))
context_idxs = np.zeros((batch_size, NUM_OF_PARAGRAPHS, MAX_PARAGRAPH_LEN), dtype=np.int64)
ques_idxs = np.zeros((batch_size, self.ques_limit), dtype=np.int64)
context_char_idxs = np.zeros((batch_size, NUM_OF_PARAGRAPHS, MAX_PARAGRAPH_LEN, self.char_limit), dtype=np.int64)
ques_char_idxs = np.zeros((batch_size, self.ques_limit, self.char_limit), dtype=np.int64)
y1 = np.zeros(batch_size, dtype=np.int64)
y2 = np.zeros(batch_size, dtype=np.int64)
q_type = np.zeros(batch_size, dtype=np.int64)
start_mapping = np.zeros((batch_size, max_sent_cnt, NUM_OF_PARAGRAPHS * MAX_PARAGRAPH_LEN), dtype=np.float32)
end_mapping = np.zeros((batch_size, max_sent_cnt, NUM_OF_PARAGRAPHS * MAX_PARAGRAPH_LEN), dtype=np.float32)
all_mapping = np.zeros((batch_size, max_sent_cnt, NUM_OF_PARAGRAPHS * MAX_PARAGRAPH_LEN), dtype=np.float32)
is_support = np.full((batch_size, max_sent_cnt), IGNORE_INDEX, dtype=np.int64)
ids = [x['id'] for x in batch_data]
max_sent_cnt = 0
for i in range(len(batch_data)):
pad_data(batch_data[i]['context_idxs'], (NUM_OF_PARAGRAPHS, MAX_PARAGRAPH_LEN), out=context_idxs[i])
pad_data(batch_data[i]['ques_idxs'], (self.ques_limit,), out=ques_idxs[i])
pad_data(batch_data[i]['context_char_idxs'], (NUM_OF_PARAGRAPHS, MAX_PARAGRAPH_LEN, self.char_limit), out=context_char_idxs[i])
pad_data(batch_data[i]['ques_char_idxs'], (self.ques_limit, self.char_limit), out=ques_char_idxs[i])
if batch_data[i]['y1'] >= 0:
y1[i] = batch_data[i]['y1']
y2[i] = batch_data[i]['y2']
q_type[i] = 0
elif batch_data[i]['y1'] == -1:
y1[i] = IGNORE_INDEX
y2[i] = IGNORE_INDEX
q_type[i] = 1
elif batch_data[i]['y1'] == -2:
y1[i] = IGNORE_INDEX
y2[i] = IGNORE_INDEX
q_type[i] = 2
elif batch_data[i]['y1'] == -3:
y1[i] = IGNORE_INDEX
y2[i] = IGNORE_INDEX
q_type[i] = 3
else:
assert False
for j, (para_id, cur_sp_dp) in enumerate((para_id, s) for para_id, para in enumerate(batch_data[i]['start_end_facts']) for s in para):
if j >= self.sent_limit: break
if len(cur_sp_dp) == 3:
start, end, is_sp_flag = tuple(cur_sp_dp)
else:
start, end, is_sp_flag, is_gold = tuple(cur_sp_dp)
start += para_id * MAX_PARAGRAPH_LEN
end += para_id * MAX_PARAGRAPH_LEN
if start < end:
start_mapping[i, j, start] = 1
end_mapping[i, j, end-1] = 1
all_mapping[i, j, start:end] = 1
is_support[i, j] = int(is_sp_flag)
input_lengths = (context_idxs > 0).astype(np.int64).sum(2)
max_q_len = int((ques_idxs > 0).astype(np.int64).sum(1).max())
context_idxs = torch.from_numpy(context_idxs)
ques_idxs = torch.from_numpy(ques_idxs[:, :max_q_len])
context_char_idxs = torch.from_numpy(context_char_idxs)
ques_char_idxs = torch.from_numpy(ques_char_idxs[:, :max_q_len])
input_lengths = torch.from_numpy(input_lengths)
y1 = torch.from_numpy(y1)
y2 = torch.from_numpy(y2)
q_type = torch.from_numpy(q_type)
is_support = torch.from_numpy(is_support)
start_mapping = torch.from_numpy(start_mapping)
end_mapping = torch.from_numpy(end_mapping)
all_mapping = torch.from_numpy(all_mapping)
return {'context_idxs': context_idxs,
'ques_idxs': ques_idxs,
'context_char_idxs': context_char_idxs,
'ques_char_idxs': ques_char_idxs,
'context_lens': input_lengths,
'y1': y1,
'y2': y2,
'ids': ids,
'q_type': q_type,
'is_support': is_support,
'start_mapping': start_mapping,
'end_mapping': end_mapping,
'all_mapping': all_mapping}
def get_buckets(record_file):
# datapoints = pickle.load(open(record_file, 'rb'))
datapoints = torch.load(record_file)
return [datapoints]
def convert_tokens(eval_file, qa_id, pp1, pp2, p_type):
answer_dict = {}
for qid, p1, p2, type in zip(qa_id, pp1, pp2, p_type):
if type == 0:
context = eval_file[str(qid)]["context"]
spans = eval_file[str(qid)]["spans"]
start_idx = spans[p1][0]
end_idx = spans[p2][1]
answer_dict[str(qid)] = context[start_idx: end_idx]
elif type == 1:
answer_dict[str(qid)] = 'yes'
elif type == 2:
answer_dict[str(qid)] = 'no'
elif type == 3:
answer_dict[str(qid)] = 'noanswer'
else:
assert False
return answer_dict
def evaluate(eval_file, answer_dict):
f1 = exact_match = total = 0
for key, value in answer_dict.items():
total += 1
ground_truths = eval_file[key]["answer"]
prediction = value
assert len(ground_truths) == 1
cur_EM = exact_match_score(prediction, ground_truths[0])
cur_f1, _, _ = f1_score(prediction, ground_truths[0])
exact_match += cur_EM
f1 += cur_f1
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
# def evaluate(eval_file, answer_dict, full_stats=False):
# if full_stats:
# with open('qaid2type.json', 'r') as f:
# qaid2type = json.load(f)
# f1_b = exact_match_b = total_b = 0
# f1_4 = exact_match_4 = total_4 = 0
# qaid2perf = {}
# f1 = exact_match = total = 0
# for key, value in answer_dict.items():
# total += 1
# ground_truths = eval_file[key]["answer"]
# prediction = value
# cur_EM = metric_max_over_ground_truths(
# exact_match_score, prediction, ground_truths)
# # cur_f1 = metric_max_over_ground_truths(f1_score,
# # prediction, ground_truths)
# assert len(ground_truths) == 1
# cur_f1, cur_prec, cur_recall = f1_score(prediction, ground_truths[0])
# exact_match += cur_EM
# f1 += cur_f1
# if full_stats and key in qaid2type:
# if qaid2type[key] == '4':
# f1_4 += cur_f1
# exact_match_4 += cur_EM
# total_4 += 1
# elif qaid2type[key] == 'b':
# f1_b += cur_f1
# exact_match_b += cur_EM
# total_b += 1
# else:
# assert False
# if full_stats:
# qaid2perf[key] = {'em': cur_EM, 'f1': cur_f1, 'pred': prediction,
# 'prec': cur_prec, 'recall': cur_recall}
# exact_match = 100.0 * exact_match / total
# f1 = 100.0 * f1 / total
# ret = {'exact_match': exact_match, 'f1': f1}
# if full_stats:
# if total_b > 0:
# exact_match_b = 100.0 * exact_match_b / total_b
# exact_match_4 = 100.0 * exact_match_4 / total_4
# f1_b = 100.0 * f1_b / total_b
# f1_4 = 100.0 * f1_4 / total_4
# ret.update({'exact_match_b': exact_match_b, 'f1_b': f1_b,
# 'exact_match_4': exact_match_4, 'f1_4': f1_4,
# 'total_b': total_b, 'total_4': total_4, 'total': total})
# ret['qaid2perf'] = qaid2perf
# return ret
def normalize_answer(s):
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
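# Token-level F1 between normalized answers, e.g. f1_score("the cat sat", "a cat sat down")
# normalizes the two sides to "cat sat" and "cat sat down" and returns
# (f1, precision, recall) = (0.8, 1.0, 0.67) (recall rounded).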
def f1_score(prediction, ground_truth):
normalized_prediction = normalize_answer(prediction)
normalized_ground_truth = normalize_answer(ground_truth)
ZERO_METRIC = (0, 0, 0)
if normalized_prediction in ['yes', 'no', 'noanswer'] and normalized_prediction != normalized_ground_truth:
return ZERO_METRIC
if normalized_ground_truth in ['yes', 'no', 'noanswer'] and normalized_prediction != normalized_ground_truth:
return ZERO_METRIC
prediction_tokens = normalized_prediction.split()
ground_truth_tokens = normalized_ground_truth.split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return ZERO_METRIC
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1, precision, recall
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
| python |
import pytest
@pytest.mark.e2e
def test_arp_packet_e2e(api, utils, b2b_raw_config):
"""
Configure a raw TCP flow with,
- sender_hardware_addr increase from 00:0c:29:e3:53:ea with count 5
- target_hardware_addr decrement from 00:0C:29:E3:54:EA with count 5
- 100 frames of 1518B size each
- 10% line rate
Validate,
- tx/rx frame count and bytes are as expected
- all captured frames have expected sender_hardware_addr and target_hardware_addr
"""
api.set_config(api.config())
flow1 = b2b_raw_config.flows[0]
size = 1518
packets = 100
sender_hardware_addr = "00:0C:29:E3:53:EA"
target_hardware_addr = "00:0C:30:E3:54:EA"
sender_protocol_addr = "10.1.1.2"
target_protocol_addr = "20.1.1.5"
mac_step = "00:00:00:00:01:00"
ip_step = "0.0.0.1"
count = 5
flow1.packet.ethernet().arp()
flow_arp = flow1.packet[-1]
flow_arp.sender_hardware_addr.increment.start = sender_hardware_addr
flow_arp.sender_hardware_addr.increment.step = mac_step
flow_arp.sender_hardware_addr.increment.count = count
flow_arp.sender_protocol_addr.increment.start = sender_protocol_addr
flow_arp.sender_protocol_addr.increment.step = ip_step
flow_arp.sender_protocol_addr.increment.count = count
flow_arp.target_hardware_addr.decrement.start = target_hardware_addr
flow_arp.target_hardware_addr.decrement.step = mac_step
flow_arp.target_hardware_addr.decrement.count = count
flow_arp.target_protocol_addr.decrement.start = target_protocol_addr
flow_arp.target_protocol_addr.decrement.step = ip_step
flow_arp.target_protocol_addr.decrement.count = count
flow1.duration.fixed_packets.packets = packets
flow1.size.fixed = size
flow1.rate.percentage = 10
flow1.metrics.enable = True
utils.start_traffic(api, b2b_raw_config)
utils.wait_for(
lambda: results_ok(api, utils, size, packets),
"stats to be as expected",
timeout_seconds=30,
)
captures_ok(api, b2b_raw_config, size, utils)
def results_ok(api, utils, size, packets):
"""
Returns true if stats are as expected, false otherwise.
"""
port_results, flow_results = utils.get_all_stats(api)
frames_ok = utils.total_frames_ok(port_results, flow_results, packets)
bytes_ok = utils.total_bytes_ok(port_results, flow_results, packets * size)
return frames_ok and bytes_ok
def captures_ok(api, cfg, size, utils):
"""
Returns normally if patterns in captured packets are as expected.
"""
sender_hardware_addr = [
[0x00, 0x0C, 0x29, 0xE3, 0x53, 0xEA],
[0x00, 0x0C, 0x29, 0xE3, 0x54, 0xEA],
[0x00, 0x0C, 0x29, 0xE3, 0x55, 0xEA],
[0x00, 0x0C, 0x29, 0xE3, 0x56, 0xEA],
[0x00, 0x0C, 0x29, 0xE3, 0x57, 0xEA],
]
target_hardware_addr = [
[0x00, 0x0C, 0x30, 0xE3, 0x54, 0xEA],
[0x00, 0x0C, 0x30, 0xE3, 0x53, 0xEA],
[0x00, 0x0C, 0x30, 0xE3, 0x52, 0xEA],
[0x00, 0x0C, 0x30, 0xE3, 0x51, 0xEA],
[0x00, 0x0C, 0x30, 0xE3, 0x50, 0xEA],
]
sender_protocol_addr = [
[0x0a, 0x01, 0x01, 0x02],
[0x0a, 0x01, 0x01, 0x03],
[0x0a, 0x01, 0x01, 0x04],
[0x0a, 0x01, 0x01, 0x05],
[0x0a, 0x01, 0x01, 0x06],
]
target_protocol_addr = [
[0x14, 0x01, 0x01, 0x05],
[0x14, 0x01, 0x01, 0x04],
[0x14, 0x01, 0x01, 0x03],
[0x14, 0x01, 0x01, 0x02],
[0x14, 0x01, 0x01, 0x01],
]
cap_dict = utils.get_all_captures(api, cfg)
assert len(cap_dict) == 1
for k in cap_dict:
i = 0
for b in cap_dict[k]:
assert b[22:28] == sender_hardware_addr[i]
assert b[28:32] == sender_protocol_addr[i]
assert b[32:38] == target_hardware_addr[i]
assert b[38:42] == target_protocol_addr[i]
i = (i + 1) % 5
assert len(b) == size
if __name__ == "__main__":
pytest.main(["-s", __file__]) | python |
import os
import pytest
import json
import regal
_samples_simple = [
("and.v", "and.jed"),
("nand.v", "nand.jed"),
("not.v", "not.jed"),
("or.v", "or.jed"),
("xor.v", "xor.jed"),
("v1.v", "v1.jed"),
("v0.v", "v0.jed"),
("fb.v", "fb.jed"),
]
_samples_registered = [
("clk.v", "clk.jed"),
("clk_mixed.v", "clk_mixed.jed"),
]
_samples_complex = [
("and.v", "andc.jed"),
("nand.v", "nandc.jed"),
("not.v", "notc.jed"),
("or.v", "orc.jed"),
("xor.v", "xorc.jed"),
("v1.v", "v1c.jed"),
("v0.v", "v0c.jed"),
]
@pytest.mark.parametrize("rtl,jedec", _samples_simple)
def test_synth_simple(tmpdir, rtl, jedec):
netlist = tmpdir.join("netlist.json")
regal.synth(str(netlist), os.path.join("tests", "samples", rtl))
out = tmpdir.join("out.jed")
cfg = os.path.join("tests", "samples", "device.yaml")
regal.pnr(str(netlist), cfg, str(out))
with open(os.path.join("tests", "samples", jedec), "r") as f:
assert f.read() == out.read()
@pytest.mark.parametrize("rtl,jedec", _samples_registered)
def test_synth_registered(tmpdir, rtl, jedec):
netlist = tmpdir.join("netlist.json")
regal.synth(str(netlist), os.path.join("tests", "samples", rtl))
out = tmpdir.join("out.jed")
cfg = os.path.join("tests", "samples", "device_reg.yaml")
regal.pnr(str(netlist), cfg, str(out))
with open(os.path.join("tests", "samples", jedec), "r") as f:
assert f.read() == out.read()
@pytest.mark.parametrize("rtl,jedec", _samples_complex)
def test_synth_complex(tmpdir, rtl, jedec):
netlist = tmpdir.join("netlist.json")
regal.synth(str(netlist), os.path.join("tests", "samples", rtl))
out = tmpdir.join("out.jed")
cfg = os.path.join("tests", "samples", "device_complex.yaml")
regal.pnr(str(netlist), cfg, str(out))
with open(os.path.join("tests", "samples", jedec), "r") as f:
assert f.read() == out.read()
| python |
from setuptools import setup
from setuptools import find_namespace_packages
with open(file="README.md", mode="r") as fh:
long_description = fh.read()
setup(
name='fin-news',
author='Alex Reed',
author_email='[email protected]',
version='0.1.1',
description='A finance news aggregator used to collect articles on different market topics.',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/areed1192/finance-news-aggregator',
install_requires=[
'requests==2.24.0',
'fake_useragent==0.1.11'
],
packages=find_namespace_packages(
include=['finnews', 'finnews.*']
),
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Financial and Insurance Industry',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3'
],
python_requires='>3.7'
)
| python |
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 11 17:33:22 2017
@author: Martin
"""
import collections
new_otus = collections.defaultdict(list)
with open('unique_renamed_otus.txt') as data:
for d in data:
d = d.strip("\n") # remove newline char
line = d.split("\t") # split line at tab char
for acc in line[1:]: # go through accession names
size = acc
size = size.split("=") # example: size=42; we split at = and ; to get size
size = size[1].split(";")
size = size[0] # get the actual value in the list
accession_names = []
for i in range(1, int(size)+1): # count from 1 to size
n = ("0000", str(i)) # acc names are 7-digit. we add four 0s for margin
n = ''.join(n) # and its iteration no. so it remains unique
k = (acc, str(n))
k = (''.join(k))
accession_names.append(k) # join and put into a list
new_otus[line[0]].append(accession_names) # add into a dict, that looks like
# denovoX [[accY00001, accY00002], [accZ00001, accZ00002]...]
fw = open("output.txt", "w") # create output file
for k,v in new_otus.items(): # iterate through dict
v = (list(a for b in v for a in b)) # since we have a list of lists [[] [] []...],
v = '\t'.join(v) # we flatten and join so it can be written as a string
fw.write(k + '\t' + v)
fw.write("\n")
fw.close()
| python |
"""Role testing files using testinfra"""
import pytest
@pytest.mark.parametrize("config", [
(
"NTP=0.debian.pool.ntp.org "
"1.debian.pool.ntp.org "
"2.debian.pool.ntp.org "
"3.debian.pool.ntp.org"
),
(
"FallbackNTP=0.de.pool.ntp.org "
"1.de.pool.ntp.org "
"2.de.pool.ntp.org "
"4.de.pool.ntp.org"
)
])
def test_systemd_timesyncd_config(host, config):
"""Check systemd-timesyncd config file"""
f = host.file("/etc/systemd/timesyncd.conf")
assert config in f.content_string
def test_systemd_timesyncd_service(host):
"""Check systemd-timesyncd service"""
s = host.service("systemd-timesyncd")
assert s.is_running
assert s.is_enabled
| python |
'''
Given a string s, find the longest palindromic subsequence's length in s. You may assume that the maximum length of s is 1000.
Example 1:
Input:
"bbbab"
Output:
4
One possible longest palindromic subsequence is "bbbb".
Example 2:
Input:
"cbbd"
Output:
2
'''
'''
This is a standard problem of Dynamic Programming
1. If the two ends of a string are the same, then they must be included in the longest palindrome subsequence. Otherwise, they cannot both be included in the longest palindrome subsequence.
2. Therefore, we will use the relation:
dp[i][j]: the longest palindromic subsequence's length of substring(i, j), where i, j are the left and right indexes in the string
Initialization: dp[i][i] = 1
Use the relation:
if s[i] == s[j]:
    dp[i][j] = 2 + dp[i+1][j-1]
else:
    dp[i][j] = max(dp[i][j-1], dp[i+1][j])
'''
class Solution:
'''
    Time Complexity O(n^2)
    Space Complexity O(n^2)
'''
def longestPalindromeSubseq(s):
dp = [[0]*len(s) for _ in range(len(s))]
#initialization
for i in range(len(s)):
dp[i][i] = 1
#subsequence from i to i+1
for i in range(len(s)-1):
dp[i][i+1] = 2 if s[i] == s[i+1] else 1
diff = 2
n = len(s)
while diff < n:
i = 0
j = i + diff
while j < n and i < n-1:
if s[i] == s[j]:
dp[i][j] = max(dp[i+1][j],dp[i][j-1],dp[i+1][j-1] + 2)
else:
dp[i][j] = max(dp[i+1][j],dp[i][j-1])
i += 1
j = i + diff
diff += 1
max_out = 1
#choosing the maximum length of subsequence
for i in range(n):
max_out = max(dp[i][-1],max_out)
return max_out
# Driver Code
if __name__ == "__main__":
s = "bbbab"
result = Solution.longestPalindromeSubseq(s)
print("length of longest Substring = ", result) | python |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Filename : long_anisotropically_dense.py
# Author : Stephane Grabli
# Date : 04/08/2005
# Purpose : Selects the lines that are long and have a high anisotropic
# a priori density and uses causal density
# to draw without cluttering. Ideally, half of the
# selected lines are culled using the causal density.
#
# ********************* WARNING *************************************
# ******** The Directional a priori density maps must ******
# ******** have been computed prior to using this style module ******
from freestyle.chainingiterators import ChainSilhouetteIterator
from freestyle.functions import DensityF1D
from freestyle.predicates import (
NotUP1D,
QuantitativeInvisibilityUP1D,
UnaryPredicate1D,
pyHighDensityAnisotropyUP1D,
pyHigherLengthUP1D,
pyLengthBP1D,
)
from freestyle.shaders import (
ConstantColorShader,
ConstantThicknessShader,
SamplingShader,
)
from freestyle.types import IntegrationType, Operators
## custom density predicate
class pyDensityUP1D(UnaryPredicate1D):
def __init__(self, wsize, threshold, integration=IntegrationType.MEAN, sampling=2.0):
UnaryPredicate1D.__init__(self)
self._wsize = wsize
self._threshold = threshold
self._integration = integration
self._func = DensityF1D(self._wsize, self._integration, sampling)
self._func2 = DensityF1D(self._wsize, IntegrationType.MAX, sampling)
def __call__(self, inter):
c = self._func(inter)
m = self._func2(inter)
if c < self._threshold:
return 1
if m > 4*c:
if c < 1.5*self._threshold:
return 1
return 0
Operators.select(QuantitativeInvisibilityUP1D(0))
Operators.bidirectional_chain(ChainSilhouetteIterator(),NotUP1D(QuantitativeInvisibilityUP1D(0)))
Operators.select(pyHigherLengthUP1D(40))
## selects lines having a high anisotropic a priori density
Operators.select(pyHighDensityAnisotropyUP1D(0.3,4))
Operators.sort(pyLengthBP1D())
shaders_list = [
SamplingShader(2.0),
ConstantThicknessShader(2),
ConstantColorShader(0.2,0.2,0.25,1),
]
## uniform culling
Operators.create(pyDensityUP1D(3.0,2.0e-2, IntegrationType.MEAN, 0.1), shaders_list)
| python |
#
# Copyright (C) 2012-2020 Euclid Science Ground Segment
#
# This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation; either version 3.0 of the License, or (at your option)
# any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
Overview
--------
general info about this module
Summary
---------
.. autosummary::
grid_search_stratified_kfold_cv
Module API
----------
"""
from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
                      zip, round, input, int, pow, object, map)
__author__ = "Andrea Tramacere"
# Standard library
# eg copy
# absolute import rg:from copy import deepcopy
# Dependencies
# eg numpy
# absolute import eg: import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
# Project
# relative import eg: from .mod import f
def grid_search_stratified_kfold_cv(model,training_dataset,par_grid_dict=None):
    kfold = StratifiedKFold(n_splits=10, random_state=1).split(training_dataset.features, training_dataset.target_array)
    if par_grid_dict is None:
        param_grid = model.par_grid_dict
    else:
        param_grid = par_grid_dict
    g_search = GridSearchCV(model.clf, param_grid=param_grid, cv=kfold)
g_search.fit(training_dataset.features, training_dataset.target_array)
print("best parameters are %s with a CV score of %0.2f" % (g_search.best_params_, g_search.best_score_))
return g_search.best_params_, g_search.best_score_,g_search.best_estimator_ | python |
from django.conf.urls import patterns, url
urlpatterns = patterns('scheduler.views',
url(r'^list/$', 'job_list', (), 'job_list'),
)
| python |
from django.apps import AppConfig
class PhonebooksApiConfig(AppConfig):
name = 'phonebooks_api'
| python |
from os import listdir
from os.path import isfile, isdir, join
from typing import List
from bs4 import BeautifulSoup
from .model import Imagenet, Imagenet_Object
from ...generator import Generator
from ...helper import grouper
## Configure paths
out_dir = '/data/streamable4'
in_dir = '/data/ILSVRC'
in_dir_kaggle = '/data'
max_bucket_size = 25
generator = Generator(out_dir)
folder_img = join(in_dir, 'Data/CLS-LOC')
def _read_label_file_as_key_values(file):
with open(file, 'r') as f:
lines = [l.split(' ', 1) for l in f.readlines()]
for line in lines:
generator.add_key_value(line[0], line[1].strip())
def _read_xml(file):
def get_value(node, name):
return ''.join(child for child in node.find_all(name)[0].children)
with open(file, 'r') as f:
data = f.read()
root = BeautifulSoup(data, "xml")
net = Imagenet()
net.folder = get_value(root,'folder')
net.filename = get_value(root,'filename')
net.size_width = int(get_value(root,'width'))
net.size_height = int(get_value(root,'height'))
for object in root.find_all('object'):
net.objects.append(Imagenet_Object())
net.objects[-1].name = get_value(object, 'name')
net.objects[-1].bndbox_xmin = int(get_value(object, 'xmin'))
net.objects[-1].bndbox_ymin = int(get_value(object, 'ymin'))
net.objects[-1].bndbox_xmax = int(get_value(object, 'xmax'))
net.objects[-1].bndbox_ymax = int(get_value(object, 'ymax'))
return net
def _get_path_and_files(group: List[Imagenet], clean_foldername):
return (clean_foldername(group[0].folder), [f.filename for f in group])
def _read_metadata_as_bucket(metadata, image_root_folder, clean_foldername):
for group in grouper(metadata, max_bucket_size):
group = [g for g in group if g is not None]
sub_folder, files = _get_path_and_files(group, clean_foldername)
print(f'Bucket: {sub_folder} {generator.get_bucket_count()}')
image_folder = join(image_root_folder, sub_folder)
generator.append_bucket(image_folder, files, '.JPEG', group)
def _read_xml_dir_as_buckets(folder, image_root_folder, clean_foldername = lambda x: x):
all = [_read_xml(join(folder, f)) for f in listdir(folder) if isfile(join(folder, f))]
return _read_metadata_as_bucket(all, image_root_folder, clean_foldername)
def _read_jpeg_dir_as_buckets(image_root_folder, sub_folder, clean_foldername = lambda x: x):
folder = join(image_root_folder, sub_folder)
all = [f for f in listdir(folder) if isfile(join(folder, f))]
net = [Imagenet(sub_folder, f.removesuffix('.JPEG')) for f in all]
return _read_metadata_as_bucket(net, image_root_folder, clean_foldername)
## Read kaggle csv and txt files
label_file = join(in_dir_kaggle, 'LOC_synset_mapping.txt')
_read_label_file_as_key_values(label_file)
## Read imagenet xml & jpgs
# Train
train_folder = join(in_dir, 'Annotations/CLS-LOC/train') # ./n02606052/n02606052_188.xml
train_folder_img = join(in_dir, 'Data/CLS-LOC/train') # ./n02606052/n02606052_188.JPEG
clean_train_foldername = lambda f: f if f.startswith('n') else 'n' + f
for idx, f in enumerate(listdir(train_folder)):
if isdir(join(train_folder, f)):
generator.start_item('train/' + f)
_read_xml_dir_as_buckets(join(train_folder, f), train_folder_img, clean_train_foldername)
# Val
generator.start_item('val')
val_folder = join(in_dir, 'Annotations/CLS-LOC/val') # ./ILSVRC2012_val_00024102.xml
_read_xml_dir_as_buckets(val_folder, folder_img)
# Test
generator.start_item('test')
_read_jpeg_dir_as_buckets(folder_img, 'test')
generator.save_list()
# Imagenet().parse(ser) | python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014 Mikael Sandström <[email protected]>
# Copyright: (c) 2021, Ari Stark <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: oracle_user
short_description: Manages Oracle user/schema.
description:
- This module manages Oracle user/schema.
- It can create, alter or drop users.
    - It can empty schemas (dropping all their content).
    - It can change passwords of users; lock/unlock and expire/unexpire accounts.
- It can't be used to give privileges (refer to oracle_grant).
version_added: "0.8.0"
author:
- Mikael Sandström (@oravirt)
- Ari Stark (@ari-stark)
options:
authentication_type:
description:
- Type of authentication for the user.
- If not specified for a new user and no I(schema_password) is specified, there won't be authentication.
- If not specified and I(schema_password) is specified, value will be forced to I(password).
required: false
type: str
choices: ['external', 'global', 'no_authentication', 'password']
default_tablespace:
description:
- Default tablespace for the user.
- Tablespace must exist.
- If not specified for a new user, Oracle default will be used.
required: false
type: str
expired:
description:
- Expire or unexpire account.
- If not specified for a new user, Oracle default will be used.
required: false
type: bool
hostname:
description:
- Specify the host name or IP address of the database server computer.
default: localhost
type: str
locked:
description:
- Lock or unlock account.
- If not specified for a new user, Oracle default will be used.
required: false
type: bool
mode:
description:
- This option is the database administration privileges.
default: normal
type: str
choices: ['normal', 'sysdba']
oracle_home:
description:
- Define the directory into which all Oracle software is installed.
- Define ORACLE_HOME environment variable if set.
type: str
password:
description:
- Set the password to use to connect the database server.
- Must not be set if using Oracle wallet.
type: str
port:
description:
- Specify the listening port on the database server.
default: 1521
type: int
profile:
description:
- Profile of the user.
- Profile must exist.
- If not specified for a new user, Oracle default will be used.
required: false
type: str
schema_name:
description:
- Name of the user to manage.
required: true
type: str
aliases:
- name
schema_password:
description:
- Password of the user account.
- Required if I(authentication_type) is I(password).
required: false
type: str
service_name:
description:
- Specify the service name of the database you want to access.
required: true
type: str
state:
description:
- Specify the state of the user/schema.
- If I(state=empty), the schema will be purged, but not dropped.
            - If I(state=absent), the user will be dropped, including all its objects.
default: present
type: str
choices: ['absent', 'empty', 'present']
temporary_tablespace:
description:
- Default temporary tablespace for the user.
- Tablespace must exist.
- If not specified for a new user, Oracle default will be used.
required: false
type: str
username:
description:
- Set the login to use to connect the database server.
- Must not be set if using Oracle wallet.
type: str
aliases:
- user
requirements:
- Python module cx_Oracle
- Oracle basic tools.
notes:
- Check mode and diff mode are supported.
    - Changes made by @ari-stark broke the previous module interface.
'''
EXAMPLES = '''
- name: Create a new schema on a remote db by running the module on the controlmachine
oracle_user:
hostname: "remote-db-server"
service_name: "orcl"
username: "system"
password: "manager"
schema_name: "myschema"
schema_password: "mypass"
default_tablespace: "test"
state: "present"
- name: Drop a user on a remote db
oracle_user:
hostname: "remote-db-server"
service_name: "orcl"
username: "system"
password: "manager"
schema_name: "myschema"
state: "absent"
- name: Empty a schema on a remote db
oracle_user:
hostname: "remote-db-server"
service_name: "orcl"
username: "system"
password: "manager"
schema_name: "myschema"
state: "empty"
'''
RETURN = '''
ddls:
description: Ordered list of DDL requests executed during module execution.
returned: always
type: list
elements: str
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ari_stark.ansible_oracle_modules.plugins.module_utils.ora_db import OraDB
def get_existing_user(schema_name):
"""Check if the user/schema exists"""
data = ora_db.execute_select('select username,'
' account_status,'
' default_tablespace,'
' temporary_tablespace,'
' profile,'
' authentication_type,'
' oracle_maintained'
' from dba_users'
' where username = upper(:schema_name)', {'schema_name': schema_name})
if data:
row = data[0]
state = 'present'
expired = 'EXPIRED' in row[1]
locked = 'LOCKED' in row[1]
default_tablespace = row[2]
temporary_tablespace = row[3]
profile = row[4]
authentication_type = {'EXTERNAL': 'external', 'GLOBAL': 'global', 'NONE': None, 'PASSWORD': 'password'}[row[5]]
oracle_maintained = row[6] == 'Y'
diff['before']['state'] = state
diff['before']['expired'] = expired
diff['before']['locked'] = locked
diff['before']['default_tablespace'] = default_tablespace
diff['before']['temporary_tablespace'] = temporary_tablespace
diff['before']['profile'] = profile
diff['before']['authentication_type'] = authentication_type
if authentication_type == 'password':
diff['before']['schema_password'] = '**'
return {'username': schema_name, 'state': state, 'expired': expired, 'locked': locked,
'default_tablespace': default_tablespace, 'temporary_tablespace': temporary_tablespace,
'profile': profile, 'authentication_type': authentication_type, 'oracle_maintained': oracle_maintained}
else:
diff['before']['state'] = 'absent'
return None
def has_password_changed(schema_name, schema_password):
"""Check if password has changed."""
expected_error = 1017 # invalid username/password; logon denied
return ora_db.try_connect(schema_name, schema_password) == expected_error
def empty_schema(schema_name):
"""
    Empty a schema by dropping its existing objects.
    Return True if changes were made.
    Emptying the schema is a two-step action: tables must be dropped last, because materialized views
    also create tables, which are dropped when the materialized view itself is dropped.
"""
has_changed = False
rows = ora_db.execute_select(
"select object_name, object_type"
" from all_objects"
" where object_type in ('DATABASE LINK', 'FUNCTION', 'MATERIALIZED VIEW', 'PACKAGE', 'PROCEDURE',"
" 'SEQUENCE', 'SYNONYM', 'TABLE PARTITION', 'TRIGGER', 'TYPE', 'VIEW')"
" and owner = '%s' and generated = 'N'" % schema_name.upper())
for row in rows:
object_name = row[0]
object_type = row[1]
ora_db.execute_ddl('drop %s %s."%s"' % (object_type, schema_name, object_name))
has_changed = True
# Drop tables after drop materialized views (mviews are two objects in oracle: one mview and one table).
rows = ora_db.execute_select(
"select object_name, object_type"
" from all_objects"
" where object_type = 'TABLE'"
" and owner = '%s' and generated = 'N'" % schema_name.upper())
for row in rows:
object_name = row[0]
object_type = row[1]
ora_db.execute_ddl('drop %s %s."%s" cascade constraints' % (object_type, schema_name, object_name))
has_changed = True
return has_changed
def ensure_present(schema_name, authentication_type, schema_password, default_tablespace, temporary_tablespace,
profile, locked, expired, empty):
"""Create or modify the user"""
prev_user = get_existing_user(schema_name)
if prev_user:
changed = False
emptied = False
# Values are not changed by default, so after should be same as before
diff['after']['authentication_type'] = diff['before']['authentication_type']
diff['after']['default_tablespace'] = diff['before']['default_tablespace']
diff['after']['expired'] = diff['before']['expired']
diff['after']['locked'] = diff['before']['locked']
diff['after']['profile'] = diff['before']['profile']
diff['after']['temporary_tablespace'] = diff['before']['temporary_tablespace']
sql = 'alter user %s ' % schema_name
if authentication_type and authentication_type != prev_user['authentication_type']:
if authentication_type == 'external':
sql += 'identified externally '
elif authentication_type == 'global':
sql += 'identified globally '
elif authentication_type == 'password':
sql += 'identified by "%s" ' % schema_password
diff['after']['schema_password'] = '*'
else:
sql += 'no authentication '
diff['after']['authentication_type'] = authentication_type
changed = True
if default_tablespace and default_tablespace.lower() != prev_user['default_tablespace'].lower():
sql += 'default tablespace %s quota unlimited on %s ' % (default_tablespace, default_tablespace)
diff['after']['default_tablespace'] = default_tablespace
changed = True
if temporary_tablespace and temporary_tablespace.lower() != prev_user['temporary_tablespace'].lower():
sql += 'temporary tablespace %s ' % temporary_tablespace
diff['after']['temporary_tablespace'] = temporary_tablespace
changed = True
if profile and profile.lower() != prev_user['profile'].lower():
sql += 'profile %s ' % profile
diff['after']['profile'] = profile
changed = True
if locked is not None and locked != prev_user['locked']:
sql += 'account %s ' % ('lock' if locked else 'unlock')
diff['after']['locked'] = locked
changed = True
if expired is True and expired != prev_user['expired']:
sql += 'password expire '
diff['after']['expired'] = expired
changed = True
# If a password is defined and authentication type hasn't changed, we have to check :
# - if account must be unexpire
# - if password has changed
if schema_password and authentication_type == prev_user['authentication_type']:
# Unexpire account by defining a password
if expired is False and expired != prev_user['expired']:
sql += 'identified by "%s" ' % schema_password
diff['after']['expired'] = expired
diff['after']['password'] = '*'
changed = True
elif has_password_changed(schema_name, schema_password):
sql += 'identified by "%s" ' % schema_password
diff['after']['password'] = '*'
changed = True
if empty:
emptied = empty_schema(schema_name)
if changed or emptied:
if changed:
ora_db.execute_ddl(sql)
module.exit_json(msg='User %s changed and/or schema emptied.' % schema_name, changed=True, diff=diff,
ddls=ora_db.ddls)
else:
module.exit_json(msg='User %s already exists.' % schema_name, changed=False, diff=diff, ddls=ora_db.ddls)
else:
sql = 'create user %s ' % schema_name
if authentication_type == 'external':
sql += 'identified externally '
elif authentication_type == 'global':
sql += 'identified globally '
elif authentication_type == 'password':
sql += 'identified by "%s" ' % schema_password
else:
sql += 'no authentication '
if default_tablespace:
sql += 'default tablespace %s quota unlimited on %s ' % (default_tablespace, default_tablespace)
if temporary_tablespace:
sql += 'temporary tablespace %s ' % temporary_tablespace
if profile:
sql += 'profile %s ' % profile
if locked:
sql += 'account lock '
if expired:
sql += 'password expire '
ora_db.execute_ddl(sql)
module.exit_json(msg='User %s has been created.' % schema_name, changed=True, diff=diff, ddls=ora_db.ddls)
def ensure_absent(schema_name):
"""Drop the user if it exists"""
prev_user = get_existing_user(schema_name)
if prev_user and prev_user['oracle_maintained']:
module.fail_json(msg='Cannot drop a system user.', changed=False)
elif prev_user:
ora_db.execute_ddl('drop user %s cascade' % schema_name)
module.exit_json(msg='User %s dropped.' % schema_name, changed=True, diff=diff, ddls=ora_db.ddls)
else:
module.exit_json(msg="User %s doesn't exist." % schema_name, changed=False, diff=diff, ddls=ora_db.ddls)
def main():
global module
global ora_db
global diff
module = AnsibleModule(
argument_spec=dict(
authentication_type=dict(type='str', required=False,
choices=['external', 'global', 'no_authentication', 'password']),
default_tablespace=dict(type='str', default=None),
expired=dict(type='bool', default=None),
hostname=dict(type='str', default='localhost'),
locked=dict(type='bool', default=None),
mode=dict(type='str', default='normal', choices=['normal', 'sysdba']),
oracle_home=dict(type='str', required=False),
password=dict(type='str', required=False, no_log=True),
port=dict(type='int', default=1521),
profile=dict(type='str', default=None),
schema_name=dict(type='str', required=True, aliases=['name']),
schema_password=dict(type='str', default=None, no_log=True),
service_name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'empty', 'present']),
temporary_tablespace=dict(type='str', default=None),
username=dict(type='str', required=False, aliases=['user']),
),
required_together=[['username', 'password']],
supports_check_mode=True,
)
authentication_type = module.params['authentication_type']
default_tablespace = module.params['default_tablespace']
expired = module.params['expired']
locked = module.params['locked']
profile = module.params['profile']
schema_name = module.params['schema_name']
schema_password = module.params['schema_password']
state = module.params['state']
temporary_tablespace = module.params['temporary_tablespace']
# Transforming parameters
if schema_password:
authentication_type = 'password'
ora_db = OraDB(module)
diff = {'before': {'schema_name': schema_name},
'after': {'state': state,
'schema_name': schema_name, }}
if state in ['empty', 'present']:
ensure_present(schema_name, authentication_type, schema_password, default_tablespace, temporary_tablespace,
profile, locked, expired, state == 'empty')
elif state == 'absent':
ensure_absent(schema_name)
if __name__ == '__main__':
main()
| python |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Neural network operations commonly shared by the architectures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
class NormActivation(tf.keras.layers.Layer):
"""Combined Normalization and Activation layers."""
def __init__(self,
momentum=0.997,
epsilon=1e-4,
trainable=True,
init_zero=False,
use_activation=True,
activation='relu',
fused=True,
name=None):
"""A class to construct layers for a batch normalization followed by a ReLU.
Args:
momentum: momentum for the moving average.
epsilon: small float added to variance to avoid dividing by zero.
trainable: `bool`, if True also add variables to the graph collection
GraphKeys.TRAINABLE_VARIABLES. If False, freeze batch normalization
layer.
init_zero: `bool` if True, initializes scale parameter of batch
normalization with 0. If False, initialize it with 1.
      fused: `bool` fused option in batch normalization.
      use_activation: `bool`, whether to add the optional activation layer after
the batch normalization layer.
activation: 'string', the type of the activation layer. Currently support
`relu` and `swish`.
name: `str` name for the operation.
"""
super(NormActivation, self).__init__(trainable=trainable)
if init_zero:
gamma_initializer = tf.keras.initializers.Zeros()
else:
gamma_initializer = tf.keras.initializers.Ones()
self._normalization_op = tf.keras.layers.BatchNormalization(
momentum=momentum,
epsilon=epsilon,
center=True,
scale=True,
trainable=trainable,
fused=fused,
gamma_initializer=gamma_initializer,
name=name)
self._use_activation = use_activation
if activation == 'relu':
self._activation_op = tf.nn.relu
elif activation == 'swish':
self._activation_op = tf.nn.swish
else:
raise ValueError('Unsupported activation `{}`.'.format(activation))
def __call__(self, inputs, is_training=None):
"""Builds the normalization layer followed by an optional activation layer.
Args:
inputs: `Tensor` of shape `[batch, channels, ...]`.
is_training: `boolean`, if True if model is in training mode.
Returns:
A normalized `Tensor` with the same `data_format`.
"""
    # We will need to keep training=None by default, so that it can be inherited
# from keras.Model.training
if is_training and self.trainable:
is_training = True
inputs = self._normalization_op(inputs, training=is_training)
if self._use_activation:
inputs = self._activation_op(inputs)
return inputs
def norm_activation_builder(momentum=0.997,
epsilon=1e-4,
trainable=True,
activation='relu',
**kwargs):
return functools.partial(
NormActivation,
momentum=momentum,
epsilon=epsilon,
trainable=trainable,
activation=activation,
**kwargs)
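# Usage sketch (illustrative only; the argument values and layer name below are
# assumptions, not taken from the original file):
#   norm_activation = norm_activation_builder(activation='swish')
#   layer = norm_activation(name='bn_swish_1')
#   outputs = layer(inputs, is_training=True)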
| python |
# -*- coding: utf-8 -*-
def main():
import sys
input = sys.stdin.readline
n = int(input())
xy = [list(map(int, input().split())) for _ in range(n)]
ans = 0
for xy1, xy2 in zip(xy, xy[1:]):
ans += abs(xy1[0] - xy2[0])
ans += abs(xy1[1] - xy2[1])
print(ans)
if __name__ == "__main__":
main()
| python |
# --------------------------
# UFSC - CTC - INE - INE5603
# Calculations exercise
# --------------------------
# Class responsible for determining whether a number is prime.
from view.paineis.painel_abstrato import PainelAbstrato
from model.calculos import primo
class PainelPrimo(PainelAbstrato):
def __init__(self):
super().__init__('Número Primo')
def interaja(self):
n = self._leia1int()
if primo(n):
msg = 'O número {} é primo.'.format(n)
else:
msg = 'O número {} não é primo.'.format(n)
print(msg)
| python |
import os
from dotenv import find_dotenv
from dotenv import load_dotenv
load_dotenv(find_dotenv())
BASE_URL = os.getenv("BASE_URL")
CURRENCY = os.getenv("CURRENCY")
API_URL = BASE_URL + CURRENCY
OUTPUT_FILE = os.getenv("OUTPUT_FILE")
REQUEST_TIMEOUT = int(os.getenv("REQUEST_TIMEOUT"))
CANCEL_ON_FAILURE = os.getenv("CANCEL_ON_FAILURE") == "true"
CRON_INTERVAL_MINUTES = int(os.getenv("CRON_INTERVAL_MINUTES"))
DEBUG = os.getenv("DEBUG") == "true"
| python |
from ctypes import PyDLL, py_object, c_int
from os import path
from sys import exit
import numpy as np
my_path = path.abspath(path.dirname(__file__))
path = path.join(my_path, "./bin/libmotion_detector_optimization.so")
try:
lib = PyDLL(path)
lib.c_scan.restype = py_object
lib.c_scan.argtypes = [py_object, c_int]
lib.c_find_bounding_boxes.restype = py_object
lib.c_find_bounding_boxes.argtypes = [py_object]
lib.c_pack.restype = py_object
lib.c_pack.argtypes = [py_object, py_object]
except OSError:
print("Error when loading lib")
exit(1)
def scan(img: np.ndarray, expansion_step: int):
return lib.c_scan(img, expansion_step)
def optimize_bounding_boxes(rectangles):
if rectangles is None or not len(rectangles):
return []
return lib.c_find_bounding_boxes(rectangles)
def pack(rects: list, bins: list):
return lib.c_pack(rects, bins) | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from subprocess import Popen
processes = []
for counter in range(10):
chrome_cmd = 'export BROWSER=chrome && python test_search.py'
firefox_cmd = 'export BROWSER=firefox && python test_search.py'
processes.append(Popen(chrome_cmd, shell=True))
processes.append(Popen(firefox_cmd, shell=True))
for counter in range(10):
processes[counter].wait()
# Execution time: about 9 minutes | python |
import re
"""
# Line based token containers
As denoted by `^` in the regex
"""
BLANK = re.compile(r"^$")
#TODO this will fail to match correctly if a line is `<div><p>foo bar</p></div>`
HTML_LINE = re.compile(
r"""
\s{0,3}
(?P<content>\<[^\>]+\>) #Match <ANYTHING> that is wrapped with greater/less than symbols
""", re.VERBOSE)
CODE_LINE = re.compile(r"(^\ {4})|(^\t)")
START_WS = re.compile(r"^(\s+)")
QUOTED = re.compile(r"^(\>) (?P<content>.*)")
ORDERED_ITEM = re.compile(r"^\d+\. (?P<content>.*)") # (Numeric)(period)
UNORDERED_ITEM = re.compile(r"^\* (?P<content>.*)")
LINE_HEADER = re.compile(r"""^(?P<depth>\#+)\ (?P<content>.*)""")
"""
Body tokens
"""
ANCHOR_simple = re.compile(r"""\[
(?P<content>[^\]]+)
\]
\(
(?P<href>[^\)]+)
\)""", re.VERBOSE)
ANCHOR_title = re.compile(r"""\[
(?P<content>[^\]]+)
\]
\(
(?P<href>[^\)]+)
\"(?P<title>[^\"]+)\"
\)""", re.VERBOSE)
IMAGE_simple = re.compile(r"""\!\[(?P<content>[^\]]+)\]\((?P<href>[^\)]+)\)""")
IMAGE_title = re.compile(r"""\!\[(?P<content>[^\]]+)\]\((?P<href>[^\)]+) \"(?P<title>[^\"]+)\"\)""")
STRONG_underscore = re.compile(r"""(\_{2}(?P<content>[^_]+)\_{2})""")
STRONG_star = re.compile(
r"""(
(?<!\\)
\*{2}
(?P<content>[^_]+)
(?<!\\)
\*{2}
)""", re.VERBOSE)
EMPHASIS_underscore = re.compile(
r"""(
(?<!\_) #if there is double __ at the start, ignore
\_
(?P<content>[^\_]+)
\_
(?!\_) #if there is double __ at the end, ignore
)""", re.VERBOSE)
EMPHASIS_star = re.compile(
r"""
(?<!\\)
(?<!\*)
\*
(?P<content>[^\*]+)
(?<!\\)
\*
(?!\*)
""", re.VERBOSE)
| python |
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
"""
This example demonstrates how to instantiate the
Adafruit BNO055 Sensor using this library and just
the I2C bus number.
This example will only work on a Raspberry Pi
and does require the i2c-gpio kernel module to be
installed and enabled. Most Raspberry Pis will
already have it installed, however most do not
have it enabled. You will have to manually enable it
"""
import time
from adafruit_extended_bus import ExtendedI2C as I2C
import adafruit_bno055
# To enable i2c-gpio, add the line `dtoverlay=i2c-gpio` to /boot/config.txt
# Then reboot the pi
# Create library object using our Extended Bus I2C port
# Use `ls /dev/i2c*` to find out what i2c devices are connected
i2c = I2C(1) # Device is /dev/i2c-1
sensor = adafruit_bno055.BNO055_I2C(i2c)
last_val = 0xFFFF
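# Note: temperature() below re-reads the sensor when a value differs from the
# previous reading by exactly 128, and masks a repeated jump down to the low
# 6 bits before returning it.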
def temperature():
global last_val # pylint: disable=global-statement
result = sensor.temperature
if abs(result - last_val) == 128:
result = sensor.temperature
if abs(result - last_val) == 128:
return 0b00111111 & result
last_val = result
return result
while True:
print("Temperature: {} degrees C".format(temperature()))
print("Accelerometer (m/s^2): {}".format(sensor.acceleration))
print("Magnetometer (microteslas): {}".format(sensor.magnetic))
print("Gyroscope (rad/sec): {}".format(sensor.gyro))
print("Euler angle: {}".format(sensor.euler))
print("Quaternion: {}".format(sensor.quaternion))
print("Linear acceleration (m/s^2): {}".format(sensor.linear_acceleration))
print("Gravity (m/s^2): {}".format(sensor.gravity))
print()
time.sleep(1)
| python |
import os
import json
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
DEPENDS_PATH = os.path.join(SCRIPT_DIR, '.depends.json')
FLAGS = [
'-Wall',
'-Wextra',
'-x', 'c++',
'-std=c++17',
'-isystem', '/usr/include/c++/8.2.1',
'-isystem', '/usr/include/c++/8.2.1/x86_64-pc-linux-gnu',
'-isystem', '/usr/include/c++/8.2.1/backward',
'-isystem', '/usr/include/',
]
with open(DEPENDS_PATH) as f:
DEPENDS = json.load(f)
def project_include_dir(project):
return os.path.join(SCRIPT_DIR, 'src', project, 'include')
def get_project(file_path):
src_path = os.path.join(SCRIPT_DIR, 'src')
rel_to_src = os.path.relpath(file_path, src_path)
return rel_to_src.split(os.path.sep)[0]
def Settings(**kwargs):
if kwargs['language'] == 'python':
return {}
file_path = kwargs['filename']
project = get_project(file_path)
depends = DEPENDS[project]
flags = FLAGS[:]
for p in [project] + depends:
flags.extend(['-I', project_include_dir(p)])
return {'flags': flags}
| python |
from .quantizer import *
from .api import *
| python |
# Count how many times each value occurs in the array.
# Knowing these counts, we can quickly build the already sorted array.
# For this sort we need to know the minimum and maximum of the array.
# Those bounds give the keys of the auxiliary array in which
# we record which values occurred and how many times.
def count_sort(a):
"""Сортировка подсчетом"""
A = [0] * 13
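    # Note: the auxiliary array is sized 13, so this implementation assumes
    # every input value lies in the range 0..12.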
for val in a:
A[val] += 1
print(A)
a_sorted = []
for i in range(len(A)):
for j in range(A[i]):
a_sorted.append(i)
return a_sorted
def test_sort_function(func):
print("Тестирование функции ", func.__doc__)
A = [1, 4, 6, 4, 7, 12, 8, 2, 4]
A_sorted = [1, 2, 4, 4, 4, 6, 7, 8, 12]
A = count_sort(A)
print("OK" if A == A_sorted else "False")
if __name__ == "__main__":
test_sort_function(count_sort)
| python |
# Generated by Django 2.1.7 on 2019-03-27 15:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('slug', models.SlugField(blank=True, max_length=100, unique=True)),
('image', models.ImageField(blank=True, upload_to='images/')),
('body', models.TextField()),
('is_published', models.BooleanField(default=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='blogs', to=settings.AUTH_USER_MODEL)),
],
),
]
| python |
names= ('ali', 'ahmet')
sayı=int(input("sayı giriniz:"))
if sayı>=10 :
print(names[0])
else :
print(names[1]) | python |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mimetypes
import warnings
from httplib import HTTPResponse
SHOW_DEPRECATION_WARNING = True
SHOW_IN_DEVELOPMENT_WARNING = True
OLD_API_REMOVE_VERSION = '0.6.0'
def read_in_chunks(iterator, chunk_size=None):
"""
Return a generator which yields data in chunks.
@type iterator: C{Iterator}
    @param iterator: An object which implements an iterator interface
or a File like object with read method.
@type chunk_size: C{int}
@param chunk_size: Optional chunk size (defaults to CHUNK_SIZE)
"""
if isinstance(iterator, (file, HTTPResponse)):
get_data = iterator.read
args = (chunk_size, )
else:
get_data = iterator.next
args = ()
while True:
chunk = str(get_data(*args))
if len(chunk) == 0:
raise StopIteration
yield chunk
def guess_file_mime_type(file_path):
filename = os.path.basename(file_path)
(mimetype, encoding) = mimetypes.guess_type(filename)
return mimetype, encoding
def deprecated_warning(module):
if SHOW_DEPRECATION_WARNING:
warnings.warn('This path has been deprecated and the module'
' is now available at "libcloud.compute.%s".'
' This path will be fully removed in libcloud %s.' %
(module, OLD_API_REMOVE_VERSION),
category=DeprecationWarning)
def in_development_warning(module):
if SHOW_IN_DEVELOPMENT_WARNING:
warnings.warn('The module %s is in development and your are advised '
'against using it in production.' % (module),
category=FutureWarning)
def str2dicts(data):
"""
Create a list of dictionaries from a whitespace and newline delimited text.
For example, this:
cpu 1100
ram 640
cpu 2200
ram 1024
becomes:
[{'cpu': '1100', 'ram': '640'}, {'cpu': '2200', 'ram': '1024'}]
"""
list_data = []
list_data.append({})
d = list_data[-1]
lines = data.split('\n')
for line in lines:
line = line.strip()
if not line:
d = {}
list_data.append(d)
d = list_data[-1]
continue
whitespace = line.find(' ')
if not whitespace:
continue
key = line[0:whitespace]
value = line[whitespace + 1:]
d.update({key: value})
list_data = [value for value in list_data if value != {}]
return list_data
def str2list(data):
"""
Create a list of values from a whitespace and newline delimited text (keys are ignored).
For example, this:
ip 1.2.3.4
ip 1.2.3.5
ip 1.2.3.6
becomes:
['1.2.3.4', '1.2.3.5', '1.2.3.6']
"""
list_data = []
for line in data.split('\n'):
line = line.strip()
if not line:
continue
try:
splitted = line.split(' ')
# key = splitted[0]
value = splitted[1]
except Exception:
continue
list_data.append(value)
return list_data
def dict2str(data):
"""
Create a string with a whitespace and newline delimited text from a dictionary.
For example, this:
{'cpu': '1100', 'ram': '640', 'smp': 'auto'}
becomes:
cpu 1100
ram 640
smp auto
"""
result = ''
for k in data:
if data[k] != None:
result += '%s %s\n' % (str(k), str(data[k]))
else:
result += '%s\n' % str(k)
return result
def fixxpath(xpath, namespace):
# ElementTree wants namespaces in its xpaths, so here we add them.
return '/'.join(['{%s}%s' % (namespace, e) for e in xpath.split('/')])
def findtext(element, xpath, namespace):
return element.findtext(fixxpath(xpath=xpath, namespace=namespace))
def findattr(element, xpath, namespace):
return element.findtext(fixxpath(xpath=xpath, namespace=namespace))
def findall(element, xpath, namespace):
return element.findall(fixxpath(xpath=xpath, namespace=namespace))
def get_driver(drivers, provider):
"""
Get a driver.
@param drivers: Dictionary containing valid providers.
@param provider: Id of provider to get driver
@type provider: L{libcloud.types.Provider}
"""
if provider in drivers:
mod_name, driver_name = drivers[provider]
_mod = __import__(mod_name, globals(), locals(), [driver_name])
return getattr(_mod, driver_name)
raise AttributeError('Provider %s does not exist' % (provider))
| python |
nums = [int(i) for i in input().split()]
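# Maximum subarray sum via prefix sums: for each prefix sum P[i], the best
# subarray ending at position i is P[i] minus the smallest prefix sum seen
# before i; the answer is the largest such difference.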
prefixsum = [0] * (len(nums) + 1)
mi = prefixsum[0]
ma = -100000
msum = nums[0]
for i in range(1, len(nums) + 1):
prefixsum[i] = prefixsum[i-1] + nums[i-1]
if prefixsum[i-1] < mi:
mi = prefixsum[i-1]
if prefixsum[i] - mi > msum:
msum = prefixsum[i] - mi
print(msum) | python |
import psycopg2
from config import config
class PostgresConnector:
def __init__(self):
# read connection parameters
self.params = config()
def connect(self):
""" Connect to the PostgreSQL database server """
conn = None
try:
# connect to the PostgreSQL server
print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**self.params)
# create a cursor
cur = conn.cursor()
# execute a statement
print('PostgreSQL database version:')
cur.execute('SELECT version()')
# display the PostgreSQL database server version
db_version = cur.fetchone()
print(db_version)
# close the communication with the PostgreSQL
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
# conn.close()
# print('Database connection closed.')
return conn
if __name__ == '__main__':
postgres = PostgresConnector()
connObj = postgres.connect()
cur = connObj.cursor()
# cur.execute("SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name= 'Tweets'") ## for the SCHEMA
# cur.execute("SELECT count('Tweets') FROM \"Tweets\" where label = 'sexism'") ## SELECT QUERY
cur.execute("SELECT count('Tweets') FROM \"Tweets\"") ## SELECT QUERY
# show the results of the query
for row in cur:
print(row)
cur.close()
connObj.commit() # You have to commit in order to make actual changes to the DB | python |
from gopygo.parser import parse
from gopygo.unparser import unparse
__version__ = '0.3.2'
| python |
import random
from scicast_bot_session.client.scicast_bot_session import SciCastBotSession
from scicast_bot_session.common.utils import scicast_bot_urls
import botutils
from time import sleep
import datetime
import sys
def getinfo(site,bot='',roundid:str='', percent=0.005):
try:
api_key = botutils.lookup_key(site+bot)
URL = scicast_bot_urls[site]
s = SciCastBotSession(base_url=URL, api_key=api_key)
assets = s.get_user_info()
currentCash=assets["cash"]
budget = botutils.get_trade_cost(cash=currentCash,fraction=percent)
print(f'Pulling from {URL}',file=sys.stderr)
print(f'cash = {currentCash}, budget = {budget}',file=sys.stderr)
print("claim_id,short_name,latest_prob")
questions = s.get_questions(roundid)
for q in questions:
#print(q)
print(q['question']['claim_id'],",",q['question']['short_name'],",",q['prob'][1], sep='')
except Exception as e:
print(f'Noise Bot Error: {e}')
| python |
import sys
import os
from pathlib import Path
import locale
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QLocale
from configs import Configurator
from gui.appwidget import App
from pa import generate_pa_test
CONFIG_FILE = 'duet_pressure_advance.cfg'
def generate(cfg):
pass
if __name__ == '__main__':
cfg_file = os.path.join(Path.home(), CONFIG_FILE)
configurator = Configurator(cfg_file)
#qt_locale = QLocale.system().name()
#locale.setlocale(locale.LC_ALL, qt_locale)
app = QApplication(sys.argv)
app.setStyle('Fusion')
ex = App(generate_pa_test, configurator)
    app.exec_()
configurator.save(cfg_file) | python |
from tkinter import *
root=Tk()
root.geometry("600x500")
addno=StringVar()
e1=Entry(root)
e1.grid(row=0,column=1)
e2=Entry(root)
e2.grid(row=1,column=1)
def add():
res1 = int(e1.get())+int(e2.get())
addno.set(res1)
n1=Label(root,text="num1").grid(row=0)
n2=Label(root,text="num2").grid(row=1)
n3=Label(root, text="Result:",bg="yellow").grid(row=3)
result=Label(root,textvariable=addno).grid(row=3,column=1)
b=Button(root,text="add",command=add).grid(row=2,column=1)
root.mainloop()
# mytext=StringVar()
# def sqare():
# res=int(e1.get())*int(e1.get())
# mytext.set(res)
# n1=Label(root,text="number").grid(row=0)
# e1=Entry(root)
# e1.grid(row=0,column=1)
# b=Button(root,text="sqare",command=sqare).grid(row=2,column=3)
# IbRES=Label(root,text="result",bg="yellow").grid(row=3)
# Result=Label(root,textvariable=mytext).grid(row=3,column=1)
#
#
#
#
#
1 | python |
import re
from random import randrange
from model.contact import Contact
def test_random_contact_home_page(app):
old_contacts = app.contact.get_contact_list()
index = randrange(len(old_contacts))
contact_from_home_page = app.contact.get_contact_list()[index]
contact_from_edit_page = app.contact.get_contact_info_from_edit_page(index)
assert contact_from_home_page.first_name == contact_from_edit_page.first_name
assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
assert contact_from_home_page.last_name == contact_from_edit_page.last_name
assert contact_from_home_page.contact_address == contact_from_edit_page.contact_address
assert contact_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(contact_from_edit_page)
def clear(s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x), filter(lambda x: x is not None,
[contact.home_contact,
contact.mobile_phone, contact.work_phone,
contact.phone_2]))))
def merge_emails_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "", filter(lambda x: x is not None, [contact.email_com,
contact.email2, contact.email3])))
def test_all_contact_home_page_db(app, db, check_ui):
db_contacts = db.get_contact_list()
db_contacts = sorted(db_contacts, key=Contact.id_or_max)
contacts_from_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
assert len(db_contacts) == len(contacts_from_home_page)
assert db_contacts == contacts_from_home_page
for number in db_contacts:
number.all_emails_from_home_page = merge_emails_like_on_home_page(number)
number.all_phones_from_home_page = merge_phones_like_on_home_page(number)
for i in range(len(db_contacts)):
assert db_contacts[i].id == contacts_from_home_page[i].id
assert db_contacts[i].first_name == contacts_from_home_page[i].first_name
assert db_contacts[i].last_name == contacts_from_home_page[i].last_name
assert db_contacts[i].contact_address == contacts_from_home_page[i].contact_address
assert db_contacts[i].all_phones_from_home_page == contacts_from_home_page[i].all_phones_from_home_page
assert db_contacts[i].all_emails_from_home_page == contacts_from_home_page[i].all_emails_from_home_page
print(str(i))
print(db_contacts[i])
print(contacts_from_home_page[i])
def clear(s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x), filter(lambda x: x is not None,
[contact.home_contact,
contact.mobile_phone, contact.work_phone,
contact.phone_2]))))
def merge_emails_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "", filter(lambda x: x is not None, [contact.email_com,
contact.email2, contact.email3]))) | python |
import json
from flask import request
from flask_restful import Resource, reqparse
from database.interface import FirebaseInterface
from models.Service import Service
class ServicesController(Resource):
def __init__(self):
self.parser = reqparse.RequestParser()
self.interface = FirebaseInterface()
def get(self):
try:
dic = {"data": self.interface.getData("services")}
data = json.dumps(dic)
result = json.loads(data)
http_return_code = 200
except Exception as e:
result = str(e)
http_return_code = 400
return result, http_return_code
def post(self):
req = request.get_json()
try:
name = req["nome"]
price = req["preco"]
service = Service(name, price)
service.validateFields()
self.interface.setData(service.__dict__, "services", name)
result = "Serviço cadastrado com sucesso"
http_return_code = 200
except Exception as e:
result = str(e)
http_return_code = 400
return result, http_return_code
def delete(self, service):
try:
self.interface.deleteData("services", service)
result = "Serviço removido com sucesso"
http_return_code = 200
except Exception as e:
result = str(e)
http_return_code = 400
return result, http_return_code
def put(self):
req = request.get_json()
try:
name = req["nome"]
price = req["preco"]
service = Service(name, price)
service.validateFields()
self.interface.updateData(service.__dict__, "services", name)
result = "Serviço alterado com sucesso"
http_return_code = 200
except Exception as e:
result = str(e)
http_return_code = 400
return result, http_return_code
| python |
from hashlib import sha256
from tornado.web import HTTPError
from .db import Model, DoesNotExistError, NonUniqueError
from .game import Game
from .player import Player
from .location import Location
from .template import templater, inside_page
class Admin(Model):
_table = 'admin'
def __init__(self, id, name, password):
self.name = name
self.password = sha256(password.encode('utf-8')).hexdigest()
@classmethod
def no_users(cls):
        return Admin.select().fetchone() is None
@classmethod
def signup(cls, user, password):
hash = sha256(password.encode('utf-8')).hexdigest()
return Admin.add(name=user, password=hash)
@classmethod
def login(cls, user, password):
LOGIN = """SELECT * from {}
WHERE name = ? AND password = ?
""".format(cls._table)
hash = sha256(password.encode('utf-8')).hexdigest()
c = cls._sql(LOGIN, (user, hash))
if c.fetchone():
return True
else:
return False
@classmethod
def init_db(cls):
CREATE = """CREATE table {} (
id INTEGER PRIMARY KEY AUTOINCREMENT,
name TEXT(40) NOT NULL,
password TEXT(256) NOT NULL,
UNIQUE (name)
)""".format(cls._table)
cls._sql(CREATE)
def admin_template(game_id, game=None, players=None, locations=None, error=None) -> str:
admin = templater.load('admin.html').generate(game_id=game_id, game=game, players=players, locations=locations, error=error)
return inside_page(admin, game_id=game_id)
def admin(response, game_id=None):
loggedin = response.get_secure_cookie('loggedin')
if Admin.no_users():
response.redirect('/signup?game={}&failed=true'.format(game_id) if game_id != None else '/signup')
elif loggedin:
try:
game = Game.get(id=game_id)
game.disabled = is_disabled(game.disabled)
except DoesNotExistError:
game = None
error = None
try:
players = Player.list(game_id)
except NonUniqueError as err:
players = None
error = "Multiple death detected! Error message: " + str(err)
locations = list(Location.iter())
response.write(admin_template(game_id, game, players, locations, error))
else:
response.redirect('/login?game={}'.format(game_id) if game_id != None else '/login')
def signup_template(game_id, failed=False) -> str:
signup = templater.load('signup.html').generate(game_id=game_id, failed=failed)
return inside_page(signup, game_id=game_id)
def login_template(game_id, failed=False) -> str:
login = templater.load('login.html').generate(game_id=game_id, failed=failed)
return inside_page(login, game_id=game_id)
def disabled_template(game_id) -> str:
disabled = templater.load('disabled.html').generate(game_id=game_id)
return inside_page(disabled, game_id=game_id)
def signup_page(response):
game_id = response.get_field('game')
failed = response.get_field('failed')
return response.write(signup_template(game_id, failed))
def login_page(response):
game_id = response.get_field('game')
failed = response.get_field('failed')
return response.write(login_template(game_id, failed))
def signup(response):
game_id = response.get_field('game')
user = response.get_field('user')
password = response.get_field('password')
loggedin = response.get_secure_cookie('loggedin')
if loggedin or Admin.no_users():
Admin.signup(user, password)
response.set_secure_cookie('loggedin', str(True))
response.redirect('{}/admin'.format('/'+game_id if game_id else ''))
else:
response.redirect('/signup?game={}&failed=true'.format(game_id) if game_id != None else '/signup')
def login(response):
game_id = response.get_field('game')
user = response.get_field('user')
password = response.get_field('password')
correct_password = Admin.login(user, password)
if correct_password:
response.set_secure_cookie('loggedin', str(True))
response.redirect('{}/admin'.format('/'+game_id if game_id else ''))
else:
response.redirect('/login?game={}&failed=true'.format(game_id) if game_id != None else '/login')
def admin_only(handler):
def admin_handler(response, *args):
loggedin = response.get_secure_cookie('loggedin')
if loggedin:
handler(response, *args)
else:
raise HTTPError(403, "You do not have permission to perform this action")
return admin_handler
def is_disabled(disable):
print("IS_DISABLED", disable)
if str(disable).lower() in ['true', '1']:
disabled = True
elif str(disable).lower() in ['false', '0']:
disabled = False
else:
disabled = None
return disabled
@admin_only
def disable(response):
print(response)
game_id = response.get_field('game')
disable = response.get_field('disable')
disabled = is_disabled(disable)
    if game_id is not None and game_id != '' and disable is not None:
game = Game.get(id=game_id)
game.update(disabled=disabled)
def disableable(handler):
def disableable_handler(response, game_id=None, *args):
if game_id is None:
latest = Game.latest()
if latest is not None:
game_id, year, number = latest
else:
game_id = None
if game_id is not None:
game = Game.get(id=game_id)
disabled = is_disabled(game.disabled)
else:
disabled = False
loggedin = response.get_secure_cookie('loggedin')
if disabled and not loggedin:
response.write(disabled_template(game_id))
elif game_id != None:
handler(response, game_id, *args)
else:
handler(response, *args)
return disableable_handler
| python |
"""Orcaflex output plugin - using orcaflex API."""
import numpy as np
def to_orcaflex(self, model, minEnergy=1e-6):
"""Writes the spectrum to an Orcaflex model
Uses the orcaflex API (OrcFxAPI) to set the wave-data of the provided orcaflex model.
The axis system conversion used is:
- Orcaflex global X = Towards East
- Orcaflex global Y = Towards North
This function creates a wave-train for each of the directions in this spectrum using a user-defined spectrum.
Calculation of wave-components in orcaflex is computationally expensive. To save computational time:
    1. Use the minEnergy parameter of this function to define a threshold for the amount of energy in a wave-train.
2. In orcaflex itself: limit the amount of wave-components
3. Before exporting: regrid the spectrum to a lower amount of directions.
Orcaflex theory:
- https://www.orcina.com/webhelp/OrcaFlex/Content/html/Wavetheory.htm
- https://www.orcina.com/webhelp/OrcaFlex/Content/html/Directionconventions.htm
Example:
>>> from OrcFxAPI import *
>>> from wavespectra import read_triaxys
>>> m = Model()
    >>> spectrum = read_triaxys("triaxys.DIRSPEC").isel(time=0)  # get only the first spectrum in time
>>> spectrum.spec.to_orcaflex(m)
Args:
- model : orcaflex model (OrcFxAPI.model instance)
- minEnergy [1e-6] : threshold for minimum sum of energy in a direction before it is exported
Note:
- an Orcaflex license is required to work with the orcaflex API.
- Only 2D spectra E(f,d) are currently supported.
    - The DataArray should contain only a single spectrum. Hint: first_spectrum = spectra.isel(time=0)
"""
dirs = np.array(self.dir.values)
freqs = np.array(self.freq.values)
ddir = self.dd
# verify what all coordinates other than dir and freq are one
if not np.prod(self.efth.shape) == len(dirs) * len(freqs):
raise ValueError(
"The DataArray should contain only a single spectrum.\nHint: first_spetrum = spectra.isel(time=0)"
)
nTrains = 0
env = model.environment # alias
for dir in dirs:
e = self.efth.sel(dict(dir=dir)).values.flatten()
E = ddir * e
if np.sum(E) <= minEnergy:
continue
nTrains += 1
env.NumberOfWaveTrains = nTrains
env.SelectedWaveTrainIndex = nTrains - 1 # zero-based = f'Wave{nTrains}'
env.WaveDirection = np.mod(
90 - dir + 180, 360
) # convert from coming from to going to and from compass to ofx
env.WaveType = "User Defined Spectrum"
env.WaveNumberOfSpectralDirections = 1
# interior points in the spectrum with zero energy are not allowed by orcaflex
iFirst = np.where(E > 0)[0][0]
iLast = np.where(E > 0)[0][-1]
for i in range(iFirst, iLast):
if E[i] < 1e-10:
E[i] = 1e-10
if iFirst > 0:
iFirst -= 1
if iLast < len(E) - 2:
iLast += 1
env.WaveNumberOfUserSpectralPoints = len(E[iFirst:iLast])
env.WaveSpectrumS = E[iFirst:iLast]
env.WaveSpectrumFrequency = freqs[iFirst:iLast]
        env.WaveType = 'Airy' # Temporarily set the wave-type to Airy. This avoids re-calculation of
                              # the spectral properties each time the next train is set (this can slow
                              # things down considerably when using many spectral components).
                              # !thank you people at orcina for your help solving this!
# When all data is set, restore all trains to 'user-defined'. The data that we set earlier
# will still be there.
for env.SelectedWaveTrainIndex in range(nTrains):
env.WaveType = 'User Defined Spectrum'
if nTrains == 0:
raise ValueError(
"No data exported, no directions with more than the minimum amount of energy"
)
| python |
import subprocess
import sys
with open('out.txt','w+') as fout:
with open('err.txt','w+') as ferr:
out=subprocess.call(["./bash-script-with-bad-syntax"],stdout=fout,stderr=ferr)
fout.seek(0)
print('output:')
print(fout.read())
ferr.seek(0)
print('error:')
print(ferr.read())
| python |
# expected an ident after the ':'
def x(y):
z=1
| python |
# encoding: utf-8
# Copyright 2011 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
'''Curator: interface'''
from zope.interface import Interface
from zope import schema
from ipdasite.services import ProjectMessageFactory as _
class ICurator(Interface):
'''A person and agency that is responsible for a service.'''
title = schema.TextLine(
title=_(u'Name'),
description=_(u'Name of this curator.'),
required=True,
)
description = schema.Text(
title=_(u'Description'),
description=_(u'A short summary of this curator, used in free-text searches.'),
required=False,
)
contactName = schema.TextLine(
title=_(u'Contact Name'),
description=_(u'Name of a person who curates one or more services.'),
required=False,
)
emailAddress = schema.TextLine(
title=_(u'Email Address'),
description=_(u'Contact address for a person or workgroup that curates services.'),
required=False,
)
telephone = schema.TextLine(
title=_(u'Telephone'),
description=_(u'Public telephone number in international format in order to contact this curator.'),
required=False,
)
| python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('wod_rules', '0006_auto_20150414_1606'),
]
operations = [
migrations.RemoveField(
model_name='merit',
name='content_type',
),
migrations.RemoveField(
model_name='merit',
name='object_id',
),
migrations.RemoveField(
model_name='specialty',
name='content_type',
),
migrations.RemoveField(
model_name='specialty',
name='object_id',
),
]
| python |
#!/usr/bin/env python
# Copyright 2019 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import unittest
import environment
import tablet
import utils
shard_0_master = tablet.Tablet()
shard_0_replica1 = tablet.Tablet()
shard_0_replica2 = tablet.Tablet()
shard_0_rdonly = tablet.Tablet()
shard_0_backup = tablet.Tablet()
shard_1_master = tablet.Tablet()
shard_1_replica1 = tablet.Tablet()
shard_2_master = tablet.Tablet()
shard_2_replica1 = tablet.Tablet()
# shard_2 tablets shouldn't exist yet when _apply_initial_schema() is called.
initial_tablets = [
shard_0_master, shard_0_replica1, shard_0_replica2, shard_0_rdonly,
shard_0_backup, shard_1_master, shard_1_replica1,
]
shard_2_tablets = [shard_2_master, shard_2_replica1]
all_tablets = initial_tablets + shard_2_tablets
test_keyspace = 'test_keyspace'
db_name = 'vt_' + test_keyspace
def setUpModule():
try:
environment.topo_server().setup()
_init_mysql(all_tablets)
utils.run_vtctl(['CreateKeyspace', test_keyspace])
utils.Vtctld().start(enable_schema_change_dir=True)
except Exception as setup_exception: # pylint: disable=broad-except
try:
tearDownModule()
except Exception as e: # pylint: disable=broad-except
logging.exception('Tearing down a failed setUpModule() failed: %s', e)
raise setup_exception
def _init_mysql(tablets):
setup_procs = []
for t in tablets:
setup_procs.append(t.init_mysql())
utils.wait_procs(setup_procs)
def _setup_shard_2():
shard_2_master.init_tablet('replica', test_keyspace, '2')
shard_2_replica1.init_tablet('replica', test_keyspace, '2')
# create databases, start the tablets
for t in shard_2_tablets:
t.create_db(db_name)
t.start_vttablet(wait_for_state=None)
# wait for the tablets to start
shard_2_master.wait_for_vttablet_state('NOT_SERVING')
shard_2_replica1.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', '-force', test_keyspace + '/2',
shard_2_master.tablet_alias], auto_log=True)
utils.run_vtctl(['ValidateKeyspace', '-ping-tablets', test_keyspace])
def _teardown_shard_2():
tablet.kill_tablets(shard_2_tablets)
utils.run_vtctl(
['DeleteShard', '-recursive', '-even_if_serving', 'test_keyspace/2'],
auto_log=True)
for t in shard_2_tablets:
t.reset_replication()
t.set_semi_sync_enabled(master=False)
t.clean_dbs()
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
teardown_procs = []
for t in all_tablets:
teardown_procs.append(t.teardown_mysql())
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
for t in all_tablets:
t.remove_tree()
class TestSchema(unittest.TestCase):
def setUp(self):
shard_0_master.init_tablet('replica', test_keyspace, '0')
shard_0_replica1.init_tablet('replica', test_keyspace, '0')
shard_0_replica2.init_tablet('replica', test_keyspace, '0')
shard_0_rdonly.init_tablet('rdonly', test_keyspace, '0')
shard_0_backup.init_tablet('backup', test_keyspace, '0')
shard_1_master.init_tablet('replica', test_keyspace, '1')
shard_1_replica1.init_tablet('replica', test_keyspace, '1')
# create databases, start the tablets
for t in initial_tablets:
t.create_db(db_name)
t.start_vttablet(wait_for_state=None)
# wait for the tablets to start
for t in initial_tablets:
t.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', '-force', test_keyspace + '/0',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', test_keyspace + '/1',
shard_1_master.tablet_alias], auto_log=True)
def tearDown(self):
# kill all tablets
tablet.kill_tablets(initial_tablets)
for t in initial_tablets:
t.reset_replication()
t.set_semi_sync_enabled(master=False)
t.clean_dbs()
utils.run_vtctl(['DeleteShard', '-recursive', '-even_if_serving',
test_keyspace + '/0'], auto_log=True)
utils.run_vtctl(['DeleteShard', '-recursive', '-even_if_serving',
test_keyspace + '/1'], auto_log=True)
def _check_tables(self, tablet_obj, expected_count):
tables = tablet_obj.mquery(db_name, 'show tables')
self.assertEqual(
len(tables), expected_count,
'Unexpected table count on %s (not %d): got tables: %s' %
(tablet_obj.tablet_alias, expected_count, str(tables)))
def _apply_schema(self, keyspace, sql, expect_fail=False):
return utils.run_vtctl(['ApplySchema',
'-sql=' + sql,
keyspace],
expect_fail=expect_fail, auto_log=True)
def _get_schema(self, tablet_alias):
return utils.run_vtctl_json(['GetSchema',
tablet_alias])
def _create_test_table_sql(self, table):
return (
'CREATE TABLE %s (\n'
'`id` BIGINT(20) not NULL,\n'
'`msg` varchar(64),\n'
'PRIMARY KEY (`id`)\n'
') ENGINE=InnoDB') % table
def _alter_test_table_sql(self, table, index_column_name):
return (
'ALTER TABLE %s\n'
'ADD COLUMN new_id bigint(20) NOT NULL AUTO_INCREMENT FIRST,\n'
'DROP PRIMARY KEY,\n'
'ADD PRIMARY KEY (new_id),\n'
'ADD INDEX idx_column(%s)\n') % (table, index_column_name)
def _apply_initial_schema(self):
schema_changes = ';'.join([
self._create_test_table_sql('vt_select_test01'),
self._create_test_table_sql('vt_select_test02'),
self._create_test_table_sql('vt_select_test03'),
self._create_test_table_sql('vt_select_test04')])
# apply schema changes to the test keyspace
self._apply_schema(test_keyspace, schema_changes)
# check number of tables
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
# get schema for each shard
shard_0_schema = self._get_schema(shard_0_master.tablet_alias)
shard_1_schema = self._get_schema(shard_1_master.tablet_alias)
# all shards should have the same schema
self.assertEqual(shard_0_schema, shard_1_schema)
def test_schema_changes(self):
self._apply_initial_schema()
self._apply_schema(
test_keyspace, self._alter_test_table_sql('vt_select_test03', 'msg'))
shard_0_schema = self._get_schema(shard_0_master.tablet_alias)
shard_1_schema = self._get_schema(shard_1_master.tablet_alias)
# all shards should have the same schema
self.assertEqual(shard_0_schema, shard_1_schema)
# test schema changes
os.makedirs(os.path.join(utils.vtctld.schema_change_dir, test_keyspace))
input_path = os.path.join(
utils.vtctld.schema_change_dir, test_keyspace, 'input')
os.makedirs(input_path)
sql_path = os.path.join(input_path, 'create_test_table_x.sql')
with open(sql_path, 'w') as handler:
handler.write('create table test_table_x (id int)')
# wait until this sql file being consumed by autoschema
timeout = 10
while os.path.isfile(sql_path):
timeout = utils.wait_step(
'waiting for vtctld to pick up schema changes',
timeout, sleep_time=0.2)
# check number of tables
self._check_tables(shard_0_master, 5)
self._check_tables(shard_1_master, 5)
def test_schema_changes_drop_and_create(self):
"""Tests that a DROP and CREATE table will pass PreflightSchema check.
PreflightSchema checks each SQL statement separately. When doing so, it must
consider previous statements within the same ApplySchema command. For
example, a CREATE after DROP must not fail: When CREATE is checked, DROP
must have been executed first.
See: https://github.com/vitessio/vitess/issues/1731#issuecomment-222914389
"""
self._apply_initial_schema()
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
drop_and_create = ('DROP TABLE vt_select_test01;\n' +
self._create_test_table_sql('vt_select_test01'))
self._apply_schema(test_keyspace, drop_and_create)
# check number of tables
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
def test_schema_changes_preflight_errors_partially(self):
"""Tests that some SQL statements fail properly during PreflightSchema."""
self._apply_initial_schema()
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
# Second statement will fail because the table already exists.
create_error = (self._create_test_table_sql('vt_select_test05') + ';\n' +
self._create_test_table_sql('vt_select_test01'))
stdout = self._apply_schema(test_keyspace, create_error, expect_fail=True)
self.assertIn('already exists', ''.join(stdout))
# check number of tables
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
def test_schema_changes_drop_nonexistent_tables(self):
"""Tests the PreflightSchema logic for dropping nonexistent tables.
If a table does not exist, DROP TABLE should error during preflight
because the statement does not change the schema as there is
nothing to drop.
In case of DROP TABLE IF EXISTS though, it should not error as this
is the MySQL behavior the user expects.
"""
self._apply_initial_schema()
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
drop_table = ('DROP TABLE nonexistent_table;')
stdout = self._apply_schema(test_keyspace, drop_table, expect_fail=True)
self.assertIn('Unknown table', ''.join(stdout))
# This Query may not result in schema change and should be allowed.
drop_if_exists = ('DROP TABLE IF EXISTS nonexistent_table;')
self._apply_schema(test_keyspace, drop_if_exists)
self._check_tables(shard_0_master, 4)
self._check_tables(shard_1_master, 4)
def test_vtctl_copyschemashard_use_tablet_as_source(self):
self._test_vtctl_copyschemashard(shard_0_master.tablet_alias)
def test_vtctl_copyschemashard_use_shard_as_source(self):
self._test_vtctl_copyschemashard('test_keyspace/0')
def _test_vtctl_copyschemashard(self, source):
# Apply initial schema to the whole keyspace before creating shard 2.
self._apply_initial_schema()
_setup_shard_2()
try:
# InitShardMaster creates the db, but there shouldn't be any tables yet.
self._check_tables(shard_2_master, 0)
self._check_tables(shard_2_replica1, 0)
# Run the command twice to make sure it's idempotent.
for _ in range(2):
utils.run_vtctl(['CopySchemaShard',
source,
'test_keyspace/2'],
auto_log=True)
# shard_2_master should look the same as the replica we copied from
self._check_tables(shard_2_master, 4)
utils.wait_for_replication_pos(shard_2_master, shard_2_replica1)
self._check_tables(shard_2_replica1, 4)
shard_0_schema = self._get_schema(shard_0_master.tablet_alias)
shard_2_schema = self._get_schema(shard_2_master.tablet_alias)
self.assertEqual(shard_0_schema, shard_2_schema)
finally:
_teardown_shard_2()
def test_vtctl_copyschemashard_different_dbs_should_fail(self):
# Apply initial schema to the whole keyspace before creating shard 2.
self._apply_initial_schema()
_setup_shard_2()
try:
# InitShardMaster creates the db, but there shouldn't be any tables yet.
self._check_tables(shard_2_master, 0)
self._check_tables(shard_2_replica1, 0)
# Change the db charset on the destination shard from utf8 to latin1.
# This will make CopySchemaShard fail during its final diff.
# (The different charset won't be corrected on the destination shard
# because we use "CREATE DATABASE IF NOT EXISTS" and this doesn't fail if
# there are differences in the options e.g. the character set.)
shard_2_schema = self._get_schema(shard_2_master.tablet_alias)
self.assertIn('utf8', shard_2_schema['database_schema'])
utils.run_vtctl_json(
['ExecuteFetchAsDba', '-json', shard_2_master.tablet_alias,
'ALTER DATABASE vt_test_keyspace CHARACTER SET latin1'])
_, stderr = utils.run_vtctl(['CopySchemaShard',
'test_keyspace/0',
'test_keyspace/2'],
expect_fail=True,
auto_log=True)
self.assertIn('schemas are different', stderr)
# shard_2_master should have the same number of tables. Only the db
# character set is different.
self._check_tables(shard_2_master, 4)
finally:
_teardown_shard_2()
if __name__ == '__main__':
utils.main()
| python |
from django.shortcuts import redirect, render
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.views.generic import (CreateView, UpdateView, DetailView, TemplateView, View, DeleteView,ListView)
from django.shortcuts import render, redirect, get_object_or_404
from django.http import (HttpResponseRedirect,JsonResponse, HttpResponse,Http404)
from django.contrib import messages
from django.contrib.auth.hashers import check_password
from django.contrib.auth import authenticate
from django.contrib.auth import login as login_django
from django.contrib.auth import logout as logout_django
from django.contrib.auth.decorators import login_required
from django.contrib.auth import update_session_auth_hash
from apps.usuario.templatetags.utils import get_ip
from django.urls import reverse_lazy, reverse
from django.contrib.auth.decorators import login_required
import json
from apps.usuario.form.forms_perfil import LoginUsuarioPerfilForm,\
PasswordUsuarioPerfilForm,EditarUsuarioPerfilForm,\
PerfilFrom
from django.db.models import Q
from apps.usuario.models import Perfil
from apps.contrato.models import Persona
from apps.contrato.models import Cliente
from apps.terreno.models import Manzano,Lote
#Login
class LoginPerfilView(TemplateView,LoginRequiredMixin):
login_url = 'usuario:index'
template_name = "sigetebr/apps/usuario/index.html"#url
success_url = reverse_lazy("usuario:dashboard")#ur
def get_context_data(self, **kwargs):
context = super(LoginPerfilView, self).get_context_data(**kwargs)
return context
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated:
return HttpResponseRedirect(self.success_url)
return super(LoginPerfilView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
form = LoginUsuarioPerfilForm(request.POST, request=request)
if form.is_valid():
#user = Perfil.objects.filter(usuario=request.POST.get('usuario')).first()
perfil = Perfil.objects.filter(usuario=request.POST.get('usuario')).first()
if perfil is not None:
if perfil.estado:
perfil = authenticate(
usuario=request.POST.get('usuario'),
password=request.POST.get('password'))
if perfil is not None:
login_django(request, perfil)
return redirect('usuario:dashboard')
#return HttpResponseRedirect('usuarios:dashboard')
return render(request, self.template_name, {
"error": True,
"message": "Tu nombre de usuario y contraseña no coinciden. Inténtalo de nuevo."}
)
return render(request, self.template_name, {
"error": True,
"message": "Su cuenta está inactiva. Por favor, póngase en contacto con el administrador"}
)
return render(request, self.template_name, {
"error": True,
"message": "Tu cuenta no se encuentra. Por favor, póngase en contacto con el administrador"}
)
return render(request, self.template_name, {
# "error": True,
# "message": "Tu nombre de Usuario y Contraseña no coinciden. Inténtalo de nuevo."
"form": form
})
#Dashboard
class DashboardView(LoginRequiredMixin,TemplateView):
template_name = 'sigetebr/apps/dashboard.html'
login_url = 'usuario:index'
def get_context_data(self, **kwargs):
context = super(DashboardView, self).get_context_data(**kwargs)
manzanostodo = Manzano.objects.all()
manzanosactiva = Manzano.objects.exclude(estado='False')
context["manzanos"] = manzanostodo
context["manzano_count"] = manzanosactiva
lotestodo = Lote.objects.all()
lotesactiva = Lote.objects.exclude(estado='False')
context["lotes"] = lotestodo
context["lote_count"] = lotesactiva
usuariotodo = Perfil.objects.all()
usuariodmin = Perfil.objects.exclude(is_superuser='True')
usuarioactiva = Perfil.objects.exclude(is_active='True')
context["usuario_count"] = usuarioactiva
context["usuarios"] = usuariotodo
personatodo = Persona.objects.all()
personaactiva = Persona.objects.exclude(estado='False')
context["persona_count"] = personaactiva
context["personas"] = personatodo
clientetodo = Cliente.objects.all()
clienteactiva = Cliente.objects.exclude(estado='False')
context["cliente_count"] = clienteactiva
context["clientes"] = clientetodo
return context
"""
Funciones
"""
# Logout
@login_required(login_url='usuario:index')
def LogoutView(request):
logout_django(request)
return redirect('usuario:index')
# User profile detail
class UsuarioPerfilDetalleView(LoginRequiredMixin,DetailView):
model = Perfil
template_name = 'sigetebr/apps/usuario/configuracion/perfil_usuario.html' # url
    slug_field = 'usuario'  # which database field
    slug_url_kwarg = 'usuario_url'  # which URL field
login_url = 'usuarios:index'
# User profile update
class UsuarioPerfilEditarView(SuccessMessageMixin,LoginRequiredMixin,UpdateView):
model = Perfil
form_class = EditarUsuarioPerfilForm
template_name = 'sigetebr/apps/usuario/configuracion/perfil_form.html' # url
success_url = reverse_lazy('usuarios:perfil_actualizar')
# success_message = "Tu usuario ha sido actualizado"
context_object_name = "user_obj"
login_url = 'usuarios:index'
def form_valid(self, form):
messages.success(self.request, "Tu Perfil Usuario ha sido actualizado")
return super(UsuarioPerfilEditarView, self).form_valid(form)
def get_object(self, queryset=None):
return self.request.user
# User profile password update
@login_required(login_url='usuarios:index')
def passwordusuarioview(request):
template_name = 'sigetebr/apps/usuario/configuracion/perfil_password.html' # url
form = PasswordUsuarioPerfilForm(request.POST or None)
if request.method == 'POST':
if form.is_valid():
actual = request.POST.get('password')
nuevo = request.POST.get('password')
            confirma = request.POST.get('confimar_password')
print(actual)
print(nuevo)
print(confirma)
if not check_password(request.POST.get('password'), request.user.password):
messages.warning(request, 'Password Actual no coinciden!')
else:
if authenticate(usuario = request.user.usuario,password = request.POST.get('password')):
request.user.set_password(request.POST.get('new_password'))
request.user.save()
update_session_auth_hash(request, request.user)
messages.success(request, 'Password Actualizado!')
#redirect()
else:
messages.error(request, 'Verifique su Password por favor!')
context = {'form': form}
return render(request, template_name, context)
USUARIO_FIELDS = [
{'string': 'N°', 'field': 'numero'},
{'string': 'Usuario', 'field': 'usuario'},
{'string': 'Nombres', 'field': 'nombre'},
{'string': 'Email', 'field': 'email'},
{'string': 'Roles', 'field': 'roles'},
{'string': 'Estado', 'field': 'estado'},
{'string': 'Acciones', 'field': 'acciones'},
]
#class PerfilListarView(LoginRequiredMixin,generic.ListView):
class PerfilListarView(LoginRequiredMixin,TemplateView):
model = Perfil
template_name = "sigetebr/apps/usuario/perfil/listar.html"
#context_object_name = "list_usuario"
login_url = 'usuario:index'
def get_queryset(self):
queryset = self.model.objects.all()
request_post = self.request.POST
print(request_post,"Usuario")
if request_post:
if request_post.get('usuario'):
queryset = queryset.filter(
usuario__icontains=request_post.get('usuario'))
if request_post.get('email'):
queryset = queryset.filter(
email__icontains=request_post.get('email'))
print(queryset, "Resultado")
return queryset
def get_context_data(self, **kwargs):
context = super(PerfilListarView, self).get_context_data(**kwargs)
context["list_perfil"] = self.get_queryset()
context['fields'] = USUARIO_FIELDS
context["per_page"] = self.request.POST.get('per_page')
search = False
if (
self.request.POST.get('usuario') or
self.request.POST.get('email')
):
search = True
context["search"] = search
return context
def post(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
# Create profile
class PerfilCrearView(SuccessMessageMixin,LoginRequiredMixin,CreateView):
model = Perfil
template_name = "sigetebr/apps/usuario/perfil/form.html"
context_object_name = "obj"
form_class = PerfilFrom
success_url = reverse_lazy("usuario:listar_perfil")
success_message = "Perfil de Usuario Creado Exitosamente"
login_url = 'usuario:index'
# Edit profile
class PerfilEditarView(SuccessMessageMixin,LoginRequiredMixin,UpdateView):
model = Perfil
template_name = "sigetebr/apps/usuario/perfil/form.html"
context_object_name = "obj_usuario"
form_class = PerfilFrom
success_url = reverse_lazy("usuario:listar_perfil")
success_message = "Perfil de Usuario Actualizada Satisfactoriamente"
login_url = 'usuario:index'
# Profile detail
class PerfilDetallesView(LoginRequiredMixin,DetailView):
model = Perfil
template_name = 'sigetebr/apps/usuario/perfil/detalle.html'#url
    slug_field = 'usuario'  # which database field
    context_object_name = 'obj'
    slug_url_kwarg = 'usuario_url'  # which URL field
login_url = 'usuario:index'
# Delete profile
class PerfilEliminarView(SuccessMessageMixin,LoginRequiredMixin,DeleteView):
model = Perfil
template_name='sigetebr/apps/usuario/perfil/eliminar.html'
context_object_name='obj'
success_url = reverse_lazy("usuario:listar_perfil")
success_message="Perfil de Usuario Eliminada Exitosamente"
login_url = 'usuario:index'
# Deactivate
@login_required(login_url='usuario:index')
def perfildesactivar(request, id):
perfil = Perfil.objects.filter(pk=id).first()
contexto={}
template_name = 'sigetebr/apps/usuario/perfil/estado_desactivar.html'#url
if not perfil:
return redirect('usuario:listar_perfil')
if request.method=='GET':
contexto={'obj':perfil}
if request.method=='POST':
perfil.estado=False
perfil.save()
return redirect('usuario:listar_perfil')
return render(request,template_name,contexto)
# Activate
@login_required(login_url='usuario:index')
def perfilactivar(request, id):
perfil = Perfil.objects.filter(pk=id).first()
contexto={}
template_name = 'sigetebr/apps/usuario/perfil/estado_activar.html'#url
if not perfil:
return redirect('usuario:listar_perfil')
if request.method=='GET':
contexto={'obj':perfil}
if request.method=='POST':
perfil.estado=True
perfil.save()
return redirect('usuario:listar_perfil')
return render(request,template_name,contexto)
# Status
@login_required(login_url='usuario:index')
def cambiar_estado_perfil(request, pk):
perfil = get_object_or_404(Perfil, pk=pk)
if perfil.estado:
perfil.estado = False
messages.error(request, "Perfil de Usuario Desactivada")
else:
perfil.estado = True
messages.success(request, "Perfil de Usuario Activada")
perfil.um = request.user.id
perfil.save()
return redirect('usuario:listar_perfil')
| python |
import logging
# Setup basic logging
logging.basicConfig(
format='%(asctime)s : %(levelname)s : %(name)s : %(message)s',
level=logging.WARNING
)
from flask import Flask
from flask_uuid import FlaskUUID
from flask_migrate import Migrate
from simple_events.apis import api
from simple_events.models import db, bcrypt
from simple_events.core.utils import get_app_settings
app = Flask(__name__, instance_relative_config=True)
# Get settings
app_settings = get_app_settings()
app.config.from_object(app_settings)
# Initialise UUID extension
FlaskUUID(app)
# Initialise DB
db.init_app(app)
# Initialise Bcrypt
bcrypt.init_app(app)
# Initialise API
api.init_app(app)
migrate = Migrate(app, db)
if __name__ == '__main__':
app.run(debug=True)
| python |
import graphene
class SystemQueries(graphene.ObjectType):
hello = graphene.String(name=graphene.String(default_value="stranger"))
def resolve_hello(self, info, name):
return 'Hello ' + name
root_schema = graphene.Schema(query=SystemQueries)
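# Usage sketch (graphene's synchronous execute API; the query string below is only
# illustrative):
#
#   result = root_schema.execute('{ hello(name: "world") }')
#   print(result.data["hello"])  # -> "Hello world"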
| python |
from .version import VERSION
from .SoapLibrary import SoapLibrary
class SoapLibrary(SoapLibrary):
"""
SoapLibrary is a library for testing SOAP-based web services.
SoapLibrary is based on [https://python-zeep.readthedocs.io/en/master/|Zeep], a modern SOAP client for Python.
This library is designed for those who want to work with webservice automation as if they were using SoapUI,
make a request through an XML file, and receive the response in another XML file.
= Example =
| ***** Settings *****
| Library SoapLibrary
| Library OperatingSystem
|
| ***** Test Cases *****
| Example
| Create Soap Client http://endpoint.com/example.asmx?wsdl
| ${response} Call SOAP Method With XML ${CURDIR}/request.xml
| ${text} Get Data From XML By Tag ${response} tag_name
| Log ${text}
| Save XML To File ${response} ${CURDIR} response_test
"""
def __init__(self):
pass
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
ROBOT_LIBRARY_VERSION = VERSION
| python |
import argparse
from os import path
from datetime import datetime
import logging
from logging.config import fileConfig
import tempfile
from dicom.dataset import Dataset
from pydicom.datadict import tag_for_name, dictionaryVR
from mip import Pacs, DicomAnonymizer
# parse commandline
parser = argparse.ArgumentParser(description='Download and anonymize files from a PACS system')
#--------------- PACS options ------------------
parser.add_argument('remotehost')
parser.add_argument('remoteport', type=int)
parser.add_argument('-p', '--port', help='local server port', type=int, default=1234)
parser.add_argument('-t','--aet', help='calling AET title', default='HBP')
parser.add_argument('-c','--aec', help='calling AEC call, the data-store', default='COMMON')
parser.add_argument('keys', metavar='KEY', type=str, nargs='+', help='search keys')
parser.add_argument('-l','--log', help='configuration log file', default='logging.ini')
parser.add_argument('-r','--queryRetrieveLevel', help='query retrieve level', default='PATIENT')
args = parser.parse_args()
if path.isfile(args.log):
fileConfig(args.log)
else:
logging.warning("could not find configuration log file '%s'" % args.log)
#starts our pacs instance
pacs = Pacs( args.port,
args.aet)
pacs.connect(args.remotehost,
args.remoteport,
args.aec)
ds = Dataset()
ds.QueryRetrieveLevel = args.queryRetrieveLevel
for k in args.keys:
parts=k.split('=')
tag = tag_for_name(parts[0])
ds.add_new(tag, dictionaryVR(tag) , parts[1])
items = pacs.query(ds)
for i in items:
    print('---')
    print(i)
| python |
# Copyright (c) 2013, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
import numpy as np
from ...util.linalg import tdot
from ...util.caching import Cache_this
four_over_tau = 2./np.pi
class MLP(Kern):
"""
Multi layer perceptron kernel (also known as arc sine kernel or neural network kernel)
.. math::
          k(x,y) = \\sigma^{2}\\frac{2}{\\pi } \\text{asin} \\left ( \\frac{ \\sigma_w^2 x^\\top y+\\sigma_b^2}{\\sqrt{\\sigma_w^2x^\\top x + \\sigma_b^2 + 1}\\sqrt{\\sigma_w^2 y^\\top y + \\sigma_b^2 +1}} \\right )
:param input_dim: the number of input dimensions
:type input_dim: int
:param variance: the variance :math:`\sigma^2`
:type variance: float
:param weight_variance: the vector of the variances of the prior over input weights in the neural network :math:`\sigma^2_w`
:type weight_variance: array or list of the appropriate size (or float if there is only one weight variance parameter)
:param bias_variance: the variance of the prior over bias parameters :math:`\sigma^2_b`
:param ARD: Auto Relevance Determination. If equal to "False", the kernel is isotropic (ie. one weight variance parameter \sigma^2_w), otherwise there is one weight variance parameter per dimension.
:type ARD: Boolean
:rtype: Kernpart object
"""
def __init__(self, input_dim, variance=1., weight_variance=1., bias_variance=1., ARD=False, active_dims=None, name='mlp'):
super(MLP, self).__init__(input_dim, active_dims, name)
self.variance = Param('variance', variance, Logexp())
self.ARD= ARD
if ARD:
wv = np.empty((input_dim,))
wv[:] = weight_variance
weight_variance = wv
self.weight_variance = Param('weight_variance', weight_variance, Logexp())
self.bias_variance = Param('bias_variance', bias_variance, Logexp())
self.link_parameters(self.variance, self.weight_variance, self.bias_variance)
@Cache_this(limit=20, ignore_args=())
def K(self, X, X2=None):
if X2 is None:
X_denom = np.sqrt(self._comp_prod(X)+1.)
X2_denom = X_denom
X2 = X
else:
X_denom = np.sqrt(self._comp_prod(X)+1.)
X2_denom = np.sqrt(self._comp_prod(X2)+1.)
XTX = self._comp_prod(X,X2)/X_denom[:,None]/X2_denom[None,:]
return self.variance*four_over_tau*np.arcsin(XTX)
@Cache_this(limit=20, ignore_args=())
def Kdiag(self, X):
"""Compute the diagonal of the covariance matrix for X."""
X_prod = self._comp_prod(X)
return self.variance*four_over_tau*np.arcsin(X_prod/(X_prod+1.))
def update_gradients_full(self, dL_dK, X, X2=None):
"""Derivative of the covariance with respect to the parameters."""
dvar, dw, db = self._comp_grads(dL_dK, X, X2)[:3]
self.variance.gradient = dvar
self.weight_variance.gradient = dw
self.bias_variance.gradient = db
def update_gradients_diag(self, dL_dKdiag, X):
dvar, dw, db = self._comp_grads_diag(dL_dKdiag, X)[:3]
self.variance.gradient = dvar
self.weight_variance.gradient = dw
self.bias_variance.gradient = db
def gradients_X(self, dL_dK, X, X2):
"""Derivative of the covariance matrix with respect to X"""
return self._comp_grads(dL_dK, X, X2)[3]
def gradients_X_X2(self, dL_dK, X, X2):
"""Derivative of the covariance matrix with respect to X"""
return self._comp_grads(dL_dK, X, X2)[3:]
def gradients_X_diag(self, dL_dKdiag, X):
"""Gradient of diagonal of covariance with respect to X"""
return self._comp_grads_diag(dL_dKdiag, X)[3]
@Cache_this(limit=50, ignore_args=())
def _comp_prod(self, X, X2=None):
if X2 is None:
return (np.square(X)*self.weight_variance).sum(axis=1)+self.bias_variance
else:
return (X*self.weight_variance).dot(X2.T)+self.bias_variance
@Cache_this(limit=20, ignore_args=(1,))
def _comp_grads(self, dL_dK, X, X2=None):
var,w,b = self.variance, self.weight_variance, self.bias_variance
K = self.K(X, X2)
dvar = (dL_dK*K).sum()/var
X_prod = self._comp_prod(X)
X2_prod = self._comp_prod(X2) if X2 is not None else X_prod
XTX = self._comp_prod(X,X2) if X2 is not None else self._comp_prod(X, X)
common = var*four_over_tau/np.sqrt((X_prod[:,None]+1.)*(X2_prod[None,:]+1.)-np.square(XTX))*dL_dK
if self.ARD:
if X2 is not None:
XX2 = X[:,None,:]*X2[None,:,:] if X2 is not None else X[:,None,:]*X[None,:,:]
XX = np.square(X)
X2X2 = np.square(X2)
Q = self.weight_variance.shape[0]
common_XTX = common*XTX
dw = np.dot(common.flat,XX2.reshape(-1,Q)) -( (common_XTX.sum(1)/(X_prod+1.)).T.dot(XX)+(common_XTX.sum(0)/(X2_prod+1.)).dot(X2X2))/2
else:
XX2 = X[:,None,:]*X[None,:,:]
XX = np.square(X)
Q = self.weight_variance.shape[0]
common_XTX = common*XTX
dw = np.dot(common.flat,XX2.reshape(-1,Q)) - ((common_XTX.sum(0)+common_XTX.sum(1))/(X_prod+1.)).dot(XX)/2
else:
dw = (common*((XTX-b)/w-XTX*(((X_prod-b)/(w*(X_prod+1.)))[:,None]+((X2_prod-b)/(w*(X2_prod+1.)))[None,:])/2.)).sum()
db = (common*(1.-XTX*(1./(X_prod[:,None]+1.)+1./(X2_prod[None,:]+1.))/2.)).sum()
if X2 is None:
common = common+common.T
dX = common.dot(X)*w-((common*XTX).sum(axis=1)/(X_prod+1.))[:,None]*X*w
dX2 = dX
else:
dX = common.dot(X2)*w-((common*XTX).sum(axis=1)/(X_prod+1.))[:,None]*X*w
dX2 = common.T.dot(X)*w-((common*XTX).sum(axis=0)/(X2_prod+1.))[:,None]*X2*w
return dvar, dw, db, dX, dX2
@Cache_this(limit=20, ignore_args=(1,))
def _comp_grads_diag(self, dL_dKdiag, X):
var,w,b = self.variance, self.weight_variance, self.bias_variance
K = self.Kdiag(X)
dvar = (dL_dKdiag*K).sum()/var
X_prod = self._comp_prod(X)
common = var*four_over_tau/(np.sqrt(1-np.square(X_prod/(X_prod+1)))*np.square(X_prod+1))*dL_dKdiag
if self.ARD:
XX = np.square(X)
dw = np.dot(common,XX)
else:
dw = (common*(X_prod-b)).sum()/w
db = common.sum()
dX = common[:,None]*X*w*2
return dvar, dw, db, dX
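# Usage sketch (assumes a working GPy installation; this class is normally reached
# through the public alias GPy.kern.MLP):
#
#   import numpy as np, GPy
#   k = GPy.kern.MLP(input_dim=2, variance=1.0, weight_variance=1.0, bias_variance=1.0)
#   X = np.random.randn(5, 2)
#   K = k.K(X)  # 5x5 covariance matrix built from the arcsine formula above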
| python |
from .socket_provider import SocketProvider
from .pcapy_provider import PcapyProvider
from .provider import Provider
from core.exceptions import *
class ProviderType():
Socket = "SocketProvider"
Pcapy = "PcapyProvider"
def create(providerType, device=None):
return globals()[providerType](device) | python |
# Python3 Finding Lowest Common Ancestor in Binary Tree ----> O(N)
def find_lca_bt(root, n1, n2):
    if not root:
        return None
    # base case: if the current node is one of the targets, it is the ancestor on this path
    if root.data == n1 or root.data == n2:
        return root
left_lca = find_lca_bt(root.left, n1, n2)
right_lca = find_lca_bt(root.right, n1, n2)
if left_lca and right_lca:
return root
return left_lca if left_lca else right_lca
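# Minimal usage sketch for the helper above (Node is a hypothetical class exposing
# the .data/.left/.right attributes these functions access):
#
#   class Node:
#       def __init__(self, data, left=None, right=None):
#           self.data, self.left, self.right = data, left, right
#
#   root = Node(1, Node(2, Node(4), Node(5)), Node(3))
#   lca = find_lca_bt(root, 4, 5)  # returns the Node holding 2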
# Python3 Finding Lowest Common Ancestor in Binary Seacrh Tree ----> O(logN)
def find_lca_bst(root, n1, n2):
if not root:
return None
if root.data > n1 and root.data > n2:
        return find_lca_bst(root.left, n1, n2)
    if root.data < n1 and root.data < n2:
        return find_lca_bst(root.right, n1, n2)
return root | python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. moduleauthor:: hbldh <[email protected]>
Created on 2015-11-13
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from pkg_resources import resource_filename
import numpy as np
__all__ = ["C", "WEIGHTS", "f_h"]
# An array of C parameter values for which weights have been pre-calculated.
C = np.load(resource_filename("lspopt.data", "c.npy")).flatten()
# The pre-calculated Hermite polynomial coefficients
# for the C parameter values above.
WEIGHTS = np.load(resource_filename("lspopt.data", "weights.npy"))
def f_h(n, k):
"""Returns f_h value.
:param n: Window length of multitaper windows.
:type n: int
:param k: Length of non-zero Hermite polynomial coefficient array.
:type k: int
:return: The f_h value.
:rtype: float
"""
return n / _K_TO_VALUE_.get(k)
# Given length of Hermite polynomial coefficient array, return
# a value to divide N with.
_K_TO_VALUE_ = {
1: 5.4,
2: 6.0,
3: 7.3,
4: 8.1,
5: 8.7,
6: 9.3,
7: 9.8,
8: 10.3,
9: 10.9,
10: 11.2,
}
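# Worked example: f_h(1024, 6) -> 1024 / 9.3, roughly 110.1 (window length divided
# by the tabulated constant for k = 6 non-zero Hermite coefficients).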
| python |
#!/usr/bin/env python
# Copyright 2020 Naoyuki Kanda
# MIT license
import sys
import os
import json
import soundfile
import librosa
import numpy as np
def get_delayed_audio(wav_file, delay, sampling_rate=16000):
audio, _ = soundfile.read(wav_file)
delay_frame = int(delay * sampling_rate)
if delay_frame != 0:
audio = np.append(np.zeros(delay_frame), audio)
return audio
def mix_audio(wavin_dir, wav_files, delays):
for i, wav_file in enumerate(wav_files):
if i == 0:
audio = get_delayed_audio(os.path.join(wavin_dir, wav_file), delays[i])
else:
additional_audio = get_delayed_audio(os.path.join(wavin_dir, wav_file), delays[i])
# tune length & sum up to audio
target_length = max(len(audio), len(additional_audio))
audio = librosa.util.fix_length(audio, target_length)
additional_audio = librosa.util.fix_length(additional_audio, target_length)
audio = audio + additional_audio
return audio
if __name__ == "__main__":
jsonl_file = sys.argv[1]
wavin_dir = sys.argv[2]
wavout_dir = sys.argv[3]
with open(jsonl_file, "r") as f:
for line in f:
data = json.loads(line)
mixed_audio = mix_audio(wavin_dir, data['wavs'], data['delays'])
outfile_path = os.path.join(wavout_dir, data['mixed_wav'])
outdir = os.path.dirname(outfile_path)
if not os.path.exists(outdir):
os.makedirs(outdir)
soundfile.write(outfile_path, mixed_audio, samplerate=16000)
| python |
"""
api for running OpenCL ports of nervana neon convolutional kernels
status: in progress
approximate guidelines/requirements:
- caller should handle opencl context and queue setup
- caller should allocate cl buffers
- library can/should provide a means to provide required dimensions of buffers to caller
- library will check dimensions of incoming buffers
"""
from neoncl.backends.kernels.cl.clshuffler import get_shuffle_kernel_d3_cl
from neoncl.backends.kernels.cl.callkernel import call_cl_kernel
from neoncl.util.math_helper import ceil_div
from math import ceil
import numpy as np
import pyopencl as cl
from operator import mul
import functools
from neoncl.backends.convolution import FpropCuda, BpropCuda, UpdateCuda
mf = cl.mem_flags
def output_dim(caffe_compat, X, S, padding, stride):
"""
compute along 1 dimension, with these sizes, what will be the output dimension
Arguments:
X (int): input data dimension
S (int): filter dimension
padding (int): padding on each side
stride (int): striding
"""
if caffe_compat:
        size = int(ceil(float(X - S + 2 * padding) / stride)) + 1
if padding > 0 and (size - 1)*stride >= X + padding:
# decrement size if last pooling op is completely in padding
size -= 1
else:
# normal neon output size determination
size = (X - S + 2 * padding) // stride + 1
return size
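# Worked example (non-caffe path): X=32, S=3, padding=1, stride=1 gives
# (32 - 3 + 2*1) // 1 + 1 = 32, i.e. the familiar "same"-size convolution output.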
class Shuffler(object):
# will shuffle src into dst, transposing first and last dimensions
# dimensions are taken to be:
# A B C
# where B is product of the dimensions other than first and last
def __init__(self, ctx, src_shape):
self.kernel = get_shuffle_kernel_d3_cl(ctx, 'f4')
self.A = src_shape[0]
self.C = src_shape[-1]
self.B = functools.reduce(mul, src_shape[1:-1])
self.grid = (ceil_div(self.C, 32), ceil_div(self.A, 32), self.B)
self.block = (32, 8, 1)
self.BC = self.B * self.C
self.AB = self.A * self.B
def shuffle(self, queue, dst, src):
call_cl_kernel(
self.kernel, queue,
self.grid, self.block,
dst, src,
self.BC, self.C,
self.AB, self.A)
class Convolver(object):
def __init__(self, ctx, N, Ci, Co, kH, kW, iH, iW, padH, padW, dH, dW):
"""
layout should be:
- for I: 'C H W N'
- for W: 'Ci H W Co'
- for O: 'C H W N'
"""
self.ctx = ctx
self.Ci = Ci
self.Co = Co
self.iH = iH
self.iW = iW
self.N = N
self.kH= kH
self.kW = kW
self.dH = dH
self.dW = dW
        # keep the output dims on self; getOShape()/getGradOShape() read them
        self.oH = oH = output_dim(False, iH, kH, padH, dH)
        self.oW = oW = output_dim(False, iW, kW, padW, dW)
assert padH == padW
self.fpropcuda = FpropCuda(ctx, 'f4',
N, Ci, Co,
1, iH, iW,
1, kH, kW,
1, oH, oW,
0, padH, padW,
0, dH, dW)
self.bpropcuda = BpropCuda(ctx, 'f4',
N, Ci, Co,
1, iH, iW,
1, kH, kW,
1, oH, oW,
0, padH, padW,
0, dH, dW)
self.updatecuda = UpdateCuda(ctx, 'f4',
N, Ci, Co,
1, iH, iW,
1, kH, kW,
1, oH, oW,
0, padH, padW,
0, dH, dW)
def getILayout(self):
return 'Ci iH iW N'
def getGradILayout(self):
return 'Ci iH iW N'
def getWLayout(self):
return 'Ci kH kW Co'
def getGradWLayout(self):
return 'Ci kH kW Co'
def getOLayout(self):
return 'Co oH oW N'
def getGradOLayout(self):
return 'Co oH oW N'
def getScratchSize(self, fpropOnly=False):
if fpropOnly:
return 0
return self.getBpropGradIScratchSize()
def getFpropScratchSize(self):
return 0
def getBpropGradWScratchSize(self):
return 0
def getBpropGradIScratchSize(self):
return self.Ci * self.Co * self.kH * self.kW
def getIShape(self):
return (self.Ci, self.iH, self.iW, self.N)
def getGradIShape(self):
return self.getIShape()
def getWShape(self):
return (self.Ci, self.kH, self.kW, self.Co)
def getGradWShape(self):
return self.getWShape()
def getOShape(self):
return (self.Co, self.oH, self.oW, self.N)
def getGradOShape(self):
return self.getOShape()
def fprop(self, queue, I, W, O, scratch=None):
self.fpropcuda.bind_params(I, W, O, 1.0, 0.0)
self.fpropcuda.execute(queue)
def bprop_gradW(self, queue, I, gradO, gradW, scratch=None):
self.updatecuda.bind_params(I, gradO, gradW, 1.0)
self.updatecuda.execute(queue)
def bprop_gradI(self, queue, gradO, W, gradI, scratch):
Wt = scratch
self.bpropcuda.shuffle(queue, Wt, W)
self.bpropcuda.bind_params(gradO, Wt, gradI, 1.0, 0.0)
self.bpropcuda.execute(queue)
| python |
# Created By: Virgil Dupras
# Created On: 2007-10-06
# Copyright 2013 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
import logging
import time
import traceback
import subprocess
import sys
from .CocoaProxy import CocoaProxy
proxy = CocoaProxy()
def autoreleasepool(func):
def wrapper(*args, **kwargs):
proxy.createPool()
try:
func(*args, **kwargs)
finally:
proxy.destroyPool()
return wrapper
def as_fetch(as_list, as_type, step_size=1000):
"""When fetching items from a very big list through applescript, the connection with the app
will timeout. This function is to circumvent that. 'as_type' is the type of the items in the
list (found in appscript.k). If we don't pass it to the 'each' arg of 'count()', it doesn't work.
applescript is rather stupid..."""
result = []
# no timeout. default timeout is 60 secs, and it is reached for libs > 30k songs
item_count = as_list.count(each=as_type, timeout=0)
steps = item_count // step_size
if item_count % step_size:
steps += 1
logging.info('Fetching %d items in %d steps' % (item_count, steps))
# Don't forget that the indexes are 1-based and that the upper limit is included
for step in range(steps):
begin = step * step_size + 1
end = min(item_count, begin + step_size - 1)
if end > begin:
result += as_list[begin:end](timeout=0)
else: # When there is only one item, the stupid fuck gives it directly instead of putting it in a list.
result.append(as_list[begin:end](timeout=0))
time.sleep(.1)
logging.info('%d items fetched' % len(result))
return result
def extract_tb_noline(tb):
# Same as traceback.extract_tb(), but without line fetching
limit = 100
list = []
n = 0
while tb is not None and (limit is None or n < limit):
f = tb.tb_frame
lineno = tb.tb_lineno
co = f.f_code
filename = co.co_filename
name = co.co_name
list.append((filename, lineno, name, None))
tb = tb.tb_next
n = n+1
return list
def safe_format_exception(type, value, tb):
"""Format exception from type, value and tb and fallback if there's a problem.
In some cases in threaded exceptions under Cocoa, I get tracebacks targeting pyc files instead
of py files, which results in traceback.format_exception() trying to print lines from pyc files
and then crashing when trying to interpret that binary data as utf-8. We want a fallback in
these cases.
"""
try:
return traceback.format_exception(type, value, tb)
except Exception:
result = ['Traceback (most recent call last):\n']
result.extend(traceback.format_list(extract_tb_noline(tb)))
result.extend(traceback.format_exception_only(type, value))
return result
def report_crash(type, value, tb):
app_identifier = proxy.bundleIdentifier()
app_version = proxy.appVersion()
osx_version = proxy.osxVersion()
s = "Application Identifier: {}\n".format(app_identifier)
s += "Application Version: {}\n".format(app_version)
s += "Mac OS X Version: {}\n\n".format(osx_version)
s += ''.join(safe_format_exception(type, value, tb))
if app_identifier:
s += '\nRelevant Console logs:\n\n'
p = subprocess.Popen(['grep', app_identifier, '/var/log/system.log'], stdout=subprocess.PIPE)
try:
s += str(p.communicate()[0], encoding='utf-8')
except IndexError:
# This can happen if something went wrong with the grep (permission errors?)
pass
proxy.reportCrash_(s)
def install_exception_hook():
sys.excepthook = report_crash
class CocoaHandler(logging.Handler):
def emit(self, record):
proxy.log_(record.getMessage())
def install_cocoa_logger():
logging.getLogger().addHandler(CocoaHandler())
def patch_threaded_job_performer():
# _async_run, under cocoa, has to be run within an autorelease pool to prevent leaks.
# You only need this patch is you use one of CocoaProxy's function (which allocate objc
# structures) inside a threaded job.
from jobprogress.performer import ThreadedJobPerformer
ThreadedJobPerformer._async_run = autoreleasepool(ThreadedJobPerformer._async_run)
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ===============================================================
# Copyright (C) 2018 HuangYk.
# Licensed under The MIT Lincese.
#
# Filename : torchsoa.py
# Author : HuangYK
# Last Modified: 2018-08-12 14:15
# Description :
#
# ===============================================================
import os
import copy
import torch
import torchnet as tnt
from torchnet.engine import Engine
from torchnet.logger import VisdomPlotLogger, VisdomLogger
import time
import numpy as np
import pandas as pd
from tqdm import tqdm # progress bar using in python shell
from pandas import DataFrame
from collections import defaultdict
class TorchSoaEngine(object):
'''A architecture of training process
Inherit TorchSoaEngine to build a neural network training processor for
specific dataset, and override abstract method get_iterator to provide a
batch sample iterator from dataset.
Attribute:
----------
    meters: Calculate loss, class accuracy, class confusion performance of
neural networks
model: Neural networks model at gpu device
parameters: Total number of parameters in model
Example:
--------
>> kw={'model':neural_network_instance,
'optimizer':optimizer_instance,
'loss_func':loss_function
'maxepoch':max_epoch, 'batch_size':batch_size,
'num_workers':num_workers}
>> net_engine = TorchSoaEngine(**kw)
>> net_engine.meters = ClassifyMeter(num_classes)
>> net_engine.train()
'''
def __init__(self, model, optimizer, loss_func, maxepoch, batch_size,
num_workers, net_name, **kws):
'''Init with training parameters, add hooks in torchnet
Training hooks function sequence is:
--> hook['on_start']
--> maxepoch iteration(
--> hook['on_start_epoch']
--> batch data iteration(
--> state['sample'] --> hook['on_sample']
--> state['optimizer'].zero
--> forward: state['network'](state['sample'])
--> state['output'], state['loss']
--> hook['on_forward'] with state['output'] and state['loss']
--> state['output'].zero, state['loss'].zero
--> backprop: state['optimizer'] with loss
--> hook['on_upadte']
--> state['t'].add
) # one epoch
--> state['epoch'].add
--> hook['on_end_epoch']
) # one training
--> hook['on_end']
Args:
-----
model: torch.nn.Module A nerual networks inherit nn.Module
optimizer: torch.optim Optim method for training
loss_func: torch.nn.functional, Loss function for nerual networks
max_epoch: int, Epoch number for training process
batch_size: int, Sample batch in a iteration
num_workers: int, Number of processors for get sample
net_name: str,
Return:
-------
A normalized torch net training architecture
'''
self._model = model
self._optimizer = optimizer
self._max_epoch = maxepoch
self._loss_func = loss_func
self._batch_size = batch_size
self._num_workers = num_workers
self._net_name = net_name
self._epoch_meters = None
self._epoch_recorder = None
self._engine = Engine()
self._engine.hooks['on_sample'] = self._on_sample
self._engine.hooks['on_forward'] = self._on_forward
self._engine.hooks['on_start_epoch'] = self._on_start_epoch
self._engine.hooks['on_end_epoch'] = self._on_end_epoch
self._engine.hooks['on_end'] = self._on_end
@property
def meters(self):
return self._epoch_meters
@meters.setter
def meters(self, meters):
self._epoch_meters = meters
@property
def epoch_rec(self):
return self._epoch_recorder
@epoch_rec.setter
def epoch_rec(self, epoch_rec):
self._epoch_recorder = epoch_rec
@property
def model(self):
return self._model
@property
def parameters(self):
        return sum(param.numel() for param in self._model.parameters())
def _on_start(self):
pass
def _on_sample(self, state):
'''Attach train(True) or test(False) label to samples
Args:
-----
state: dict, a state dict in torchnet, state['sample'] will provide
a list contain data, target
'''
state['sample'].append(state['train'])
def _on_start_epoch(self, state):
self._epoch_meters.reset_meters()
state['iterator'] = tqdm(state['iterator'])
def _on_forward(self, state):
'''Process forward output, loss before reset
Args:
-----
state: dict, provide output tensor and loss in state['output'],
state['loss']
'''
self._epoch_meters.add_output_to_meters(state)
def _on_update(self):
pass
def _on_end_epoch(self, state):
epoch_meters = self._epoch_meters
epoch_recorder = self._epoch_recorder
epoch_meters.print_meters(epoch=state['epoch'], train=True)
epoch_meters.send_meters(epoch=state['epoch'], train=True)
epoch_recorder.record(
index=state['epoch'], train=True,
loss=epoch_meters.loss, accuracy=epoch_meters.accuracy,
diag=epoch_meters.get_confusion_diag()[0],
num=epoch_meters.get_confusion_diag()[1]
)
epoch_meters.reset_meters()
self.test()
epoch_meters.print_meters(epoch=state['epoch'], train=False)
epoch_meters.send_meters(epoch=state['epoch'], train=False)
epoch_recorder.record(
index=state['epoch'], train=False,
loss=epoch_meters.loss, accuracy=epoch_meters.accuracy,
diag=epoch_meters.get_confusion_diag()[0],
num=epoch_meters.get_confusion_diag()[1],
conf=epoch_meters.get_confusion_matrix()
)
torch.save(self._model.state_dict(),
'epochs/{:s}_epoch_{:d}.pt'.format(
self._net_name, state['epoch']))
def _processor(self, sample):
data, target, train = sample
data = data.cuda()
target = target.cuda()
if train:
self._model.train()
else:
self._model.eval()
output = self._model(data)
loss = self._loss_func(output, target)
return loss, output
def _on_end(self, state):
'''Save training record
'''
csv_folder = './logs'
if state['train']:
csv_file = '_'.join(
[self._net_name, 'epoch', str(self._max_epoch)]
)
else:
csv_file = '_'.join([self._net_name, 'epoch', 'tmp'])
csv_file = os.path.join(csv_folder, csv_file)
self._epoch_recorder.save_csv(csv_file, state['train'])
def get_iterator(self, train):
raise NotImplementedError(
'get_iterator not implemented for TorchSoaEngine, which is an \
abstract class')
def train(self):
self._engine.train(self._processor, self.get_iterator(True),
maxepoch=self._max_epoch, optimizer=self._optimizer)
def test(self):
self._engine.test(self._processor, self.get_iterator(False))
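# --- Example (not part of the original module): a minimal sketch of a concrete
# engine showing how get_iterator could be overridden. The MNIST dataset, the
# transform and the './data' root directory are illustrative assumptions only.
class ExampleMnistEngine(TorchSoaEngine):
    def get_iterator(self, train):
        from torch.utils.data import DataLoader
        from torchvision import datasets, transforms
        dataset = datasets.MNIST(root='./data', train=train, download=True,
                                 transform=transforms.ToTensor())
        # DataLoader's default collate yields [data, target] lists, so the
        # engine's _on_sample hook can append the train flag to each sample.
        return DataLoader(dataset, batch_size=self._batch_size,
                          shuffle=train, num_workers=self._num_workers)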
class ClassifyMeter(object):
'''Classify task performance evaluation with loss curve, accuracy curve,
confusion matrix
This class provides loss, accuracy, confusion
Attribute:
----------
vis: ClassifyVisdom instance for plot loss, accuracy, confusion in
visdom server in real time during training
loss: float, average loss
accuracy: float, average accuracy of total samples
confusion: [k x k] np.array, class confusion matrix
'''
def __init__(self, num_classes):
self.num_classes = num_classes
self.loss_meter = tnt.meter.AverageValueMeter()
self.acc_meter = tnt.meter.ClassErrorMeter(accuracy=True)
self.confusion_meter = tnt.meter.ConfusionMeter(
num_classes, normalized=True)
self._meters = [self.loss_meter, self.acc_meter, self.confusion_meter]
self._loggers = ClassifyVisdom(num_classes)
@property
def vis(self):
'''
Return a meter list contain loss, acc, confusion
'''
return self._loggers
@property
def loss(self):
'''
Return average loss
'''
return self.loss_meter.value()[0]
@property
def accuracy(self):
'''
Return average class accuracy
'''
return self.acc_meter.value()[0]
@property
def confusion(self):
'''
Return confusion matrix of [num_classes x num_classes]
'''
self.confusion_meter.normalized = True
return self.confusion_meter.value()
def get_confusion_diag(self):
confusion = self.confusion_meter.conf
return np.diag(confusion), confusion.sum(1).clip(min=1e-12)
def get_confusion_matrix(self):
return self.confusion_meter.conf
def reset_meters(self):
for meter in self._meters:
meter.reset()
def print_meters(self, epoch=None, train=None):
process = 'Training' if train else 'Test'
print('[Epoch {:d}] {:s} Loss: {:.4f} (Accuracy: {:.2f}%)'.
format(epoch, process, self.loss, self.accuracy))
def send_meters(self, epoch=None, train=None):
self._loggers.log(epoch, self.loss, self.accuracy,
self.confusion, train)
def add_output_to_meters(self, state):
'''Add output, target to meters(loss, acc, confusion) per batch iter
Args:
-----
state: dict, provide loss, output, target
'''
self.loss_meter.add(state['loss'].data.item())
self.acc_meter.add(state['output'].data, state['sample'][1])
self.confusion_meter.add(state['output'].data, state['sample'][1])
class ClassifyVisdom(object):
'''Visdom logger for classify task, contain loss curve, accuracy curve and
confusion matrix, plot in visdom server
'''
def __init__(self, num_classes):
self._loss_logger = LossVisdom()
self._acc_logger = AccuracyVisdom()
self._confusion_logger = ConfusionVisdom(num_classes)
def log(self, epoch, loss, accuracy, confusion, train=None):
self._loss_logger.log(epoch, loss, train)
self._acc_logger.log(epoch, accuracy, train)
self._confusion_logger.log(confusion, train)
class LossVisdom(object):
'''Plot train and test loss curve together in a VisdomPlotLogger
'''
def __init__(self):
self._loss = VisdomPlotLogger('line', opts={
'title': 'Loss Curve'
})
check_visdom_server(self._loss.viz)
def log(self, epoch, loss, train=None):
assert train is not None,\
'train should be True or False, not {}'.format(train)
name = 'train' if train else 'test'
self._loss.log(epoch, loss, name=name)
class AccuracyVisdom(object):
'''Plot train and test accuracy curve together in a VisdomPlotLogger
'''
def __init__(self):
self._acc = VisdomPlotLogger('line', opts={
'title': 'Accuracy Curve'
})
check_visdom_server(self._acc.viz)
def log(self, epoch, accuracy, train=None):
assert train is not None,\
'train should be True or False, not {}'.format(train)
name = 'train' if train else 'test'
self._acc.log(epoch, accuracy, name=name)
class ConfusionVisdom(object):
'''Plot test confusion matrix in a VisdomLogger
'''
def __init__(self, num_classes):
self._confusion = VisdomLogger('heatmap', opts={
'title': 'Confusion Matrix',
'columnnames': list(range(num_classes)),
'rownames': list(range(num_classes))
})
check_visdom_server(self._confusion.viz)
def log(self, confusion, train=None):
assert train is not None,\
'train should be True or False, not {}'.format(train)
if train:
pass
else:
self._confusion.log(confusion)
class SoaRecorder(object):
'''Record loss and accuracy of a training process as csv
'''
items = ['loss-acc']
def __init__(self, record_step):
assert self.check_default_save_folder(), 'Save folder created failed'
self.record_step = record_step
self._recs = defaultdict(lambda: 'N/A')
self._recs['loss-acc'] = LossAccRecorder(record_step)
    def check_default_save_folder(self, path='./logs'):
        if os.path.exists(path):
            return True
        else:
            os.makedirs(path)
            return self.check_default_save_folder(path)
def add_item(self, kind, num_classes):
assert kind in ['confusion'], 'Record type not support'
if kind == 'confusion':
self.items.append(kind)
self._recs[kind] = ConfusionRecorder(
self.record_step, num_classes
)
def get_record(self):
'''
Return: A dict of DataFrame, which index in items
'''
return self._recs
def record(self, index, train, loss=np.nan, accuracy=np.nan,
diag=np.nan, num=np.nan, conf=None):
'''Add loss, accuracy to DataFrame
Args:
-----
index: int, epoch or batch iteration number
loss: float, loss of net forward process in this index
accuracy: float, average accuracy among classes in this index
train: boolean, if this index is a training process
'''
kws = {'index': index, 'train': train, 'loss': loss, 'conf': conf,
'accuracy': accuracy, 'diag': diag, 'num': num}
for kind in self.items:
self._recs[kind].record(**kws)
def save_csv(self, path, train=None):
for item in self.items:
if not self._recs[item] == 'N/A':
self._recs[item].save_csv(path, train)
else:
print('{} not used'.format(item))
class LossAccRecorder(object):
'''
'''
def __init__(self, record_step):
self.record_step = record_step
self._df = DataFrame(
columns=[['loss', 'loss', 'accuracy', 'accuracy'],
['train', 'test', 'train', 'test']]
)
self._df.index.name = record_step
def record(self, index, train, loss, accuracy, **kws):
c_level1 = 'train' if train else 'test'
self._df.loc[index, ('loss', (c_level1))] = loss
self._df.loc[index, ('accuracy', (c_level1))] = accuracy
def save_csv(self, path, train):
self._df.to_csv('{0:s}_loss-acc.csv'.format(path))
class ConfusionRecorder(object):
'''
'''
items = ['diag_train', 'diag_test', 'num_train', 'num_test']
def __init__(self, record_step, num_classes):
self.record_step = record_step
self._dfs = defaultdict(lambda: 'N/A')
self._confs = []
self._confs_keys = []
for k in self.items:
self._dfs[k] = DataFrame(columns=np.arange(num_classes))
def record(self, index, train, diag, num, conf=None, **kws):
diag_key = 'diag_train' if train else 'diag_test'
num_key = 'num_train' if train else 'num_test'
self._dfs[diag_key].loc[index] = diag
self._dfs[num_key].loc[index] = num
if conf is not None and not train:
conf_df = DataFrame(conf)
conf_df.to_csv(
'./logs/{0:s}_{1:d}_test_confusion.csv'.format(
self.record_step, index)
)
self._confs.append(copy.deepcopy(conf_df))
self._confs_keys.append('epoch_{:d}'.format(index))
def save_csv(self, path, train):
df = pd.concat(
[self._dfs['diag_train'], self._dfs['diag_test'],
self._dfs['num_train'], self._dfs['num_test']],
axis=1, keys=self.items
)
df.index.name = self.record_step
df.to_csv('{:s}_diag.csv'.format(path))
if len(self._confs) > 0:
conf_concat_df = pd.concat(
self._confs, axis=1, keys=self._confs_keys
)
conf_concat_df.index.name = 'Target'
conf_concat_df.to_csv('{:s}_confusion.csv'.format(path))
def check_visdom_server(vis):
'''check if visdom server start up
Args:
-----
vis: visdom.Visdom isinstance
Return:
-------
Throw a assert exception if visdom server not work,
return none if visdom server is running
'''
startup_sec = 1
while not vis.check_connection() and startup_sec > 0:
time.sleep(0.1)
startup_sec -= 0.1
assert vis.check_connection(), 'No visdom server found, \
use python -m visdom.server to start a visdom server'
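# --- Usage sketch (not in the original file). Assumes a visdom server is already
# running (`python -m visdom.server`) and that `net`, `optimizer` and `loss_fn`
# are defined elsewhere; all names below are illustrative.
# engine = ExampleMnistEngine(model=net, optimizer=optimizer, loss_func=loss_fn,
#                             maxepoch=10, batch_size=128, num_workers=4,
#                             net_name='mnist_net')
# engine.meters = ClassifyMeter(num_classes=10)
# engine.epoch_rec = SoaRecorder(record_step='epoch')
# engine.train()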
| python |
import unittest
import sys
from ctypeslib import clang2py
class ToolchainTest(unittest.TestCase):
if sys.platform == "win32":
def test_windows(self):
clang2py.main(["clang2py",
"-c",
"-w",
"-m", "ctypes.wintypes",
"-o", "_winapi_gen.py",
"windows.h"
])
import _winapi_gen
def test(self):
clang2py.main(["clang2py",
"-c",
"-o", "_stdio_gen.xml",
"stdio.h"
])
import _stdio_gen
if __name__ == "__main__":
import unittest
unittest.main()
| python |
import random
import numpy as np
import math
from collections import deque
import time
import pickle
from sklearn.linear_model import LinearRegression
from Simulations.GameFeatures import GameFeatures as GF
from BehaviouralModels.BehaviouralModels import BehaviouralModelInterface
MIN_REPLAY_MEMORY_SIZE = 16_384
MAX_REPLAY_MEMORY_SIZE = 16_384
MINIBATCH_SIZE = 16_384 #Affect how many states it will use to fit
DISCOUNT = 0.99
class IndiLRRL(BehaviouralModelInterface):
def __init__(self, goals, initial_game_state, feasible_actions, model_addr, results_addr):
super().__init__(goals, initial_game_state, feasible_actions, results_addr)
self._model_addr = model_addr
self._create_directory(self._model_addr)
self._previous_action = None
self._previous_state = None
self._previous_game = None
self._previous_score = 0
self._turn_count = 0
if self._get_file_size(self._model_addr + ".txt"):
#Load
self._regressions, self._epsilon = self._load_model()
else:
#Create
#Setup regression - One for each action's score
model_state = self._game_to_model_state(initial_game_state)
rand_vals = np.random.uniform(low=-1, high=1, size=(len(feasible_actions)))
self._regressions = LinearRegression().fit([model_state], [rand_vals])
#Set epsilon
self._epsilon = 1
self._epsilon_decay = 0.99925 #0.99975 before
self._episodes = 6000
self._episode_epsilon = self._epsilon_decay**self._episodes
if self._epsilon < self._episode_epsilon:
self._epsilon = 0
self._terminal_count = 0
#Setup memory for last N states
self._replay_memory = deque(maxlen=MAX_REPLAY_MEMORY_SIZE)
def get_epsilon(self):
return self._epsilon
def _load_model(self):
print("#####LOAD MODEL#####")
model = pickle.load(open(self._model_addr, 'rb'))
epsilon = None
with open(self._model_addr + ".txt") as model_file:
for line in model_file:
epsilon = float(line)
return model, epsilon
def save_model(self):
pickle.dump(self._regressions, open(self._model_addr, 'wb'))
with open(self._model_addr + ".txt", "w") as file:
file.write(str(self._epsilon))
def action(self, game_state, train_flag = True):
self._turn_count += 1
model_state = self._game_to_model_state(game_state)
if train_flag:
            score = self._calculate_score(game_state[0], game_state[2], game_state[3]) - self._previous_score  # Reward: use the reward difference instead
self._previous_score = self._calculate_score(game_state[0], game_state[2], game_state[3])
if self._epsilon > self._episode_epsilon and self._epsilon != 0:
if self._turn_count % 100 == 0:
print(f"steps: {self._turn_count}, life: {game_state[1]}, points: {game_state[2]}, score: {self._previous_score}")
if self._turn_count % 500 == 0:
self._epsilon *= self._epsilon_decay
print(f"Epsilon: {self._epsilon}")
if isinstance(self._previous_state, np.ndarray):
terminal_state = game_state[0] == 0 or model_state[0] != self._previous_state[0] or game_state[2] != self._previous_game[2] #If dead, different health, or different points
self._terminal_count += 1 if terminal_state else 0
self._update_replay_memory((self._previous_state, model_state, self._previous_action, score, game_state[0] == 0, terminal_state))
self._train(terminal_state , game_state[0])
else:
if self._turn_count % 100 == 0:
print(f"steps: {self._turn_count}, life: {game_state[1]}, points: {game_state[2]}, score: {self._previous_score}")
elif not self._turn_count % 100:
print(f"steps: {self._turn_count}, life: {game_state[1]}, points: {game_state[2]}, score: {self._previous_score}")
action = self._calculate_action([model_state], 0 if not train_flag or self._epsilon < self._episode_epsilon else self._epsilon)
self._previous_action = action
self._previous_state = model_state
self._previous_game = game_state
return action
def _game_to_model_state(self, game_state):
player_coor = (game_state[3][0]/len(game_state[-1][0]), game_state[3][1]/len(game_state[-1]))
player_life = game_state[1]/100
image_shape = (len(game_state[-1]), len(game_state[-1][0]), len(game_state[-1][0][0][0]))
np_map = np.array(game_state[-1])
np_model_state_map = np.array([ np_map[:,:,0].reshape(-1, *image_shape)/255,
np_map[:,:,1].reshape(-1, *image_shape)/255,
np_map[:,:,2].reshape(-1, *image_shape)/255 ])
return np.concatenate((np.array([player_life, player_coor[0], player_coor[1]]).flatten(), np_model_state_map.flatten()))
def _update_replay_memory(self, transition):
self._replay_memory.append(transition)
def _calculate_action(self, model_state, epsilon):
prediction = self._predict(model_state)[0]
action_index = self._choose_action_from_prediction(prediction, epsilon)
return self._feasible_actions[action_index]
def _predict(self, model_state):
predictions = self._regressions.predict(model_state)
return predictions
def _choose_action_from_prediction(self, prediction, epsilon):
index = np.argmax(prediction)
if np.random.random() < epsilon:
index = np.random.randint(0, len(prediction))
return index
def _train(self, terminal_state, step):
if len(self._replay_memory) < MIN_REPLAY_MEMORY_SIZE or self._terminal_count % 50 != 0 or not terminal_state:
return
print(f"Training at step: {self._turn_count}")
minibatch = self._replay_memory
current_states = self._get_state_in_prediction_structure(minibatch, 0)
current_q_list = np.array(self._predict(current_states))
new_current_states = self._get_state_in_prediction_structure(minibatch, 1)
future_q_list = np.array(self._predict(new_current_states))
X = []
y = []
for index, (current_state, new_current_state, action, reward, done, life_changer) in enumerate(minibatch):
if done:
new_q = -10 #reward
elif life_changer:
new_q = reward
else:
max_future_q = np.max(future_q_list[index])
new_q = reward + DISCOUNT * max_future_q
result = current_q_list[index]
result[action] = new_q
X += [current_state]
y += [result]
self._regressions.fit(X, y)
def _get_state_in_prediction_structure(self, minibatch, data_index):
current_states = np.array([transition[data_index] for transition in minibatch])
return current_states
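# --- Usage sketch (not part of the original class). Hypothetical wiring: `goals`,
# `initial_state`, `ACTIONS` and the simulation loop come from the surrounding
# project and are assumptions here.
# agent = IndiLRRL(goals, initial_state, ACTIONS,
#                  model_addr='models/indi_lrrl', results_addr='results/indi_lrrl')
# state = initial_state
# while simulation_is_running(state):
#     action = agent.action(state, train_flag=True)   # epsilon-greedy choice
#     state = simulation_step(state, action)          # environment transition
# agent.save_model()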
class GroupLRRL(BehaviouralModelInterface):
_replay_memory = deque(maxlen=MAX_REPLAY_MEMORY_SIZE)
_global_training_count = 0
_global_instances = 0
_regressions = None
_epsilon = 1
def __init__(self, goals, initial_game_state, feasible_actions, model_addr, results_addr):
super().__init__(goals, initial_game_state, feasible_actions, results_addr)
self._model_addr = model_addr
self._main_model = None
if GroupLRRL._regressions == None:
self._create_directory(self._model_addr) #Only create Model directory if it is the main model, not even epsilon is required.
self._main_model = True #Should every model count down epsilon?
else:
self._main_model = False
self._previous_action = None
self._previous_state = None
self._previous_game = None
self._previous_score = 0
self._turn_count = 0
if self._get_file_size(self._model_addr + ".txt"):
#Load
if GroupLRRL._regressions == None:
GroupLRRL._regressions, GroupLRRL._epsilon = self._load_model()
else:
#Create
#Setup regression - One for each action's score
if GroupLRRL._regressions == None:
model_state = self._game_to_model_state(initial_game_state)
rand_vals = np.random.uniform(low=-1, high=1, size=(len(feasible_actions)))
GroupLRRL._regressions = LinearRegression().fit([model_state], [rand_vals])
#Set epsilon
GroupLRRL._epsilon = 1
self._epsilon_decay = 0.99925 #0.99975 before
self._episodes = 6000
self._episode_epsilon = self._epsilon_decay**self._episodes
if self._epsilon < self._episode_epsilon:
self._epsilon = 0
self._terminal_count = 0
GroupLRRL._global_instances += 1
#Setup memory for last N states
GroupLRRL._replay_memory = deque(maxlen=MAX_REPLAY_MEMORY_SIZE)
def get_epsilon(self):
return GroupLRRL._epsilon
def _load_model(self):
print("#####LOAD MODEL#####")
model = pickle.load(open(self._model_addr, 'rb'))
epsilon = None
with open(self._model_addr + ".txt") as model_file:
for line in model_file:
epsilon = float(line)
return model, epsilon
def save_model(self):
if self._main_model == True:
pickle.dump(GroupLRRL._regressions, open(self._model_addr, 'wb'))
with open(self._model_addr + ".txt", "w") as file:
file.write(str(GroupLRRL._epsilon))
def action(self, game_state, train_flag = True):
self._turn_count += 1
GroupLRRL._global_training_count += 1
model_state = self._game_to_model_state(game_state)
if train_flag:
            score = self._calculate_score(game_state[0], game_state[2], game_state[3]) - self._previous_score  # Reward: use the reward difference instead
self._previous_score = self._calculate_score(game_state[0], game_state[2], game_state[3])
if GroupLRRL._epsilon > self._episode_epsilon and GroupLRRL._epsilon != 0:
if self._turn_count % 100 == 0:
print(f"Train: {train_flag}, steps: {self._turn_count}, life: {game_state[1]}, points: {game_state[2]}, score: {self._previous_score}, Name: {self._model_addr}")
if self._turn_count % 500 == 0:
GroupLRRL._epsilon *= self._epsilon_decay
print(f"Epsilon: {GroupLRRL._epsilon}, Name: {self._model_addr}")
if isinstance(self._previous_state, np.ndarray):
terminal_state = game_state[0] == 0 or model_state[0] != self._previous_state[0] or game_state[2] != self._previous_game[2] #If dead, different health, or different points
self._terminal_count += 1 if terminal_state else 0
self._update_replay_memory((self._previous_state, model_state, self._previous_action, score, game_state[0] == 0, terminal_state))
self._train(terminal_state , game_state[0])
else:
if self._turn_count % 100 == 0:
print(f"Train: {train_flag}, steps: {self._turn_count}, life: {game_state[1]}, points: {game_state[2]}, score: {self._previous_score}, Name: {self._model_addr}")
elif not self._turn_count % 100:
print(f"Train: {train_flag}, steps: {self._turn_count}, life: {game_state[1]}, points: {game_state[2]}, score: {self._previous_score}, Name: {self._model_addr}")
action = self._calculate_action([model_state], 0 if not train_flag or self._epsilon < self._episode_epsilon else self._epsilon)
self._previous_action = action
self._previous_state = model_state
self._previous_game = game_state
return action
def _game_to_model_state(self, game_state):
player_coor = (game_state[3][0]/len(game_state[-1][0]), game_state[3][1]/len(game_state[-1]))
player_life = game_state[1]/100
image_shape = (len(game_state[-1]), len(game_state[-1][0]), len(game_state[-1][0][0][0]))
np_map = np.array(game_state[-1])
np_model_state_map = np.array([ np_map[:,:,0].reshape(-1, *image_shape)/255,
np_map[:,:,1].reshape(-1, *image_shape)/255,
np_map[:,:,2].reshape(-1, *image_shape)/255 ])
return np.concatenate((np.array([player_life, player_coor[0], player_coor[1]]).flatten(), np_model_state_map.flatten()))
def _update_replay_memory(self, transition):
GroupLRRL._replay_memory.append(transition)
def _calculate_action(self, model_state, epsilon):
prediction = self._predict(model_state)[0]
action_index = self._choose_action_from_prediction(prediction, epsilon)
return self._feasible_actions[action_index]
def _predict(self, model_state):
predictions = GroupLRRL._regressions.predict(model_state)
return predictions
def _choose_action_from_prediction(self, prediction, epsilon):
index = np.argmax(prediction)
if np.random.random() < epsilon:
index = np.random.randint(0, len(prediction))
return index
def _train(self, terminal_state, step):
if len(GroupLRRL._replay_memory) < MIN_REPLAY_MEMORY_SIZE or GroupLRRL._global_training_count % (GroupLRRL._global_instances*1000) != 0:
return
print(f"Training at step: {self._turn_count}")
minibatch = GroupLRRL._replay_memory
current_states = self._get_state_in_prediction_structure(minibatch, 0)
current_q_list = np.array(self._predict(current_states))
new_current_states = self._get_state_in_prediction_structure(minibatch, 1)
future_q_list = np.array(self._predict(new_current_states))
X = []
y = []
for index, (current_state, new_current_state, action, reward, done, life_changer) in enumerate(minibatch):
if done:
new_q = -10 #reward
elif life_changer:
new_q = reward
else:
max_future_q = np.max(future_q_list[index])
new_q = reward + DISCOUNT * max_future_q
result = current_q_list[index]
result[action] = new_q
X += [current_state]
y += [result]
GroupLRRL._regressions.fit(X, y)
def _get_state_in_prediction_structure(self, minibatch, data_index):
current_states = np.array([transition[data_index] for transition in minibatch])
        return current_states
| python
# -*- coding: utf-8 -*-
# Copyright 2015 Donne Martin. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import unicode_literals
from __future__ import print_function
import mock
from tests.compat import unittest
from prompt_toolkit.key_binding.input_processor import KeyPress
from prompt_toolkit.keys import Keys
from saws.saws import Saws
class KeysTest(unittest.TestCase):
def setUp(self):
self.saws = Saws(refresh_resources=False)
self.registry = self.saws.key_manager.manager.registry
self.processor = self.saws.aws_cli.input_processor
self.DOCS_HOME_URL = \
'http://docs.aws.amazon.com/cli/latest/reference/index.html'
def feed_key(self, key):
self.processor.feed(KeyPress(key, u''))
self.processor.process_keys()
def test_F2(self):
orig_color = self.saws.get_color()
self.feed_key(Keys.F2)
assert orig_color != self.saws.get_color()
def test_F3(self):
orig_fuzzy = self.saws.get_fuzzy_match()
self.feed_key(Keys.F3)
assert orig_fuzzy != self.saws.get_fuzzy_match()
def test_F4(self):
orig_shortcut = self.saws.get_shortcut_match()
self.feed_key(Keys.F4)
assert orig_shortcut != self.saws.get_shortcut_match()
@mock.patch('saws.saws.webbrowser')
def test_F9(self, mock_webbrowser):
self.feed_key(Keys.F9)
mock_webbrowser.open.assert_called_with(self.DOCS_HOME_URL)
def test_F10(self):
with self.assertRaises(EOFError):
self.feed_key(Keys.F10)
@mock.patch('saws.resources.print')
def test_f5(self, mock_print):
self.feed_key(Keys.F5)
mock_print.assert_called_with('Done refreshing')
| python |
from selenium import webdriver
import unittest
import os
import sys
PACKAGE_ROOT = '../..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(),
os.path.expanduser(__file__))))
PACKAGE_PATH = os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_ROOT))
sys.path.append(PACKAGE_PATH)
from blog.selenium_tests.pages import BlogHomePage, BlogDetailPage
from blog.selenium_tests.base_tests import BlogBaseTests
class BlogDetailTests(BlogBaseTests, unittest.TestCase):
"""
Methods to test the blog detail pages.
"""
def setUp(self):
"""
Make the driver, get the page.
"""
self.driver = webdriver.Firefox()
self.driver.get(BlogHomePage.URL)
self.page = BlogHomePage(self.driver)
def tearDown(self):
"""
Close driver.
"""
self.driver.close()
def test_entry_elements_present(self):
"""
Make sure title, tagline, and text are all there.
"""
self.page.click_an_entry()
self.page = BlogDetailPage(self.driver)
self.assertTrue(self.page.verify_path())
self.assertTrue(self.page.verify_title_present())
self.assertTrue(self.page.verify_tagline_present())
self.assertTrue(self.page.verify_body_present())
if __name__ == '__main__':
unittest.main()
| python |
# encoding: UTF-8
'''
v1: yalinwang
Improved and optimized for the Bitfinex interface, with some extra logging added.
This file implements the CTA strategy engine, which abstracts and simplifies part of
the low-level interface for CTA-style strategies.
Rules for closing today's vs. yesterday's positions:
1. A plain close (OFFSET_CLOSE) is equivalent to closing yesterday's position
   (OFFSET_CLOSEYESTERDAY).
2. Only SHFE (Shanghai Futures Exchange) products distinguish between closing today's
   and yesterday's positions.
3. When an SHFE future has a position opened today, Sell and Cover use
   OFFSET_CLOSETODAY; otherwise OFFSET_CLOSE is used.
4. This design means an error occurs when the Sell/Cover volume exceeds today's
   position (i.e. the user tries to close today's and yesterday's positions with a
   single instruction).
5. The rationale is that vn.trader users mainly come from TB, MC and pyramid-style
   platforms and want higher-frequency trading, so strategies should not run into the
   situation described in 4.
6. Users who do need the behaviour described in 4 must build a customized structure
   that separates the strategy signal engine from the order execution engine (yes,
   you have to write it yourself).
v2: Compared with the original engine, sendOrder, sendStopOrder and cancelOrder are
unchanged; the main changes are in the event-driven monitoring (process*) functions.
processTickEvent is unchanged; note the differences from earlier versions.
Note that constants are imported via the constant module.
'''
from __future__ import division
import json
import os
import traceback
import importlib
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta
from copy import copy
from vnpy.event import Event
from vnpy.trader.vtEvent import *
from vnpy.trader.language import constant
from vnpy.trader.vtObject import VtTickData, VtBarData
from vnpy.trader.vtGateway import VtSubscribeReq, VtOrderReq, VtCancelOrderReq, VtLogData
from vnpy.trader.vtFunction import todayDate, getJsonPath
from vnpy.trader.utils.notification import notify
from decimal import *
import logging
from vnpy.trader.app.ctaStrategy.ctaBase import *
from vnpy.trader.app.ctaStrategy.strategy import STRATEGY_CLASS
########################################################################
class CtaEngine(object):
"""CTA策略引擎"""
settingFileName = 'CTA_setting.json'
settingfilePath = getJsonPath(settingFileName, __file__)
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine):
"""Constructor"""
self.mainEngine = mainEngine
self.eventEngine = eventEngine
# 当前日期
self.today = todayDate()
# 保存策略实例的字典
# key为策略名称,value为策略实例,注意策略名称不允许重复
self.strategyDict = {}
# 保存vtSymbol和策略实例映射的字典(用于推送tick数据)
# 由于可能多个strategy交易同一个vtSymbol,因此key为vtSymbol
# value为包含所有相关strategy对象的list
self.tickStrategyDict = {}
# 保存vtOrderID和strategy对象映射的字典(用于推送order和trade数据)
# key为vtOrderID,value为strategy对象
self.orderStrategyDict = {}
# 本地停止单编号计数
self.stopOrderCount = 0
# stopOrderID = STOPORDERPREFIX + str(stopOrderCount)
# 本地停止单字典
# key为stopOrderID,value为stopOrder对象
self.stopOrderDict = {} # 停止单撤销后不会从本字典中删除
self.workingStopOrderDict = {} # 停止单撤销后会从本字典中删除
# 保存策略名称和委托号列表的字典
# key为name,value为保存orderID(限价+本地停止)的集合
self.strategyOrderDict = {}
# 成交号集合,用来过滤已经收到过的成交推送
self.tradeSet = set()
# 引擎类型为实盘
self.engineType = ENGINETYPE_TRADING
# 注册日式事件类型
self.mainEngine.registerLogEvent(EVENT_CTA_LOG)
# 注册事件监听
self.registerEvent()
# self.path = os.path.join(os.getcwd(), u"reports" )
# if not os.path.isdir(self.path):
# os.makedirs(self.path)
# 上期所昨持仓缓存
self.ydPositionDict = {}
#----------------------------------------------------------------------
def sendOrder(self, vtSymbol, orderType, price, volume, priceType, strategy):
"""发单
cta引擎之中所有的操作都是基于引擎的,具体数据流为 strategy --->ctatemple----->ctaenging
在ctaenging 之中进行四个交易方向的order 分别为"买开" "卖开" "买平" "卖平"
这块是非常重要的,首先在存储的reqorder list 列表之中进行循环,调用底层接口进行发单,返回vtOrder;维护两个列表 orderStrategyDict[vtOrderID]
保存vtOrderID和strategy对象映射的字典(用于推送order和trade数据) key为vtOrderID,value为strategy对象; 保存策略名称和委托号列表的字典
key为name,value为保存orderID(限价+本地停止)的集合
"""
contract = self.mainEngine.getContract(vtSymbol)
req = VtOrderReq()
reqcount = 1
req.symbol = contract.symbol
req.exchange = contract.exchange
req.vtSymbol = contract.vtSymbol
req.price = self.roundToPriceTick(contract.priceTick, price)
req.volume = volume
req.productClass = strategy.productClass
req.currency = strategy.currency
req.byStrategy = strategy.name
        # Originally the CTA engine only allowed limit orders
        # req.priceType = PRICETYPE_LIMITPRICE
        req.priceType = priceType
        # CTA order type mapping
        """
        Low-level order mapping for CTA strategies: direction and offset are derived from
        the incoming orderType, which determines whether a position is opened or closed.
        Note that the Bitfinex gateway API has no open/close concept, so the conversion
        has to be defined inside the gateway itself.
        """
if orderType == CTAORDER_BUY:
req.direction = constant.DIRECTION_LONG
req.offset = constant.OFFSET_OPEN
elif orderType == CTAORDER_SELL:
req.direction = constant.DIRECTION_SHORT
# 只有上期所才要考虑平今平昨,上期所映射
if contract.exchange != constant.EXCHANGE_SHFE:
req.offset = constant.OFFSET_CLOSE
else:
# 获取持仓缓存数据
posBuffer = self.ydPositionDict.get(vtSymbol+'_LONG', None)
# 如果获取持仓缓存失败,则默认平昨
if not posBuffer:
self.writeCtaLog(u'获取昨持多仓为0,发出平今指令')
req.offset = constant.OFFSET_CLOSETODAY
elif posBuffer:
if volume <= posBuffer:
req.offset = constant.OFFSET_CLOSE
self.writeCtaLog(u'{}优先平昨,昨多仓:{},平仓数:{}'.format(vtSymbol, posBuffer, volume))
req.offset = constant.OFFSET_CLOSE
if (posBuffer - volume)>0:
self.writeCtaLog(u'{}剩余昨多仓{}'.format(vtSymbol,(posBuffer - volume)))
else:
req.offset = constant.OFFSET_CLOSE
req.volume = posBuffer
self.writeCtaLog(u'{}平仓量{},大于昨多仓,拆分优先平昨仓数:{}'.format(vtSymbol, volume, posBuffer))
req2 = copy(req)
req2.offset = constant.OFFSET_CLOSETODAY
req2.volume = volume - posBuffer
self.writeCtaLog(u'{}平仓量大于昨多仓,拆分到平今仓数:{}'.format(vtSymbol, req2.volume))
reqcount = 2
elif orderType == CTAORDER_SHORT:
req.direction = constant.DIRECTION_SHORT
req.offset = constant.OFFSET_OPEN
elif orderType == CTAORDER_COVER:
req.direction = constant.DIRECTION_LONG
# # 只有上期所才要考虑平今平昨
if contract.exchange != constant.EXCHANGE_SHFE:
req.offset = constant.OFFSET_CLOSE
else:
# 获取持仓缓存数据
posBuffer = self.ydPositionDict.get(vtSymbol+'_SHORT', None)
# 如果获取持仓缓存失败,则默认平昨
if not posBuffer:
self.writeCtaLog(u'获取昨持空仓为0,发出平今指令')
req.offset = constant.OFFSET_CLOSETODAY
elif posBuffer:
if volume <= posBuffer:
req.offset = constant.OFFSET_CLOSE
self.writeCtaLog(u'{}优先平昨,昨空仓:{},平仓数:{}'.format(vtSymbol, posBuffer, volume))
req.offset = constant.OFFSET_CLOSE
if (posBuffer - volume)>0:
self.writeCtaLog(u'{}剩余昨空仓{}'.format(vtSymbol,(posBuffer - volume)))
else:
req.offset = constant.OFFSET_CLOSE
req.volume = posBuffer
self.writeCtaLog(u'{}平仓量{},大于昨空仓,拆分优先平昨仓数:{}'.format(vtSymbol, volume, posBuffer))
req2 = copy(req)
req2.offset = constant.OFFSET_CLOSETODAY
req2.volume = volume - posBuffer
self.writeCtaLog(u'{}平仓量大于昨空仓,拆分到平今仓数:{}'.format(vtSymbol, req2.volume))
reqcount = 2
# 委托转换
# reqList = self.mainEngine.convertOrderReq(req) # 不转了
if reqcount == 1:
reqList = [req]
else:
reqList = [req,req2]
vtOrderIDList = [] # 维系一个列表 vtOrderIDList
# if not reqList:
# return vtOrderIDList
for convertedReq in reqList:
vtOrderID = self.mainEngine.sendOrder(convertedReq, contract.gatewayName) # 发单
self.orderStrategyDict[vtOrderID] = strategy # 保存vtOrderID和策略的映射关系
self.strategyOrderDict[strategy.name].add(vtOrderID) # 添加到策略委托号集合中
vtOrderIDList.append(vtOrderID)
self.writeCtaLog('策略%s: 发送%s委托%s, 交易:%s,%s,数量:%s @ %s'
%(strategy.name, priceType, vtOrderID, vtSymbol, orderType, volume, price ))
return vtOrderIDList
#----------------------------------------------------------------------
def cancelOrder(self, vtOrderID):
"""撤单"""
# 查询报单对象
order = self.mainEngine.getOrder(vtOrderID)
# 如果查询成功
if order:
# 检查是否报单还有效,只有有效时才发出撤单指令
orderFinished = (order.status == constant.STATUS_ALLTRADED
or order.status == constant.STATUS_CANCELLED
or order.status == constant.STATUS_REJECTED
or order.status == constant.STATUS_CANCELLING)
if not orderFinished:
req = VtCancelOrderReq()
req.vtSymbol = order.vtSymbol
req.symbol = order.symbol
req.exchange = order.exchange
req.frontID = order.frontID
req.sessionID = order.sessionID
req.orderID = order.orderID
self.mainEngine.cancelOrder(req, order.gatewayName)
self.writeCtaLog('策略%s: 对本地订单%s,品种%s发送撤单委托'%(order.byStrategy, vtOrderID, order.vtSymbol))
def batchCancelOrder(self,vtOrderIDList):
"""批量撤单"""
# 查询报单对象
reqList = []
for vtOrderID in vtOrderIDList:
order = self.mainEngine.getOrder(vtOrderID)
# 如果查询成功
if order:
# 检查是否报单还有效,只有有效时才发出撤单指令
orderFinished = (order.status == constant.STATUS_ALLTRADED
or order.status == constant.STATUS_CANCELLED
or order.status == constant.STATUS_REJECTED
or order.status == constant.STATUS_CANCELLING)
if not orderFinished:
req = VtCancelOrderReq()
req.vtSymbol = order.vtSymbol
req.symbol = order.symbol
req.exchange = order.exchange
req.frontID = order.frontID
req.sessionID = order.sessionID
req.orderID = order.orderID
reqList.append(req)
self.mainEngine.batchCancelOrder(reqList, order.gatewayName)
self.writeCtaLog('策略%s: 对本地订单%s,发送批量撤单委托,实际发送单量%s'%(order.byStrategy, vtOrderIDList,len(reqList)))
#----------------------------------------------------------------------
def sendStopOrder(self, vtSymbol, orderType, price, volume, priceType, strategy):
"""发停止单(本地实现)
这是很重要的一个函数,主要是用来维护本地停止单,注意 stopOrderID 与strategy 与 so 之间的映射关系
"""
self.stopOrderCount += 1
stopOrderID = STOPORDERPREFIX + str(self.stopOrderCount)
so = StopOrder()
so.vtSymbol = vtSymbol
so.orderType = orderType
so.price = price
so.priceType = priceType
so.volume = volume
so.strategy = strategy
so.stopOrderID = stopOrderID
so.status = STOPORDER_WAITING
so.byStrategy = strategy.name
if orderType == CTAORDER_BUY:
so.direction = constant.DIRECTION_LONG
so.offset = constant.OFFSET_OPEN
elif orderType == CTAORDER_SELL:
so.direction = constant.DIRECTION_SHORT
so.offset = constant.OFFSET_CLOSE
elif orderType == CTAORDER_SHORT:
so.direction = constant.DIRECTION_SHORT
so.offset = constant.OFFSET_OPEN
elif orderType == CTAORDER_COVER:
so.direction = constant.DIRECTION_LONG
so.offset = constant.OFFSET_CLOSE
# 保存stopOrder对象到字典中
self.stopOrderDict[stopOrderID] = so
self.workingStopOrderDict[stopOrderID] = so
# 保存stopOrderID到策略委托号集合中
self.strategyOrderDict[strategy.name].add(stopOrderID)
# 推送停止单状态
strategy.onStopOrder(so)
return [stopOrderID]
#----------------------------------------------------------------------
def cancelStopOrder(self, stopOrderID):
"""撤销停止单"""
# 检查停止单是否存在
if stopOrderID in self.workingStopOrderDict:
so = self.workingStopOrderDict[stopOrderID]
strategy = so.strategy
# 更改停止单状态为已撤销
so.status = STOPORDER_CANCELLED
# 从活动停止单字典中移除
del self.workingStopOrderDict[stopOrderID]
# 从策略委托号集合中移除
s = self.strategyOrderDict[strategy.name]
if stopOrderID in s:
s.remove(stopOrderID)
# 通知策略
strategy.onStopOrder(so)
#----------------------------------------------------------------------
def processStopOrder(self, tick):
"""收到行情后处理本地停止单(检查是否要立即发出)
注意这类的是使用的tickr 级别的数据进行的,注意这里的停止单有两种
1.没有仓位等待之中停止单
2.有仓位止损等待之中的停止单
"""
vtSymbol = tick.vtSymbol
# 首先检查是否有策略交易该合约
if vtSymbol in self.tickStrategyDict:
# 遍历等待中的停止单,检查是否会被触发
for so in list(self.workingStopOrderDict.values()):
if so.vtSymbol == vtSymbol:
longTriggered = ((so.direction == constant.DIRECTION_LONG) and tick.lastPrice>=so.price) # 多头停止单被触发
shortTriggered = ((so.direction == constant.DIRECTION_SHORT) and tick.lastPrice<=so.price) # 空头停止单被触发
if longTriggered or shortTriggered:
# 买入和卖出分别以涨停跌停价发单(模拟市价单)
# 对于没有涨跌停价格的市场则使用5档报价
if so.direction == constant.DIRECTION_LONG:
if tick.upperLimit:
price = tick.upperLimit
else:
price = tick.askPrice5
else:
if tick.lowerLimit:
price = tick.lowerLimit
else:
price = tick.bidPrice5
# 发出市价委托
vtOrderID = self.sendOrder(so.vtSymbol, so.orderType,
price, so.volume, so.priceType, so.strategy)
# 检查因为风控流控等原因导致的委托失败(无委托号)
if vtOrderID:
# 从活动停止单字典中移除该停止单
del self.workingStopOrderDict[so.stopOrderID]
# 从策略委托号集合中移除
s = self.strategyOrderDict[so.strategy.name]
if so.stopOrderID in s:
s.remove(so.stopOrderID)
# 更新停止单状态,并通知策略
so.status = STOPORDER_TRIGGERED
so.strategy.onStopOrder(so)
#----------------------------------------------------------------------
def processTickEvent(self, event):
"""处理行情推送"""
tick = event.dict_['data']
# 收到tick行情后,先处理本地停止单(检查是否要立即发出)
self.processStopOrder(tick)
# 推送tick到对应的策略实例进行处理
if tick.vtSymbol in self.tickStrategyDict:
#tick时间可能出现异常数据,使用try...except实现捕捉和过滤
try:
# 添加datetime字段
if not tick.datetime:
tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f')
except ValueError:
self.writeLog(traceback.format_exc(), logging.ERROR)
return
# 逐个推送到策略实例中
l = self.tickStrategyDict[tick.vtSymbol]
for strategy in l:
if strategy.trading:
self.callStrategyFunc(strategy, strategy.onTick, tick)
#----------------------------------------------------------------------
def processOrderEvent(self, event):
"""
处理委托推送
这里的数据流的方向是senderorderevent----vtenging---->gateway----成交----退给上层引擎进行细节控制
"""
order = event.dict_['data']
vtOrderID = order.vtOrderID
if vtOrderID in self.orderStrategyDict:
strategy = self.orderStrategyDict[vtOrderID]
            # Optimized for Bitfinex: orders are not used to compute strategy positions
            if order.gatewayName == 'BITFINEX':
                # The original code contained bare direction/offset comparisons here that
                # had no effect; for Bitfinex, positions are maintained through
                # processTradeEvent and processPositionEvent instead of order pushes.
                # If the order is finished (rejected, cancelled or fully traded),
                # remove it from the active order set
if order.status in constant.STATUS_FINISHED:
s = self.strategyOrderDict[strategy.name]
if vtOrderID in s:
s.remove(vtOrderID)
self.callStrategyFunc(strategy, strategy.onOrder, order)
#
else:
if order.status == constant.STATUS_CANCELLED:
if order.direction == constant.DIRECTION_LONG and order.offset == constant.OFFSET_CLOSE:
posName = order.vtSymbol + "_SHORT"
strategy.eveningDict[posName] += order.totalVolume - order.tradedVolume
elif order.direction == constant.DIRECTION_SHORT and order.offset == constant.OFFSET_CLOSE:
posName = order.vtSymbol + "_LONG"
strategy.eveningDict[posName] += order.totalVolume - order.tradedVolume
elif order.status == constant.STATUS_ALLTRADED or order.status == constant.STATUS_PARTTRADED:
if order.direction == constant.DIRECTION_LONG and order.offset == constant.OFFSET_OPEN:
posName = order.vtSymbol + "_LONG"
strategy.eveningDict[posName] += order.thisTradedVolume
elif order.direction == constant.DIRECTION_SHORT and order.offset == constant.OFFSET_OPEN:
posName = order.vtSymbol + "_SHORT"
strategy.eveningDict[posName] += order.thisTradedVolume
elif order.status == constant.STATUS_NOTTRADED:
if order.direction == constant.DIRECTION_LONG and order.offset == constant.OFFSET_CLOSE:
posName = order.vtSymbol + "_SHORT"
strategy.eveningDict[posName] -= order.totalVolume
elif order.direction == constant.DIRECTION_SHORT and order.offset == constant.OFFSET_CLOSE:
posName = order.vtSymbol + "_LONG"
strategy.eveningDict[posName] -= order.totalVolume
                # If the order is finished (rejected, cancelled or fully traded), remove it from the active order set
if order.status in constant.STATUS_FINISHED:
s = self.strategyOrderDict[strategy.name]
if vtOrderID in s:
s.remove(vtOrderID)
self.callStrategyFunc(strategy, strategy.onOrder, order)
#----------------------------------------------------------------------
def processTradeEvent(self, event):
"""处理成交推送"""
trade = event.dict_['data']
# 过滤已经收到过的成交回报
if trade.vtTradeID in self.tradeSet:
return
self.tradeSet.add(trade.vtTradeID)
# 将成交推送到策略对象中
if trade.vtOrderID in self.orderStrategyDict:
strategy = self.orderStrategyDict[trade.vtOrderID]
"""
计算策略持仓,在其他的交易所的gateway 的接口之中有开平的方向,目前在bitfinex 上是没有的,所以这里的根据volume
进行持仓的判断是无效的
"""
# 计算策略持仓 这里针对bitfinex 进行了优化,删除了对仓位的判断
if trade.gatewayName == 'BITFINEX':
if trade.direction == constant.DIRECTION_LONG and trade.offset == constant.OFFSET_OPEN:
posName = trade.vtSymbol + "_LONG"
elif trade.direction == constant.DIRECTION_LONG and trade.offset == constant.OFFSET_CLOSE:
posName = trade.vtSymbol + "_SHORT"
elif trade.direction == constant.DIRECTION_SHORT and trade.offset == constant.OFFSET_CLOSE:
posName = trade.vtSymbol + "_LONG"
elif trade.direction == constant.DIRECTION_SHORT and trade.offset == constant.OFFSET_OPEN:
posName = trade.vtSymbol + "_SHORT"
else:
if trade.direction == constant.DIRECTION_LONG and trade.offset == constant.OFFSET_OPEN:
posName = trade.vtSymbol + "_LONG"
strategy.posDict[str(posName)] += trade.volume
elif trade.direction == constant.DIRECTION_LONG and trade.offset == constant.OFFSET_CLOSE:
posName = trade.vtSymbol + "_SHORT"
strategy.posDict[str(posName)] -= trade.volume
elif trade.direction == constant.DIRECTION_SHORT and trade.offset == constant.OFFSET_CLOSE:
posName = trade.vtSymbol + "_LONG"
strategy.posDict[str(posName)] -= trade.volume
elif trade.direction == constant.DIRECTION_SHORT and trade.offset == constant.OFFSET_OPEN:
posName = trade.vtSymbol + "_SHORT"
strategy.posDict[str(posName)] += trade.volume
self.callStrategyFunc(strategy, strategy.onTrade, trade)
#----------------------------------
def processPositionEvent(self, event): # nearly abandon
"""
重点关注其中的持仓的推送环节
处理持仓推送 由sendorder ---->vtenging---->gateway----->成交然后交易所回报--------》推送给策略进行细节控制
可以看到这里的是针对每个策略进行仓位的更新的
根据bitfinex websocket 的特带点来看,首先进行监听的是possition 之后才是进去监听 order
这里专门针对交易所bitfinex 进行单独维护了一套,pos 参数的判断条件,其中在bitfinex 之中我默认地定义的pos_dic
是 DIRECTION_NET,当进行平仓操作之后,仓位变成此,仓位为 DIRECTION_NET,要进行策略的 pos 的维护需要进行重新定义
"""
pos = event.dict_['data']
for strategy in self.strategyDict.values():
if strategy.inited and pos.vtSymbol in strategy.symbolList:
if pos.direction == constant.DIRECTION_LONG:
posName = pos.vtSymbol + "_LONG"
strategy.posDict[str(posName)] = pos.position
strategy.eveningDict[str(posName)] = pos.position - pos.frozen
if 'CTP' in posName:
self.ydPositionDict[str(posName)] = pos.ydPosition
elif pos.direction == constant.DIRECTION_SHORT:
self.writeCtaLog('processPositionEvent 持有仓位为【空】仓 %s' % (constant.DIRECTION_SHORT))
posName2 = pos.vtSymbol + "_SHORT"
strategy.posDict[str(posName2)] = pos.position
strategy.eveningDict[str(posName2)] = pos.position - pos.frozen
if 'CTP' in posName2:
self.ydPositionDict[str(posName2)] = pos.ydPosition
elif pos.direction == constant.DIRECTION_NET and pos.gatewayName == constant.EXCHANGE_BITFINEX:
if pos.position == 0:
self.writeCtaLog('processPositionEvent 没有持仓 %s' % (constant.DIRECTION_NET))
strategy.eveningDict[str(pos.vtSymbol + "_SHORT")] = pos.position - pos.frozen
strategy.posDict[str(pos.vtSymbol + "_SHORT")] = pos.position
strategy.eveningDict[str(pos.vtSymbol + "_LONG")] = pos.position - pos.frozen
strategy.posDict[str(pos.vtSymbol + "_LONG")] = pos.position
# 保存策略持仓到数据库
# self.saveSyncData(strategy)
#------------------------------------------------------
def processAccountEvent(self,event):
"""账户推送"""
account = event.dict_['data']
for strategy in self.strategyDict.values():
if strategy.inited:
for sym in strategy.symbolList:
if account.gatewayName in sym:
strategy.accountDict[str(account.accountID)] = account.available
break
def processErrorEvent(self,event):
error = event.dict_['data']
for strategy in self.strategyDict.values():
if strategy.inited:
for sym in strategy.symbolList:
if error.gatewayName in sym:
msg = f'ProcessError,错误码:{error.errorID},错误信息:{error.errorMsg}'
self.writeLog(msg, logging.ERROR) # 待扩展
notify(msg,strategy)
return
#--------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.eventEngine.register(EVENT_TICK, self.processTickEvent)
self.eventEngine.register(EVENT_POSITION, self.processPositionEvent)
self.eventEngine.register(EVENT_ORDER, self.processOrderEvent)
self.eventEngine.register(EVENT_TRADE, self.processTradeEvent)
self.eventEngine.register(EVENT_ACCOUNT, self.processAccountEvent)
self.eventEngine.register(EVENT_ERROR, self.processErrorEvent)
#----------------------------------------------------------------------
def insertData(self, dbName, collectionName, data):
"""插入数据到数据库(这里的data可以是VtTickData或者VtBarData)"""
pass
# for collectionName_ in collectionName:
# self.mainEngine.dbInsert(dbName, collectionName_, data.__dict__)
#----------------------------------------------------------------------
def loadBar(self, dbName, collectionName, hours):
"""从数据库中读取Bar数据,startDate是datetime对象"""
pass
# startDate = self.today - timedelta(hours = hours)
# for collectionName_ in collectionName:
# d = {'datetime':{'$gte':startDate}}
# barData = self.mainEngine.dbQuery(dbName, collectionName_, d, 'datetime')
# l = []
# for d in barData:
# bar = VtBarData()
# bar.__dict__ = d
# bar.vtSymbol = collectionName_
# l.append(bar)
# return l
#----------------------------------------------------------------------
def loadTick(self, dbName, collectionName, hours):
"""从数据库中读取Tick数据,startDate是datetime对象"""
pass
# startDate = self.today - timedelta(hours = hours)
# for collectionName_ in collectionName:
# d = {'datetime':{'$gte':startDate}}
# tickData = self.mainEngine.dbQuery(dbName, collectionName_, d, 'datetime')
# l = []
# for d in tickData:
# tick = VtTickData()
# tick.__dict__ = d
# l.append(tick)
# return l
#----------------------------------------------------------------------
def writeCtaLog(self, content):
"""快速发出CTA模块日志事件"""
log = VtLogData()
log.logContent = content
log.gatewayName = 'CTA_STRATEGY'
event = Event(type_=EVENT_CTA_LOG)
event.dict_['data'] = log
self.eventEngine.put(event)
    def writeLog(self, content, level=logging.INFO):
log = VtLogData()
log.logContent = content
log.gatewayName = 'CTA_STRATEGY'
log.logLevel = level
event = Event(type_=EVENT_CTA_LOG)
event.dict_['data'] = log
self.eventEngine.put(event)
#----------------------------------------------------------------------
def loadStrategy(self, setting):
"""载入策略"""
try:
name = setting['name']
className = setting['className']
vtSymbolset=setting['symbolList']
except KeyError as e:
# self.writeCtaLog(u'载入策略出错:%s' %e)
            self.writeLog(u'Failed to load strategy: %s' % traceback.format_exc(), logging.ERROR)
return
# 获取策略类
strategyClass = STRATEGY_CLASS.get(className, None)
if not strategyClass:
STRATEGY_GET_CLASS = self.loadLocalStrategy()
strategyClass = STRATEGY_GET_CLASS.get(className, None)
if not strategyClass:
# self.writeCtaLog(u'找不到策略类:%s' %className)
self.writeLog(u'找不到策略类:%s' %className, logging.ERROR)
return
# 防止策略重名
if name in self.strategyDict:
# self.writeCtaLog(u'策略实例重名:%s' %name)
self.writeLog(u'策略实例重名:%s' %name, logging.ERROR)
else:
# 创建策略实例
strategy = strategyClass(self, setting)
self.strategyDict[name] = strategy
strategy.symbolList = vtSymbolset
strategy.mailAdd = setting.get("mailAdd",None)
strategy.name = name
# 创建委托号列表
self.strategyOrderDict[name] = set()
for vtSymbol in vtSymbolset :
# 保存Tick映射关系
if vtSymbol in self.tickStrategyDict:
l = self.tickStrategyDict[vtSymbol]
else:
l = []
self.tickStrategyDict[vtSymbol] = l
l.append(strategy)
#-----------------------------------------------------------------------
def subscribeMarketData(self, strategy):
"""订阅行情"""
# 订阅合约
for vtSymbol in strategy.symbolList:
contract = self.mainEngine.getContract(vtSymbol)
if contract:
req = VtSubscribeReq()
req.symbol = contract.symbol
req.vtSymbol = contract.vtSymbol
req.exchange = contract.exchange
# 对于IB接口订阅行情时所需的货币和产品类型,从策略属性中获取
req.currency = strategy.currency
req.productClass = strategy.productClass
self.mainEngine.subscribe(req, contract.gatewayName)
else:
# self.writeCtaLog(u'策略%s的交易合约%s无法找到' %(strategy.name, vtSymbol))
self.writeLog(u'策略%s的交易合约%s无法找到' %(strategy.name, vtSymbol), logging.ERROR)
#----------------------------------------------------------------------
def initStrategy(self, name):
"""初始化策略"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
if not strategy.inited:
strategy.inited = True
self.initPosition(strategy)
self.callStrategyFunc(strategy, strategy.onInit)
self.subscribeMarketData(strategy) # 加载同步数据后再订阅行情
self.writeCtaLog(u'策略%s: 初始化' %name)
else:
self.writeCtaLog(u'请勿重复初始化策略实例:%s' %name)
else:
self.writeCtaLog(u'策略实例不存在:%s' %name)
#---------------------------------------------------------------------
def startStrategy(self, name):
"""启动策略"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
if strategy.inited and not strategy.trading:
strategy.trading = True
self.callStrategyFunc(strategy, strategy.onStart)
self.writeCtaLog(u'策略%s: 启动' %name)
else:
self.writeCtaLog(u'策略实例不存在:%s' %name)
#----------------------------------------------------------------------
def stopStrategy(self, name):
"""停止策略"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
if strategy.trading:
self.writeCtaLog(u'策略%s: 准备停止工作' % name)
strategy.trading = False
self.callStrategyFunc(strategy, strategy.onStop)
# 对该策略发出的所有限价单进行撤单
for vtOrderID, s in list(self.orderStrategyDict.items()):
if s is strategy:
self.cancelOrder(vtOrderID)
# 对该策略发出的所有本地停止单撤单
for stopOrderID, so in list(self.workingStopOrderDict.items()):
if so.strategy is strategy:
self.cancelStopOrder(stopOrderID)
strategy.inited = False ## 取消注释使策略在停止后可以再次初始化
self.writeCtaLog(u'策略%s: 停止工作' %name)
## 加上删除持仓信息
else:
self.writeCtaLog(u'策略实例不存在:%s' %name)
#----------------------------------------------------------------------
def initAll(self):
"""全部初始化"""
for name in list(self.strategyDict.keys()):
self.initStrategy(name)
#----------------------------------------------------------------------
def startAll(self):
"""全部启动"""
for name in list(self.strategyDict.keys()):
self.startStrategy(name)
#----------------------------------------------------------------------
def stopAll(self):
"""全部停止"""
for name in list(self.strategyDict.keys()):
self.stopStrategy(name)
#----------------------------------------------------------------------
def saveSetting(self):
"""保存策略配置"""
with open(self.settingfilePath, 'w') as f:
l = []
for strategy in list(self.strategyDict.values()):
setting = {}
for param in strategy.paramList:
setting[param] = strategy.__getattribute__(param)
l.append(setting)
jsonL = json.dumps(l, indent=4)
f.write(jsonL)
#----------------------------------------------------------------------
def loadSetting(self):
"""读取策略配置"""
with open(self.settingfilePath) as f:
l = json.load(f)
for setting in l:
if 'policy' in setting.keys():
POLICY_CLASS = {}
if setting['policy']:
POLICY_CLASS = self.loadPolicy(setting['policy'])
policyClass = POLICY_CLASS.get(setting['policy'], None)
if not policyClass:
self.writeCtaLog(u'找不到Policy:%s' %setting['policy'])
return
newsetting = policyClass(setting)
newsetting.assert_symbol()
print(newsetting.setting)
self.loadStrategy(newsetting.setting)
continue
self.loadStrategy(setting)
# for strategy in self.strategyDict.values():
# self.loadSyncData(strategy)
#----------------------------------------------------------------------
def getStrategyVar(self, name):
"""获取策略当前的变量字典"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
varDict = OrderedDict()
for key in strategy.varList:
varDict[key] = strategy.__getattribute__(key)
return varDict
else:
self.writeCtaLog(u'策略实例不存在:' + name)
return None
#----------------------------------------------------------------------
def getStrategyParam(self, name):
"""获取策略的参数字典"""
if name in self.strategyDict:
strategy = self.strategyDict[name]
paramDict = OrderedDict()
for key in strategy.paramList:
paramDict[key] = strategy.__getattribute__(key)
return paramDict
else:
self.writeCtaLog(u'策略实例不存在:' + name)
return None
#-----------------------------------
def getStrategyNames(self):
"""查询所有策略名称"""
return self.strategyDict.keys()
#----------------------------------------------------------------------
def putStrategyEvent(self, name):
"""触发策略状态变化事件(通常用于通知GUI更新)"""
strategy = self.strategyDict[name]
d = {k:strategy.__getattribute__(k) for k in strategy.varList}
event = Event(EVENT_CTA_STRATEGY+name)
event.dict_['data'] = d
self.eventEngine.put(event)
d2 = {k:str(v) for k,v in d.items()}
d2['name'] = name
event2 = Event(EVENT_CTA_STRATEGY)
event2.dict_['data'] = d2
self.eventEngine.put(event2)
#----------------------------------------------------------------------
def callStrategyFunc(self, strategy, func, params=None):
"""调用策略的函数,若触发异常则捕捉"""
try:
if params:
func(params)
else:
func()
except Exception:
# 停止策略,修改状态为未初始化
self.stopStrategy(strategy.name)
content = '\n'.join([u'策略%s:触发异常, 当前状态已保存, 挂单将全部撤销' %strategy.name,
traceback.format_exc()])
notify(content,strategy)
# self.writeCtaLog(content)
self.writeLog(content, logging.ERROR)
#----------------------------------------------------------------------------------------
def saveSyncData(self, strategy): #改为posDict
"""保存策略的持仓情况到数据库"""
flt = {'name': strategy.name,
'subject':str(strategy.symbolList)}
# result = []
d = {}
for key in strategy.syncList:
d[key] = strategy.__getattribute__(key)
# result.append(key)
# result.append(d[key])
flt['SyncData'] = d
# self.mainEngine.dbUpdate(POSITION_DB_NAME, strategy.name,
# d, flt, True)
# content = u'策略%s: 同步数据保存成功,当前仓位状态:%s' %(strategy.name,result)
# self.writeCtaLog(content)
def saveVarData(self, strategy):
flt = {'name': strategy.name,
'subject':str(strategy.symbolList)}
# result = []
d = {}
for key in strategy.varList:
d[key] = strategy.__getattribute__(key)
# result.append(key)
# result.append(d[key])
flt['VarData'] = d
# self.mainEngine.dbUpdate(VAR_DB_NAME, strategy.name,
# d, flt, True)
# content = u'策略%s: 参数数据保存成功,参数为%s' %(strategy.name,result)
# self.writeCtaLog(content)
#----------------------------------------------------------------------
def loadSyncData(self, strategy):
"""从数据库载入策略的持仓情况"""
# flt = {'name': strategy.name,
# 'posName': str(strategy.symbolList)}
# syncData = self.mainEngine.dbQuery(POSITION_DB_NAME, strategy.name, flt)
# d = syncData['SyncData']
# for key in strategy.syncList:
# if key in d:
# strategy.__setattr__(key, d[key])
def loadVarData(self, strategy):
"""从数据库载入策略的持仓情况"""
# flt = {'name': strategy.name,
# 'posName': str(strategy.symbolList)}
# varData = self.mainEngine.dbQuery(VAR_DB_NAME, strategy.name, flt)
# d = varData['VarData']
# for key in strategy.varList:
# if key in d:
# strategy.__setattr__(key, d[key])
#----------------------------------------------------------------------
def roundToPriceTick(self, priceTick, price):
"""取整价格到合约最小价格变动"""
d = Decimal(str(price))
newPrice = float(d.quantize(Decimal(str(priceTick))))
return newPrice
#----------------------------------------------------------------------
def stop(self):
"""停止"""
pass
#----------------------------------------------------------------------
def cancelAll(self, name):
"""全部撤单"""
s = self.strategyOrderDict[name]
# 遍历列表,查找非停止单全部撤单
# 这里不能直接遍历集合s,因为撤单时会修改s中的内容,导致出错
for orderID in list(s):
if STOPORDERPREFIX not in orderID:
self.cancelOrder(orderID)
def cancelAllStopOrder(self,name):
"""撤销所有停止单"""
s= self.strategyOrderDict[name]
for orderID in list(s):
if STOPORDERPREFIX in orderID:
self.cancelStopOrder(orderID)
#----------------------------------------------------------------------
def getPriceTick(self, strategy):
"""获取最小价格变动"""
for vtSymbol in strategy.symbolList:
contract = self.mainEngine.getContract(vtSymbol)
if contract:
return contract.priceTick
return 0
#--------------------------------------------------------------
def loadHistoryBar(self,vtSymbol,type_,size = None,since = None):
"""读取历史数据"""
data = self.mainEngine.loadHistoryBar(vtSymbol, type_, size, since)
histbar = []
for index, row in data.iterrows():
bar = VtBarData()
bar.open = row.open
bar.close = row.close
bar.high = row.high
bar.low = row.low
bar.volume = row.volume
bar.vtSymbol = vtSymbol
bar.datetime = row.datetime
histbar.append(bar)
return histbar
def initPosition(self,strategy):
"""
通过引擎来维护更新策略持仓,保障在持有仓位的状态下,重新启动程序有相关的仓位
:param strategy:
:return:
"""
for symbol in strategy.symbolList:
strategy.posDict[symbol+"_LONG"] = 0
strategy.posDict[symbol+"_SHORT"] = 0
strategy.eveningDict[symbol+"_LONG"] = 0
strategy.eveningDict[symbol+"_SHORT"] = 0
# 根据策略的品种信息,查询特定交易所该品种的持仓
for vtSymbol in strategy.symbolList:
self.mainEngine.initPosition(vtSymbol)
    def qryAllOrders(self, name):
        """Query all open orders for every symbol traded by a strategy."""
        if name in self.strategyDict:
            strategy = self.strategyDict[name]
            s = self.strategyOrderDict[name]
            for symbol in strategy.symbolList:
                self.mainEngine.qryAllOrders(symbol, -1, status=1)
            # self.writeCtaLog("ctaEngine issued an open-order poll for strategy %s on %s, local order count %s" % (name, symbol, len(list(s))))
    def restoreStrategy(self, name):
        """Restore a strategy's state."""
        if name in self.strategyDict:
            strategy = self.strategyDict[name]
            if not strategy.inited and not strategy.trading:
                strategy.inited = True
                strategy.trading = True
                self.callStrategyFunc(strategy, strategy.onRestore)
                self.loadVarData(strategy)    # load synced data once initialisation is complete
                self.loadSyncData(strategy)
                self.writeCtaLog(u'Strategy %s: state restored successfully' % name)
            else:
                self.writeCtaLog(u'Strategy %s: cannot be restored from its current state' % name)
        else:
            self.writeCtaLog(u'Strategy instance does not exist: %s' % name)
    def loadLocalStrategy(self):
        # Dictionary used to collect the strategy classes.
        STRATEGY_GET_CLASS = {}
        # Get the working directory and walk through the files below it.
        path = os.getcwd()
        for root, subdirs, files in os.walk(path):
            for name in files:
                # Only files whose name contains 'Strategy' and that are not .pyc files are strategy files.
                if 'Strategy' in name and '.pyc' not in name:
                    # The module name is the file name without its extension.
                    moduleName = name.replace('.py', '')
                    # Load the module dynamically with importlib.
                    try:
                        module = importlib.import_module(moduleName)
                        # Only objects whose name contains 'Strategy' are strategy classes.
                        for k in dir(module):
                            if 'Strategy' in k:
                                v = module.__getattribute__(k)
                                STRATEGY_GET_CLASS[k] = v
                    except:
                        print('-' * 20)
                        print('Failed to import strategy file %s:' % moduleName)
                        traceback.print_exc()
        return STRATEGY_GET_CLASS
def getGateway(self, gatewayName):
return self.mainEngine.gatewayDict.get(gatewayName, None)
    def loadPolicy(self, policyName):
        """Dynamically load policy classes matching policyName from a local policy.py file."""
        POLICY_CLASS = {}
        if os.path.exists('policy.py'):
            try:
                module = importlib.import_module('policy')
                for k in dir(module):
                    if policyName in k:
                        v = module.__getattribute__(k)
                        POLICY_CLASS[k] = v
            except:
                print('-' * 20)
                print('Failed to import policy file')
                traceback.print_exc()
        return POLICY_CLASS
| python |
################################################################################
#
# Copyright (C) 2019 Garrett Brown
# This file is part of pyqudt - https://github.com/eigendude/pyqudt
#
# pyqudt is derived from jQUDT
# Copyright (C) 2012-2013 Egon Willighagen <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
# See the file LICENSE for more information.
#
################################################################################
from qudt.ontology.unit_factory import UnitFactory
from qudt.unit import Unit
class TemperatureUnit(object):
    """Commonly used temperature units, resolved from the QUDT unit ontology."""
KELVIN: Unit = UnitFactory.get_unit('http://qudt.org/vocab/unit#Kelvin')
CELSIUS: Unit = UnitFactory.get_unit('http://qudt.org/vocab/unit#DegreeCelsius')
FAHRENHEIT: Unit = UnitFactory.get_unit(
'http://qudt.org/vocab/unit#DegreeFahrenheit'
)
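

# Hypothetical usage sketch (not part of the original file): it assumes pyqudt exposes a
# Quantity type with a convert_to() helper; the exact module path and API may differ.
#   temp = Quantity(25.0, TemperatureUnit.CELSIUS)
#   temp_in_kelvin = temp.convert_to(TemperatureUnit.KELVIN)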
| python |
import json, subprocess
from .... pyaz_utils import get_cli_name, get_params
def start(destination_blob, destination_container, account_name=None, account_key=None, connection_string=None, sas_token=None, auth_mode=None, timeout=None, destination_if_modified_since=None, destination_if_unmodified_since=None, destination_if_match=None, destination_if_none_match=None, destination_tags_condition=None, source_if_modified_since=None, source_if_unmodified_since=None, source_if_match=None, source_if_none_match=None, source_tags_condition=None, source_sas=None, source_container=None, source_blob=None, source_snapshot=None, source_account_name=None, source_account_key=None, source_path=None, source_share=None, destination_lease_id=None, source_lease_id=None, rehydrate_priority=None, requires_sync=None, tier=None, tags=None, source_uri=None, metadata=None):
    # Required parameters come first so the signature is valid Python
    # (non-default arguments may not follow default ones).
    params = get_params(locals())
    command = "az storage blob copy start " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        print(stdout)
        return json.loads(stdout)
    else:
        print(stderr)
        raise Exception(stderr)
def cancel(destination_container, destination_blob, copy_id, account_name=None, account_key=None, connection_string=None, sas_token=None, auth_mode=None, lease_id=None, timeout=None):
    # Required parameters come first so the signature is valid Python.
    params = get_params(locals())
    command = "az storage blob copy cancel " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        print(stdout)
        return json.loads(stdout)
    else:
        print(stderr)
        raise Exception(stderr)
def start_batch(account_name=None, account_key=None, connection_string=None, sas_token=None, auth_mode=None, source_account_name=None, source_account_key=None, source_uri=None, source_client=None, destination_container=None, destination_path=None, source_container=None, source_share=None, source_sas=None, pattern=None, dryrun=None):
    params = get_params(locals())
    command = "az storage blob copy start-batch " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        print(stdout)
        return json.loads(stdout)
    else:
        print(stderr)
        raise Exception(stderr)
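
# Illustrative call (not part of the generated module): the container, blob and URI values
# below are made up, and an authenticated Azure CLI is assumed to be on the PATH.
#   result = start(destination_blob='backup.bin', destination_container='backups',
#                  source_uri='https://example.blob.core.windows.net/src/backup.bin')
#   print(result)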
| python |
import csv, json
import to_json
# LOD preparations
# Import the LOD library.
from lod import lod
# The object_manager contains every object that has been created in this Scenario so far.
object_manager = lod.get_object_manager()
def main(lod_manager):
#
# Get the arguments that were given to this Program.
#
# Each element of 'arguments' is a named object that was passed to this Program.
# If the object is a file, that file can be opened.
# The parameters selected by the user have been put into a single file in JSON format, so we can just read them out that way.
    # (Note that if your requirements are more complicated, you can use several input files from different sources:
    # for example, two different Options that each require parameters from users, or several files that have been uploaded by the user or generated by other Programs.)
arguments = lod.get_program_arguments()
try:
#
# Try to open and parse data.
#
with open(arguments['user_parameters_file'].file, 'r') as f:
json_data = to_json.parse(f)
except:
confidence = 1000
description = None
trigger = {}
display = {
'must_always_be_shown' : True,
'parameter_file_name' : 'userParametersFile',
'message_components' : [
{
"text" : "The file could not be parsed. Please try again."
}
],
'buttons' : [
{
'text' : "Parse to JSON",
'style' : 'cta',
}
],
}
actions = [
{
'type' : 'execute_program',
'program' : "Orlando-test-program",
'arguments' : {
'user_parameters_file' : 'userParametersFile',
}
},
]
existing_variables = {}
new_option = lod.option(confidence, 'orlando_test_program_option_try_again', description, trigger, display, actions, existing_variables)
return
#
# Output the result
#
json_file = lod.add_output_file("output.json")
    with open(json_file, 'w+') as f:
        json.dump(json_data, f)
# Create a simple tag connecting the file.
# It is up to others how they want to react to this Tag.
lod.tag('orlando_test_program_tag', arguments=[json_file])
# Execute the main() function defined above.
# We wrap the whole thing in an lod.manager().
# This ensures that the objects created above (Options, Files, Messages, Tags) are made available to Elody.
# (The objects are not created immediately, since the program runs in an isolated environment.
# Instead, they are all given to Elody once the Program has finished running.)
# It also creates log files from any errors or print() statements that occur, which is useful for debugging.
# To inspect these log files, you need to use the lod-executor to run the Programs locally.
with lod.manager(suppress_exceptions_after_logging_them=False, redirect_stdout_to_log=True) as lod_manager:
main(lod_manager)
| python |
import berrl as bl
import pandas as pd
import numpy as np
d=pd.read_csv('STSIFARS.csv')
d=d[d.STANAME=='WEST VIRGINIA']
d.to_csv('wv_traffic_fatals.csv') | python |
##parameters=title=None, description=None, event_type=None, effectiveDay=None, effectiveMo=None, effectiveYear=None, expirationDay=None, expirationMo=None, expirationYear=None, start_time=None, startAMPM=None, stop_time=None, stopAMPM=None, location=None, contact_name=None, contact_email=None, contact_phone=None, event_url=None, **kw
##
from Products.CMFCalendar.exceptions import ResourceLockedError
from Products.CMFCalendar.utils import Message as _
try:
context.edit(title, description, event_type, effectiveDay, effectiveMo,
effectiveYear, expirationDay, expirationMo, expirationYear,
start_time, startAMPM, stop_time, stopAMPM, location,
contact_name, contact_email, contact_phone, event_url)
return context.setStatus(True, _(u'Event changed.'))
except ResourceLockedError as errmsg:
return context.setStatus(False, errmsg)
| python |
class ColorTranslator(object):
""" Translates colors to and from GDI+ System.Drawing.Color structures. This class cannot be inherited. """
@staticmethod
def FromHtml(htmlColor):
"""
FromHtml(htmlColor: str) -> Color
Translates an HTML color representation to a GDI+ System.Drawing.Color structure.
htmlColor: The string representation of the Html color to translate.
Returns: The System.Drawing.Color structure that represents the translated HTML color or
System.Drawing.Color.Empty if htmlColor is null.
"""
pass
@staticmethod
def FromOle(oleColor):
"""
FromOle(oleColor: int) -> Color
Translates an OLE color value to a GDI+ System.Drawing.Color structure.
oleColor: The OLE color to translate.
Returns: The System.Drawing.Color structure that represents the translated OLE color.
"""
pass
@staticmethod
def FromWin32(win32Color):
"""
FromWin32(win32Color: int) -> Color
Translates a Windows color value to a GDI+ System.Drawing.Color structure.
win32Color: The Windows color to translate.
Returns: The System.Drawing.Color structure that represents the translated Windows color.
"""
pass
@staticmethod
def ToHtml(c):
"""
ToHtml(c: Color) -> str
Translates the specified System.Drawing.Color structure to an HTML string color representation.
c: The System.Drawing.Color structure to translate.
Returns: The string that represents the HTML color.
"""
pass
@staticmethod
def ToOle(c):
"""
ToOle(c: Color) -> int
Translates the specified System.Drawing.Color structure to an OLE color.
c: The System.Drawing.Color structure to translate.
Returns: The OLE color value.
"""
pass
@staticmethod
def ToWin32(c):
"""
ToWin32(c: Color) -> int
Translates the specified System.Drawing.Color structure to a Windows color.
c: The System.Drawing.Color structure to translate.
Returns: The Windows color value.
"""
pass
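

# Illustrative usage (not in the original stub; it requires a .NET runtime such as IronPython
# or pythonnet where the real System.Drawing.ColorTranslator is available):
#   color = ColorTranslator.FromHtml("#FF8800")
#   html = ColorTranslator.ToHtml(color)
#   win32_value = ColorTranslator.ToWin32(color)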
| python |
from functools import wraps

# PUBLIC COMMAND
def init(fn):
    """Only run the wrapped handler when the message comes from a group or supergroup."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        message = args[0].message
        if message.chat.type == 'supergroup' or message.chat.type == 'group':
            return fn(*args, **kwargs)
        else:
            return False
return wrapper | python |