#coding:utf-8
#
# id: bugs.core_1056
# title: A query could produce different results, depending on the presence of an index
# description:
# tracker_id: CORE-1056
# min_versions: []
# versions: 2.0
# qmid: bugs.core_1056
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.0
# resources: None
substitutions_1 = []
init_script_1 = """create table t (c varchar(10) character set win1250 collate pxw_csy);
insert into t values ('ch');
commit;
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """set plan on;
select * from t where c starting with 'c';
commit;
create index t_c on t (c);
commit;
select * from t where c starting with 'c';
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
PLAN (T NATURAL)
C
==========
ch
PLAN (T INDEX (T_C))
C
==========
ch
"""
@pytest.mark.version('>=2.0')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
| python |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-22 19:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mimicon2016', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='signupextra',
name='want_certificate',
field=models.BooleanField(default=False, verbose_name='Haluan todistuksen ty\xf6skentelyst\xe4ni Mimiconissa'),
),
]
| python |
from django.contrib import admin
from models import *
# Register your models here.
class CategoryAdmin(admin.ModelAdmin):
list_display = ['id', 'title']
class GoodsInfoAdmin(admin.ModelAdmin):
list_display = ['id', 'title', 'price', 'unit', 'click', 'inventory', 'detail', 'desc', 'image']
admin.site.register(Category, CategoryAdmin)
admin.site.register(GoodsInfo, GoodsInfoAdmin)
| python |
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from app import db, login_manager
@login_manager.user_loader
def load_user(id):
return User.query.get(int(id))
dictionary_table = db.Table('dictionary',
db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
db.Column('word_id', db.Integer, db.ForeignKey('word.id'))
)
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(70), nullable=False, unique=True)
password = db.Column(db.String(94), nullable=False)
first_name = db.Column(db.String(30), nullable=False)
last_name = db.Column(db.String(30), nullable=False)
dictionary = db.relationship('Word',
secondary=dictionary_table,
lazy='dynamic',
backref=db.backref('users', lazy='dynamic')
)
created_at = db.Column(db.DateTime, default=db.func.now(), nullable=False)
def generate_password_hash(self, password):
self.password = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password, password)
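# A minimal, hypothetical usage sketch (assumes the Flask app, `db`, and
# `login_manager` from `app` are already configured; not part of the original module):
#
#     user = User(email="ada@example.com", first_name="Ada", last_name="Lovelace")
#     user.generate_password_hash("s3cret")   # stores only the salted hash
#     db.session.add(user)
#     db.session.commit()
#     assert user.check_password("s3cret")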
| python |
from typing import List, Tuple
import torch
from torch.utils.data import Dataset
from .feature import InputFeature
class FeaturesDataset(Dataset):
def __init__(self, features: List[InputFeature]):
self.features = features
def __len__(self):
return len(self.features)
def __getitem__(self, idx: int):
raise NotImplementedError()
class T5NERDataset(FeaturesDataset):
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
feat = self.features[idx]
input_ids = torch.tensor(feat.source_token_ids, dtype=torch.long)
attention_mask = torch.tensor(feat.attention_mask, dtype=torch.long)
lm_labels = torch.tensor(feat.target_token_ids, dtype=torch.long)
outputs = (input_ids, attention_mask, lm_labels)
return outputs
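# Hypothetical usage sketch (assumes InputFeature instances expose
# `source_token_ids`, `attention_mask`, and `target_token_ids` of equal length,
# as __getitem__ above implies; names below are illustrative only):
#
#     from torch.utils.data import DataLoader
#
#     dataset = T5NERDataset(features)          # features: List[InputFeature]
#     loader = DataLoader(dataset, batch_size=8)
#     for input_ids, attention_mask, lm_labels in loader:
#         ...  # feed the batch to a T5-style model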
| python |
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic TFX ImportExampleGen executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from typing import Any, Dict, Text, Union
from absl import logging
import apache_beam as beam
import tensorflow as tf
from tfx.components.example_gen import base_example_gen_executor
from tfx.components.example_gen import utils
from tfx.proto import example_gen_pb2
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(bytes)
def _ImportSerializedRecord( # pylint: disable=invalid-name
pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
split_pattern: Text) -> beam.pvalue.PCollection:
"""Read TFRecord files to PCollection of records.
Note that each input split will be transformed by this function separately.
Args:
pipeline: Beam pipeline.
exec_properties: A dict of execution properties.
- input_base: input dir that contains input data.
split_pattern: Split.pattern in Input config, glob relative file pattern
that maps to input files with root directory given by input_base.
Returns:
PCollection of records (tf.Example, tf.SequenceExample, or bytes).
"""
input_base_uri = exec_properties[utils.INPUT_BASE_KEY]
input_split_pattern = os.path.join(input_base_uri, split_pattern)
logging.info('Reading input TFRecord data %s.', input_split_pattern)
# TODO(jyzhao): profile input examples.
return (pipeline
# TODO(jyzhao): support multiple input container format.
| 'ReadFromTFRecord' >>
beam.io.ReadFromTFRecord(file_pattern=input_split_pattern))
class Executor(base_example_gen_executor.BaseExampleGenExecutor):
"""Generic TFX import example gen executor."""
def GetInputSourceToExamplePTransform(self) -> beam.PTransform:
"""Returns PTransform for importing records."""
@beam.ptransform_fn
@beam.typehints.with_input_types(beam.Pipeline)
@beam.typehints.with_output_types(Union[tf.train.Example,
tf.train.SequenceExample, bytes])
def ImportRecord(pipeline: beam.Pipeline, exec_properties: Dict[Text, Any],
split_pattern: Text) -> beam.pvalue.PCollection:
"""PTransform to import records.
The records are tf.train.Example, tf.train.SequenceExample,
or serialized proto.
Args:
pipeline: Beam pipeline.
exec_properties: A dict of execution properties.
- input_base: input dir that contains input data.
split_pattern: Split.pattern in Input config, glob relative file pattern
that maps to input files with root directory given by input_base.
Returns:
PCollection of records (tf.Example, tf.SequenceExample, or bytes).
"""
output_payload_format = exec_properties.get(utils.OUTPUT_DATA_FORMAT_KEY)
serialized_records = (
pipeline
# pylint: disable=no-value-for-parameter
| _ImportSerializedRecord(exec_properties, split_pattern))
if output_payload_format == example_gen_pb2.PayloadFormat.FORMAT_PROTO:
return serialized_records
elif (output_payload_format ==
example_gen_pb2.PayloadFormat.FORMAT_TF_EXAMPLE):
return (serialized_records
| 'ToTFExample' >> beam.Map(tf.train.Example.FromString))
elif (output_payload_format ==
example_gen_pb2.PayloadFormat.FORMAT_TF_SEQUENCE_EXAMPLE):
return (serialized_records
| 'ToTFSequenceExample' >> beam.Map(
tf.train.SequenceExample.FromString))
raise ValueError('output_payload_format must be one of FORMAT_TF_EXAMPLE,'
' FORMAT_TF_SEQUENCE_EXAMPLE or FORMAT_PROTO')
return ImportRecord
| python |
#!/usr/bin/env python3
"""Emulate a client by calling directly EC2 instance."""
import os
import sys
import json
import logging
# AWS Lambda does not ship requests out of the box
# import requests
import urllib3
# Global configuration
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
http = urllib3.PoolManager()
def test_ec2_via_http(ip):
"""Call EC2 via HTTP."""
try:
r = http.request('GET', 'http://{0}'.format(ip), timeout=3.5, retries=0)
response = r.data.decode('utf-8')
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.debug('Correct response: %s...', response[:20])
return (200 <= r.status < 300, r.status, response)
except urllib3.exceptions.HTTPError as err:
err_string = str(err)
logging.error('Encountered error while accessing %s: %s ', ip, err_string)
return (False, 500, err_string)
def lambda_handler(event, context):
"""Entrypoint to AWS lambda execution."""
ip_to_test = os.environ["IP_TO_TEST"]
status, code, text = test_ec2_via_http(ip_to_test)
# Lambda response should follow:
# https://aws.amazon.com/premiumsupport/knowledge-center/malformed-502-api-gateway/
# in order to be consumable via API Gateway
return {
'statusCode': code,
'isBase64Encoded': False,
'body': json.dumps({'status': status, 'text': text})
}
def main():
"""Enter the program to test it locally."""
# given
ip_to_test = sys.argv[1]
# when
test_result = test_ec2_via_http(ip_to_test)
# then
logging.info("Status: {0}, Code: {1}, Text: {2}".format(*test_result))
if __name__ == "__main__":
main()
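# Hypothetical local smoke test of the Lambda entrypoint itself (event and context
# are unused by lambda_handler, so empty stand-ins suffice; the IP is illustrative):
#
#     os.environ["IP_TO_TEST"] = "203.0.113.10"
#     print(lambda_handler(event={}, context=None))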
| python |
# -*- coding: utf-8 -*-
"""
Implement S3 Backed Binary and Unicode Attribute.
Since the content of a big Binary or Unicode attribute is not stored in DynamoDB, we
cannot use a custom ``pynamodb.attributes.Attribute`` to implement it.
"""
import os
import zlib
from base64 import b64encode, b64decode
from pynamodb.models import Model
from six import string_types
try:
import typing
except ImportError:
pass
s3_endpoint = None
if 'S3_PORT' in os.environ:
s3_endpoint = 'http://{}:{}'.format(
os.environ['SLS_OFF_HOST'], os.environ['S3_PORT'] )
def s3_key_safe_b64encode(text):
return b64encode(text.encode("utf-8")).decode("utf-8").replace("=", "")
def s3_key_safe_b64decode(text):
div, mod = divmod(len(text), 4)
if mod != 0:
text = text + "=" * (4 - mod)
return b64decode(text.encode("utf-8")).decode("utf-8")
def parse_s3_uri(s3_uri):
chunks = s3_uri.split("/", 3)
bucket = chunks[2]
key = chunks[3]
return bucket, key
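# For illustration (hypothetical URI): "s3://my-bucket/data/page.html" splits into
# ['s3:', '', 'my-bucket', 'data/page.html'], so bucket == "my-bucket" and
# key == "data/page.html".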
class BaseS3BackedAttribute(object):
"""
Implement S3 relative operation for each attribute.
:type s3_uri_getter: typing.Union[str, typing.Callable]
:param s3_uri_getter: str or callable function, it takes the pynamodb orm
object as input, returns the S3 URI string for this s3 backed attribute.
"""
def __init__(self, s3_uri_getter, compress=False, name=None):
self.s3_uri_getter = s3_uri_getter
if isinstance(s3_uri_getter, string_types):
self.s3_uri_getter_real = lambda obj: getattr(obj, s3_uri_getter)
elif callable(s3_uri_getter):
self.s3_uri_getter_real = s3_uri_getter
else:
raise TypeError("s3_uri_getter has to be a string or a callable")
self.compress = compress
self.name = name
def serialize(self, data):
raise NotImplementedError
def deserialize(self, data):
raise NotImplementedError
def set_to(self, data):
return (self, data)
def head_object(self, model_obj):
s3_uri = self.s3_uri_getter_real(model_obj)
bucket, key = parse_s3_uri(s3_uri)
return model_obj.get_s3_client().head_object(Bucket=bucket, Key=key)
def _put_binary_data(self, model_obj, data):
"""
Write binary data as it is to s3.
:type model_obj: S3BackedMixin
:type data: bytes
"""
s3_uri = self.s3_uri_getter_real(model_obj)
bucket, key = parse_s3_uri(s3_uri)
res = model_obj.get_s3_client().put_object(
Bucket=bucket, Key=key, Body=data)
return res
def put_object(self, model_obj, data):
"""
:type model_obj: S3BackedMixin
"""
if self.compress:
body = zlib.compress(self.serialize(data))
else:
body = self.serialize(data)
return self._put_binary_data(model_obj, body)
def _read_binary_data(self, model_obj):
"""
Read binary data as it is from s3
:type model_obj: S3BackedMixin
"""
s3_uri = self.s3_uri_getter_real(model_obj)
bucket, key = parse_s3_uri(s3_uri)
res = model_obj.get_s3_client().get_object(
Bucket=bucket, Key=key)
return res["Body"].read()
def read_data(self, model_obj):
"""
:return: the deserialized data read from the S3 object.
"""
if self.compress:
return self.deserialize(zlib.decompress(self._read_binary_data(model_obj)))
else:
return self.deserialize(self._read_binary_data(model_obj))
def delete_object(self, model_obj):
"""
:type model_obj: S3BackedMixin
"""
s3_uri = self.s3_uri_getter_real(model_obj)
bucket, key = parse_s3_uri(s3_uri)
res = model_obj.get_s3_client().delete_object(Bucket=bucket, Key=key)
return res
class S3BackedBinaryAttribute(BaseS3BackedAttribute):
def serialize(self, data):
return data
def deserialize(self, data):
return data
class S3BackedUnicodeAttribute(BaseS3BackedAttribute):
def serialize(self, data):
return data.encode("utf-8")
def deserialize(self, data):
return data.decode("utf-8")
class S3BackedMixin(object): # type: typing.Type[Model]
_s3_client = None
_s3_backed_attr_mapper = None
_s3_backed_value_mapper = None
@classmethod
def get_s3_backed_attr_mapper(cls):
"""
:type cls: Model
:rtype: dict
"""
if cls._s3_backed_attr_mapper is None:
cls._s3_backed_attr_mapper = dict()
for attr, value in cls.__dict__.items():
try:
if isinstance(value, BaseS3BackedAttribute):
value.name = attr
cls._s3_backed_attr_mapper[attr] = value
except Exception as e:
pass
return cls._s3_backed_attr_mapper
@classmethod
def get_s3_client(cls):
"""
:type cls: Model
"""
if cls._s3_client is None:
pynamodb_connection = cls._get_connection().connection
cls._s3_client = pynamodb_connection.session.create_client(
"s3", pynamodb_connection.region,
endpoint_url=s3_endpoint)
return cls._s3_client
def atomic_save(self,
condition=None,
s3_backed_data=None):
"""
An ``atomic`` save operation for multiple S3 backed attributes.
:type self: typing.Union[Model, S3BackedMixin]
:type s3_backed_data: List[BaseS3BackedAttribute.set_to(data)]
:param s3_backed_data: example ``[page.html_content.set_to("<html> ... </html>"), page.image_content.set_to(b"...")]``
"""
if s3_backed_data is None:
s3_backed_data = list()
saved_data_list = list()
for s3_backed_attr, data in s3_backed_data:
try:
s3_backed_attr.put_object(self, data)
saved_data_list.append((s3_backed_attr, data))
# if any of s3.put_object failed, roll back and skip dynamodb.put_item
except Exception as put_object_error:
for s3_backed_attr, data in saved_data_list:
s3_backed_attr.delete_object(self)
raise put_object_error
try:
res = self.save(condition=condition)
del saved_data_list
return res
except Exception as dynamodb_save_error: # delete saved s3 object if dynamodb write operation failed
for s3_backed_attr, data in saved_data_list:
s3_backed_attr.delete_object(self)
del saved_data_list
raise dynamodb_save_error
def atomic_update(self,
actions=None,
condition=None,
s3_backed_data=None):
"""
An ``atomic`` update operation for multiple S3 backed attributes.
:type self: typing.Union[Model, S3BackedMixin]
:type s3_backed_data: List[BaseS3BackedAttribute.set_to(data)]
:param s3_backed_data: example ``[page.html_content.set_to("<html> ... </html>"), page.image_content.set_to(b"...")]``
"""
if s3_backed_data is None:
s3_backed_data = list()
previous_data_list = list()
for s3_backed_attr, data in s3_backed_data:
try:
previous_data_list.append(
(
s3_backed_attr,
s3_backed_attr._read_binary_data(self)
)
)
s3_backed_attr.put_object(self, data)
# if any of s3.put_object failed, roll back and skip dynamodb.put_item
except Exception as put_object_error:
for s3_backed_attr, data in previous_data_list:
s3_backed_attr.put_object(self, data)
raise put_object_error
if actions is not None:
return self.update(actions=actions, condition=condition)
def atomic_delete(self,
condition=None):
"""
An ``atomic`` delete operation for multiple S3 backed attributes.
:type self: typing.Union[Model, S3BackedMixin]
"""
self.delete(condition=condition)
for attr, value in self.get_s3_backed_attr_mapper().items():
# check if the s3 object exists, if exists, delete it
try:
value.head_object(self)
value.delete_object(self)
except Exception as e:
pass
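# A hypothetical end-to-end sketch showing how the pieces above combine (table
# name, bucket, and attribute names are illustrative, not part of this module):
#
#     from pynamodb.attributes import UnicodeAttribute
#
#     class Page(Model, S3BackedMixin):
#         class Meta:
#             table_name = "pages"
#             region = "us-east-1"
#         url = UnicodeAttribute(hash_key=True)
#         html_content = S3BackedUnicodeAttribute(
#             s3_uri_getter=lambda page: "s3://my-bucket/html/{}.html".format(
#                 s3_key_safe_b64encode(page.url)),
#             compress=True,
#         )
#
#     page = Page(url="https://example.com")
#     page.atomic_save(
#         s3_backed_data=[page.html_content.set_to("<html>...</html>")])
#     html = page.html_content.read_data(page)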
| python |
# Copyright 2016 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.aws.common import Resource
from touchdown.core import argument, serializers
from touchdown.core.plan import Plan, Present
from ..account import BaseAccount
from .rule import Rule
from .waf import WafApply, WafDescribe, WafDestroy
class ActivatedRule(Resource):
resource_name = "activated_rule"
action = argument.String(
field="Action",
choices=["BLOCK", "ALLOW", "COUNT"],
serializer=serializers.Dict(Type=serializers.String()),
)
priority = argument.Integer(field="Priority")
rule = argument.Resource(Rule, field="RuleId")
class WebACL(Resource):
resource_name = "web_acl"
name = argument.String(field="Name")
metric_name = argument.String(field="MetricName")
default_action = argument.String(
field="DefaultAction",
choices=["BLOCK", "ALLOW", "COUNT"],
serializer=serializers.Dict(Type=serializers.String()),
)
activated_rules = argument.ResourceList(
ActivatedRule, field="ActivatedRules", create=False
)
account = argument.Resource(BaseAccount)
class Describe(WafDescribe, Plan):
resource = WebACL
service_name = "waf"
api_version = "2015-08-24"
describe_action = "list_web_acls"
describe_envelope = "WebACLs"
annotate_action = "get_web_acl"
key = "WebACLId"
container_update_action = "update_web_acl"
container = "Rules"
container_member = "ActivatedRule"
local_container = "activated_rules"
class Apply(WafApply, Describe):
create_action = "create_web_acl"
signature = (Present("name"), Present("metric_name"), Present("default_action"))
class Destroy(WafDestroy, Describe):
destroy_action = "delete_web_acl"
| python |
from __future__ import print_function, division
import sys
sys._running_pytest = True
import pytest
from sympy.core.cache import clear_cache
def pytest_report_header(config):
from sympy.utilities.misc import ARCH
s = "architecture: %s\n" % ARCH
from sympy.core.cache import USE_CACHE
s += "cache: %s\n" % USE_CACHE
from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
version = ''
if GROUND_TYPES == 'gmpy':
if HAS_GMPY == 1:
import gmpy
elif HAS_GMPY == 2:
import gmpy2 as gmpy
version = gmpy.version()
s += "ground types: %s %s\n" % (GROUND_TYPES, version)
return s
def pytest_addoption(parser):
parser.addoption("--slow", action="store_true",
help="allow slow tests to run")
def pytest_configure(config):
# register an additional marker
config.addinivalue_line("markers", "slow: slow test")
def pytest_runtest_setup(item):
if not isinstance(item, pytest.Function):
return
if item.config.getoption("--slow"):
if 'slow' not in item.keywords:
pytest.skip()
elif 'slow' in item.keywords:
pytest.skip("slow test: pass --slow to run")
def pytest_terminal_summary(terminalreporter):
if (terminalreporter.stats.get('error', None) or
terminalreporter.stats.get('failed', None)):
terminalreporter.write_sep(
' ', 'DO *NOT* COMMIT!', red=True, bold=True)
def pytest_runtest_teardown():
clear_cache()
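# For illustration, a test opts in to the marker registered above like this
# (hypothetical test, not part of this conftest):
#
#     @pytest.mark.slow
#     def test_expensive_simplification():
#         ...
#
# Without --slow such tests are skipped; with --slow, only the slow tests run.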
| python |
# -*- coding: utf-8 -*-
from .munsell import * # noqa
from . import munsell
__all__ = []
__all__ += munsell.__all__
| python |
import argparse
import importlib
from verify import mnist, cifar, imagenet
import time
def verify(args):
try:
net_class_module = importlib.import_module(args.netclassfile)
net_class = getattr(net_class_module, args.netclassname)
except Exception as err:
print('Error: failed to import the model class.')
print(err)
exit(-1)
if args.epsilon > 1. or args.epsilon < 0.:
    print('Error: error rate should be in [0,1]')
    exit(-1)
if args.eta > 1. or args.eta < 0.:
    print('Error: significance level should be in [0,1]')
    exit(-1)
start = time.time()
if args.dataset == 'mnist':
mnist.mnist_verify(net_class, args)
elif args.dataset == 'cifar10':
cifar.cifar_verify(net_class, args)
elif args.dataset == 'imagenet':
imagenet.imagenet_verify(net_class, args)
print('Time: ', time.time()-start)
parser = argparse.ArgumentParser()
parser.add_argument('-ncf', '--netclassfile', type=str,
help='Python network class file contains the network class defined by PyTorch', required=True)
parser.add_argument('-nc', '--netclassname', type=str,
help='Name of the network class', required=True)
parser.add_argument('-m', '--model', type=str,
help='Model File for the network class containing the PyTorch statedict', required=True)
parser.add_argument('-d', '--dataset', type=str, choices=['mnist', 'cifar10', 'imagenet'],
help='The dataset of the model can be either mnist, cifar10 or imagenet', required=True)
parser.add_argument('-r', '--radius', type=int, choices=range(0, 256),
help='The verification radius of the L-inf ball (0-255)', required=True, metavar='0-255')
parser.add_argument('-eps', '--epsilon', type=float,
help='The error rate of the PAC-model', required=True)
parser.add_argument('-eta', '--eta', type=float,
help='The significance level of the PAC-model (1-confidence)', required=True)
parser.add_argument('-img', '--image', type=str,
help='Path of the image file to be verified (required for Imagenet models)')
parser.add_argument('-ind', '--index', type=int, default=0,
help='The index of the image to be verified. (required for Mnist and Cifar10 models)')
parser.add_argument('-train', '--train', action='store_true',
help='Set if you want to verify images in the trainset. (optional, only affects Mnist and Cifar10 models)')
parser.add_argument('-gpu', '--gpu', action='store_true',
help='Set to use GPU (optional, default False)')
parser.add_argument('-FT', '--FThreshold', type=int, default=2000,
help='The sampling threshold for the first focused learning phase. (optional, only affects Mnist and Cifar10, default 2000)')
parser.add_argument('-ST', '--SThreshold', type=int, default=8000,
help='The sampling threshold for the second focused learning phase. (optional, only affects Mnist and Cifar10, default 8000)')
parser.add_argument('-b', '--budget', type=int, default=20000,
help='The sampling budget for stepwise splitting. (optional, only affects Imagenet, default=20000)')
parser.add_argument('-bsize', '--batchsize', type=int, default=200,
help='The batchsize of the sampling procedure. (optional, only affects Imagenet and Cifar10, default=200)')
parser.add_argument('-mean', '--mean', type=tuple,
help='The mean used to normalize the data. (optional, (0.485, 0.456, 0.406) for Imagenet, (0.4914, 0.4822, 0.4465) for Cifar10, (0.1307,) for Mnist, by default)')
parser.add_argument('-std', '--std', type=tuple,
help='The standard deviation used to normalize the data. (optional, (0.229, 0.224, 0.225) for Imagenet, (0.2023, 0.1994, 0.2010) for Cifar10, (0.3081,) for Mnist, by default)')
parser.add_argument('-l', '--label', type=int, choices=range(0, 1000),
help='The true label of the image according to the 1000-class Imagenet dataset. (optional, the output label of the neural network is used if not provided, only affects Imagenet)', metavar='0-999')
parser.add_argument('-solver', '--lpsolver', choices=[
'gurobi', 'cbc'], help='The linear programming solver. (Gurobi or CBC; the cvxpy default LP solver is used if not assigned)')
imagenet_required = ['image']
args = parser.parse_args()
verify(args)
# print(args)
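# Example invocation (module, class, and file names are illustrative only; -ncf is
# resolved with importlib.import_module, so it is given as a module name):
#
#     python verify.py -ncf netclass -nc MyNet -m mynet_statedict.pth \
#         -d cifar10 -r 4 -eps 0.01 -eta 0.01 -ind 3 -gpu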
| python |
"""
Produces a mapping from template named arguments to article categories
"""
from __future__ import print_function
import logging
import json
import re
from collections import defaultdict
from mwclient.client import Site
import requests
logging.basicConfig(level=logging.INFO)
def get_articles_from_top_categories(site, categories_limit=3, articles_limit=5):
"""
:type site Site
:type categories_limit int
:type articles_limit int
:rtype: typing.Iterator[typing.Tuple[str, str]]
"""
# http://muppet.sandbox-s6.wikia.com/api.php?action=query&list=querypage&qppage=Mostpopularcategories&qplimit=20
res = site.get(action='query', list='querypage', qppage='Mostpopularcategories', qplimit=categories_limit)
categories = [result['title'] for result in res['query']['querypage']['results']]
for category in categories:
# get first X pages from the category
# http://muppet.sandbox-s6.wikia.com/api.php?action=query&list=categorymembers&cmtitle=Category:Sesame%20Street%20Episodes&cmlimit=50
res = site.get(action='query', list='categorymembers', cmtitle='Category:{}'.format(category), cmlimit=articles_limit)
for page in res['query']['categorymembers']:
# we're only interested in main namespace articles
if page['ns'] == 0:
yield page['title'], category
def get_infobox_arguments(site, title):
"""
:type site Site
:type title str
:rtype: set[str]
"""
logger = logging.getLogger('get_infobox_arguments')
logger.info('Article: %s', title)
# https://nfs.sandbox-s6.fandom.com/wikia.php?controller=TemplatesApiController&method=getMetadata&title=Ferrari_355_F1
res = json.loads(site.raw_call(
http_method='GET',
script='wikia',
data={
'controller': 'TemplatesApiController',
'method': 'getMetadata',
'title': title
}
))
infoboxes = [template for template in res['templates'] if template['type'] == 'infobox']
# print(infoboxes)
# return a set of template arguments used on a given article
arguments = set()
for infobox in infoboxes:
arguments.update(infobox['parameters'].keys())
return arguments
def arguments_to_categories(wikis, env=None, proxy=None):
"""
:type wikis list[str]
:type env str
:type proxy str
:rtype: dict
"""
logger = logging.getLogger('arguments_to_categories')
# apply the environment
if env:
wikis = [re.sub(r'\.(wikia|fandom)', '.{}.\\1'.format(env), wiki) for wiki in wikis]
logger.info('Gathering stats for %s domains', wikis)
# we will emit results as (template argument) => (a set of article categories where this argument is used)
res = defaultdict(set)
# set up connection to MediaWiki backend via our internal proxy
pool = requests.Session()
if proxy:
logger.info('Using HTTP proxy: %s', proxy)
pool.proxies = {'http': proxy}
# gather statistics for each wiki
for wiki in wikis:
site = Site(host=('http', wiki), path='/', pool=pool)
# process each article
for article, category in get_articles_from_top_categories(site):
# update each template argument found with a category where this article is in
for argument in get_infobox_arguments(site, article):
res[argument].add(category)
return res
if __name__ == '__main__':
mapping = arguments_to_categories(
wikis=[
'muppet.wikia.com',
'nfs.fandom.com',
'gta.wikia.com',
],
env='sandbox-s6',
proxy='border-http-s3:80'
)
for arg, items in mapping.items():
print('{} -> {}'.format(
arg, items))
| python |
"""
BaMi_optimal.py - compares BaMiC with BaMiF and includes the (according to us) optimal integration strategies.
"""
import sys
import matplotlib.pyplot as plt
from pywmi.engines.xsdd.literals import LiteralInfo
from _pywmi.vtree.bottomup_elimination import bottomup_balanced_minfill as bamif
from _pywmi.vtree.topdown_balanced_mincut import topdown_balanced_mincut_hg as bamic
from _pywmi.vtree.int_tree import *
from _pywmi.vtree.topdown_mincut import conversion_tables
from _pywmi.experiment import *
from _pywmi.problems import *
from pywmi.engines.pyxadd.algebra import PyXaddAlgebra
full_reduce = True
reduce_strategy = PyXaddAlgebra.FULL_REDUCE if full_reduce else PyXaddAlgebra.ONLY_INIT_INTEGRATION_REDUCE
all_strats = [bamic,
bamif]
xadd = lambda: PyXaddAlgebra(reduce_strategy=reduce_strategy)
# %%
tpg_star_gen = lambda n: make_from_graph(tpg_star(n))
tpg_3ary_gen = lambda n: make_from_graph(tpg_3ary_tree(n))
tpg_path_gen = lambda n: make_from_graph(tpg_path(n))
# %%
size_range = list(range(3, 41))
env_timeout.set(50)
ordered = False
algebra = xadd
verbose = False
sys.setrecursionlimit(10**6)
# %%
def splitpath_int_vtree_gen(literal_info: LiteralInfo):
""" Creates an integration order in a split path form x0 - x1 - x2 - x3 - ... """
logic2cont, cont2logic = conversion_tables(literal_info)
cont_vars = sorted(list(cont2logic.keys()), key=lambda n: int(n[1:]))
assert len(cont_vars) >= 3
middle_index = math.floor(len(cont_vars)/2)
# Create left line
left_int_tree = IntTreeVar(cont_vars[0])
for cont in cont_vars[1:middle_index]:
left_int_tree = IntTreeLine(cont, left_int_tree)
# Create right line
right_int_tree = IntTreeVar(cont_vars[-1])
for cont in reversed(cont_vars[middle_index+1:-1]):
right_int_tree = IntTreeLine(cont, right_int_tree)
# Middle split
int_tree = IntTreeSplit(cont_vars[middle_index], left_int_tree, right_int_tree)
return int_tree.create_vtree(logic2cont.keys(), logic2cont)
def star_int_vtree_gen(literal_info: LiteralInfo):
""" Creates an integration order for problems with a star primal (star, xor, mutex). """
logic2cont, cont2logic = conversion_tables(literal_info)
middle_var, _ = max(cont2logic.items(), key=lambda x: len(x[1]))
other_vars_int_trees = [IntTreeVar(v) for v in cont2logic.keys() if v != middle_var]
if len(other_vars_int_trees) != 0:
int_tree = IntTreeParallel(middle_var, other_vars_int_trees)
else:
int_tree = IntTreeVar(middle_var)
return int_tree.create_vtree(logic2cont.keys(), logic2cont)
def dual_int_vtree_gen(literal_info: LiteralInfo):
""" Creates an integration order for the dual problem. """
logic2cont, cont2logic = conversion_tables(literal_info)
cont_pairs = [list(pair) for pair in logic2cont.values() if len(pair) == 2]
int_pairs = [IntTreeLine(x[0], IntTreeVar(x[1])) for x in cont_pairs]
int_tree = IntTreeParallel(None, int_pairs)
return int_tree.create_vtree(logic2cont.keys(), logic2cont)
# %%
# DUAL
all_strats.append(dual_int_vtree_gen)
dual_exp = CompareStrategies(
algebra=algebra,
problem_generator=dual,
size=size_range,
vtree_strategy=all_strats,
verbose=verbose,
ordered=ordered,
)
print("Finished dual_exp")
all_strats.pop()
# XOR
all_strats.append(star_int_vtree_gen)
xor_exp = CompareStrategies(
algebra=algebra,
problem_generator=xor,
size=size_range,
vtree_strategy=all_strats,
verbose=verbose,
ordered=ordered,
)
print("Finished xor_exp")
all_strats.pop()
# MUTEX
all_strats.append(star_int_vtree_gen)
mutex_exp = CompareStrategies(
algebra=algebra,
problem_generator=mutual_exclusive,
size=size_range,
vtree_strategy=all_strats,
verbose=verbose,
ordered=ordered,
)
print("Finished mutex_exp")
all_strats.pop()
# STAR
all_strats.append(star_int_vtree_gen)
tpg_star_exp = CompareStrategies(
algebra=algebra,
problem_generator=tpg_star_gen,
size=size_range,
vtree_strategy=all_strats,
verbose=verbose,
ordered=ordered,
)
print("Finished star_exp")
all_strats.pop()
# 3ARY
all_strats.append(bamif) # TODO: Optimal strategy
tpg_3ary_exp = CompareStrategies(
algebra=algebra,
problem_generator=tpg_3ary_gen,
size=size_range,
vtree_strategy=all_strats,
verbose=verbose,
ordered=ordered,
)
print("Finished 3ary_exp")
all_strats.pop()
# PATH
all_strats.append(splitpath_int_vtree_gen)
tpg_path_exp = CompareStrategies(
algebra=algebra,
problem_generator=tpg_path_gen,
size=size_range,
vtree_strategy=all_strats,
verbose=verbose,
ordered=ordered,
)
print("Finished path_exp")
all_strats.pop()
# %% md
# Graph
# %%
all_data = [
('dual', dual_exp),
('xor', xor_exp),
('mutex', mutex_exp),
('pg-star', tpg_star_exp),
('pg-3ary', tpg_3ary_exp),
('pg-path', tpg_path_exp)
]
vtree_heuristics = [
#('implicit-balanced', 'black', '+'),
#('implicit-leftlinear', 'green', 'o'),
#('implicit-rightlinear', 'purple', 's'),
('balanced-mincut', 'red', '.'),
('balanced-minfill', 'blue', ','),
('optimal', 'green', 'x')
]
# %%
from matplotlib.ticker import MaxNLocator
fig, axes = plt.subplots(2, 3)
fig.set_size_inches(9, 6)
fig.subplots_adjust(bottom=0.14, wspace=0.3, hspace=0.3)
for i, (name, exp) in enumerate(all_data):
i1 = i // 3
i2 = i % 3
ax = axes[i1][i2]
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
for (strat_name, color, marker), (_, times) in zip(vtree_heuristics, exp.all_experiments()):
# vtree_times = list(times.get_all_results('vtree_time'))
total_times = list(times.get_all_results('total_time'))
sizes = times.values[:len(total_times)]
ax.plot(sizes, total_times, color=color, marker=marker, linestyle='-', label=strat_name)
# ax.plot(sizes, vtree_times, color=color, marker='o', linestyle='--')
if i1 != 1:
ax.set_xlabel(None)
else:
ax.set_xlabel("Problem size (n)")
if i2 == 0:
ax.set_ylabel("Time (s)")
else:
ax.set_ylabel(None)
ax.set_title(f"{name}(n)")
# Bug: fig.legend not included in pdf
ax.legend(loc='lower center', ncol=2,
bbox_to_anchor=(0.5, -0.04), bbox_transform=fig.transFigure)
# %%
filename = 'bami_comparison'
if ordered:
filename += '-ordered'
if algebra == xadd:
filename += '-xadd'
filename += '-full' if full_reduce else '-init'
fig.savefig(filename + '.pdf', bbox_inches='tight')
# %%
| python |
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Asr Model."""
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import cluster_factory
from lingvo.core import py_utils
from lingvo.core import schedule
from lingvo.core import summary_utils
from lingvo.core import test_helper
from lingvo.core import test_utils
from lingvo.tasks.asr import decoder
from lingvo.tasks.asr import input_generator
from lingvo.tasks.asr import model
from lingvo.tasks.asr import model_test_input_generator as tig
import numpy as np
class DecoderForTest(decoder.AsrDecoder):
"""Unit test class for AsrDecoder with functional.for based unrolling."""
@classmethod
def Params(cls):
p = super(DecoderForTest, cls).Params()
p.use_while_loop_based_unrolling = False
return p
class AsrModelTest(test_utils.TestCase):
def _testParams(self):
input_shape = [2, 16, 8, 3]
p = model.AsrModel.Params()
p.decoder.target_seq_len = 5
p.encoder.input_shape = input_shape
p.input = tig.TestInputGenerator.Params()
p.input.target_max_length = 5
p.input.source_shape = input_shape
p.input.target_shape = [2, 5]
p.name = 'test_mdl'
return p
def testMakeDecoderTheta(self):
# Test that decoder theta returns a copy of theta.decoder without changes.
with self.session(use_gpu=False, graph=tf.Graph()):
tf.random.set_seed(93820985)
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
decoder_theta = mdl._MakeDecoderTheta(theta=mdl.theta, input_batch=None)
mdl.BProp()
self.assertEqual(decoder_theta, mdl.theta.decoder)
def testFProp(self):
with self.session(use_gpu=False):
tf.random.set_seed(93820985)
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
self.evaluate(tf.global_variables_initializer())
test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval())
actual_var_names = [_.name for _ in tf.trainable_variables()]
print('all vars \n', '\n'.join(actual_var_names))
expected_var_names = [
'test_mdl/enc/conv_L0/w/var:0',
'test_mdl/enc/conv_L0/beta/var:0',
'test_mdl/enc/conv_L0/gamma/var:0',
'test_mdl/enc/conv_L1/w/var:0',
'test_mdl/enc/conv_L1/beta/var:0',
'test_mdl/enc/conv_L1/gamma/var:0',
'test_mdl/enc/f_conv_lstm_0/wm/var:0',
'test_mdl/enc/f_conv_lstm_0/b/var:0',
'test_mdl/enc/b_conv_lstm_0/wm/var:0',
'test_mdl/enc/b_conv_lstm_0/b/var:0',
'test_mdl/enc/conv_lstm_cnn_0/w/var:0',
'test_mdl/enc/conv_lstm_cnn_0/beta/var:0',
'test_mdl/enc/conv_lstm_cnn_0/gamma/var:0',
'test_mdl/enc/fwd_rnn_L0/wm/var:0',
'test_mdl/enc/fwd_rnn_L0/b/var:0',
'test_mdl/enc/bak_rnn_L0/wm/var:0',
'test_mdl/enc/bak_rnn_L0/b/var:0',
'test_mdl/enc/proj_L0/w/var:0',
'test_mdl/enc/proj_L0/beta/var:0',
'test_mdl/enc/proj_L0/gamma/var:0',
'test_mdl/enc/fwd_rnn_L1/wm/var:0',
'test_mdl/enc/fwd_rnn_L1/b/var:0',
'test_mdl/enc/bak_rnn_L1/wm/var:0',
'test_mdl/enc/bak_rnn_L1/b/var:0',
'test_mdl/enc/proj_L1/w/var:0',
'test_mdl/enc/proj_L1/beta/var:0',
'test_mdl/enc/proj_L1/gamma/var:0',
'test_mdl/enc/fwd_rnn_L2/wm/var:0',
'test_mdl/enc/fwd_rnn_L2/b/var:0',
'test_mdl/enc/bak_rnn_L2/wm/var:0',
'test_mdl/enc/bak_rnn_L2/b/var:0',
'test_mdl/dec/emb/var_0/var:0',
'test_mdl/dec/rnn_cell/wm/var:0',
'test_mdl/dec/rnn_cell/b/var:0',
'test_mdl/dec/atten/source_var/var:0',
'test_mdl/dec/atten/query_var/var:0',
'test_mdl/dec/atten/hidden_var/var:0',
'test_mdl/dec/softmax/weight_0/var:0',
'test_mdl/dec/softmax/bias_0/var:0',
]
self.assertCountEqual(expected_var_names, actual_var_names)
def testDecode(self):
with self.session(use_gpu=False):
tf.random.set_seed(93820985)
p = self._testParams()
mdl = p.Instantiate()
input_batch = mdl.input_generator.GetPreprocessedInputBatch()
dec_out_dict = mdl.DecodeWithTheta(mdl.theta, input_batch)
self.evaluate(tf.global_variables_initializer())
dec_out = self.evaluate(dec_out_dict)
print('dec_out', dec_out)
metrics_dict = mdl.CreateDecoderMetrics()
key_value_pairs = mdl.PostProcessDecodeOut(dec_out, metrics_dict)
self.assertEqual(1.0, metrics_dict['wer'].value)
self.assertEqual(1.0, metrics_dict['norm_wer'].value)
self.assertEqual(1.0, metrics_dict['ter'].value)
self.assertEqual(0, len(key_value_pairs))
def testPostProcessDecodeOut(self):
p = self._testParams()
p.decoder.beam_search.num_hyps_per_beam = 2
mdl = p.Instantiate()
fake_dec_out = {
'utt_id': ['utt1', 'utt2'],
'transcripts': ['a b c d', 'a'],
'topk_decoded': [['a b c d', 'a b c d'], ['wrong', '']],
'topk_scores': [[1.0, 0.9], [1.0, 0.9]],
'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6], [4, 5, 6, 7]],
'topk_lens': [2, 4, 4, 2],
'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]],
'target_paddings': [[0, 0, 0, 1], [0, 0, 0, 1]],
'norm_wer_errors': [[0, 0], [1, 1]],
'norm_wer_words': [[4, 4], [1, 1]],
}
fake_dec_out = {k: np.array(v) for k, v in fake_dec_out.items()}
metrics_dict = mdl.CreateDecoderMetrics()
key_value_pairs = mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict)
self.assertEqual(0 + 1, metrics_dict['wer'].total_value)
self.assertEqual(4 + 1, metrics_dict['wer'].total_weight)
self.assertEqual(0 + 1, metrics_dict['norm_wer'].total_value)
self.assertEqual(4 + 1, metrics_dict['norm_wer'].total_weight)
self.assertEqual(4, metrics_dict['ter'].total_value)
self.assertEqual(6, metrics_dict['ter'].total_weight)
self.assertEqual(2, metrics_dict['num_samples_in_batch'].total_value)
self.assertEqual(1.0, metrics_dict['num_samples_in_batch'].total_weight)
self.assertEqual((4 / 5 * 3 / 3 * 2 / 2 * 1 / 1)**(1 / 4),
metrics_dict['corpus_bleu'].value)
self.assertEqual((0 + 1) / 2, metrics_dict['sacc'].value)
self.assertEqual((0 + 1) / (4 + 1), metrics_dict['oracle_norm_wer'].value)
self.assertEqual(0, len(key_value_pairs))
def testPostProcessDecodeOutFiltersEpsilonTokensForWER(self):
p = self._testParams()
p.decoder.beam_search.num_hyps_per_beam = 1
mdl = p.Instantiate()
fake_dec_out = {
'utt_id': ['utt1', 'utt2'],
'transcripts': ['a b c d', 'a b c'],
'topk_decoded': [['a b<epsilon>c d'], ['<epsilon>a b<epsilon>']],
'topk_scores': [[1.0], [1.0]],
'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5]],
'topk_lens': [3, 4],
'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]],
'target_paddings': [[0, 0, 0, 1], [0, 0, 1, 1]],
'norm_wer_errors': [[0], [1]],
'norm_wer_words': [[4], [3]],
}
fake_dec_out = {k: np.array(v) for k, v in fake_dec_out.items()}
metrics_dict = mdl.CreateDecoderMetrics()
kv_pairs = mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict)
self.assertEqual(0 + 1, metrics_dict['wer'].total_value)
self.assertEqual(7, metrics_dict['wer'].total_weight)
self.assertEqual(0 + 1, metrics_dict['norm_wer'].total_value)
self.assertEqual(7, metrics_dict['norm_wer'].total_weight)
self.assertEqual(0, len(kv_pairs))
def testPostProcessDecodeOutFiltersNoiseTokensForWER(self):
p = self._testParams()
p.decoder.beam_search.num_hyps_per_beam = 1
mdl = p.Instantiate()
fake_dec_out = {
'utt_id': ['utt1', 'utt2'],
'transcripts': ['a b c d', 'a b c'],
'topk_decoded': [['a b <noise> c d'], ['<noise> a b <noise>']],
'topk_scores': [[1.0], [1.0]],
'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5]],
'topk_lens': [3, 4],
'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]],
'target_paddings': [[0, 0, 0, 1], [0, 0, 1, 1]],
'norm_wer_errors': [[0], [1]],
'norm_wer_words': [[4], [3]],
}
fake_dec_out = {k: np.array(v) for k, v in fake_dec_out.items()}
metrics_dict = mdl.CreateDecoderMetrics()
kv_pairs = mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict)
self.assertEqual(0 + 1, metrics_dict['wer'].total_value)
self.assertEqual(7, metrics_dict['wer'].total_weight)
self.assertEqual(0 + 1, metrics_dict['norm_wer'].total_value)
self.assertEqual(7, metrics_dict['norm_wer'].total_weight)
self.assertEqual(0, len(kv_pairs))
def testPostProcessDecodeOutHandlesEmptyRef(self):
p = self._testParams()
p.decoder.beam_search.num_hyps_per_beam = 1
mdl = p.Instantiate()
fake_dec_out = {
'utt_id': ['utt1', 'utt2'],
'transcripts': ['', 'a b c d'],
'topk_decoded': [['a'], ['a b c d']],
'topk_scores': [[1.0], [1.0]],
'topk_ids': [[1, 2, 3, 4], [2, 3, 4, 5]],
'topk_lens': [3, 4],
'target_labels': [[1, 2, 3, 4], [2, 3, 4, 5]],
'target_paddings': [[1, 1, 1, 1], [0, 0, 1, 1]],
'norm_wer_errors': [[1], [0]],
'norm_wer_words': [[0], [4]],
}
fake_dec_out = {k: np.array(v) for k, v in fake_dec_out.items()}
metrics_dict = mdl.CreateDecoderMetrics()
mdl.PostProcessDecodeOut(fake_dec_out, metrics_dict)
self.assertEqual(1 + 0, metrics_dict['wer'].total_value)
self.assertEqual(0 + 4, metrics_dict['wer'].total_weight)
self.assertEqual(1 + 0, metrics_dict['norm_wer'].total_value)
self.assertEqual(0 + 4, metrics_dict['norm_wer'].total_weight)
def testBProp(self):
with self.session(use_gpu=False):
tf.random.set_seed(93820985)
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
mdl.BProp()
self.evaluate(tf.global_variables_initializer())
test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval())
mdl.train_op.run()
def testBPropSmoothDecay(self):
with self.session(use_gpu=False):
tf.random.set_seed(93820985)
p = self._testParams()
p.train.lr_schedule = (
schedule.ContinuousSchedule.Params().Set(
start_step=350000, half_life_steps=45000))
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
mdl.BProp()
self.evaluate(tf.global_variables_initializer())
test_utils.CompareToGoldenSingleFloat(self, 4.472597, mdl.loss.eval())
mdl.train_op.run()
def testAllLayerParams(self):
with self.session(use_gpu=False, graph=tf.Graph()):
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
lps = base_layer.RecursiveFindLayerParams(mdl.params)
l_names = sorted([p.cls.__name__ for p in lps])
expected_layers = sorted([
'Adam',
'AdditiveAttention',
'AsciiTokenizer',
'AsrDecoder',
'AsrEncoder',
'AsrModel',
'BatchNormLayer',
'BeamSearchHelper',
'GreedySearchHelper',
'TargetSequenceSampler',
'ConvLSTMCell',
'Conv2DLayer',
'Conv2DLayer',
'EmbeddingLayer',
'HighwaySkipLayer',
'LSTMCellSimple',
'LSTMCellSimple',
'NullContextualizer',
'NullFusion',
'NullLm',
'Learner',
'PiecewiseConstantSchedule',
'ProjectionLayer',
'SimpleFullSoftmax',
'SpectrumAugmenter',
'StackingOverTime',
'TestInputGenerator',
])
self.assertEqual(expected_layers, l_names)
def testParamValueSumSquared(self):
with self.session(use_gpu=False, graph=tf.Graph()):
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
all_vars = tf.trainable_variables()
py_utils.SumSquared(all_vars)
def testCollectVarHistogram(self):
with self.session(use_gpu=False, graph=tf.Graph()):
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
var_grads = py_utils.ComputeGradients(mdl.loss, mdl.vars)
summary_utils.CollectVarHistogram(var_grads)
def testGradientMult(self):
with self.session(use_gpu=False, graph=tf.Graph()):
p = self._testParams()
mdl = p.Instantiate()
mdl.FPropDefaultTheta()
var_grads = py_utils.ComputeGradients(mdl.loss, mdl.vars)
py_utils.ApplyGradMultiplier(var_grads, -1.1)
def testLRDecay(self):
with self.session(use_gpu=False, graph=tf.Graph()):
p = self._testParams()
tp = p.train
tp.lr_schedule.boundaries = [300000, 400000, 500000]
tp.lr_schedule.values = [1.0, 0.1, 0.01, 0.001]
lrs = tp.lr_schedule.Instantiate()
steps = [299999, 300001, 399999, 400001, 499999, 500001]
fetches = [lrs.Value(_) for _ in steps]
values = self.evaluate(fetches)
self.assertAllClose([1.0, 0.1, 0.1, 0.01, 0.01, 0.001], values)
def testBatchSplit(self):
def Run(num_splits):
p = self._testParams()
with self.session(use_gpu=False, graph=tf.Graph()):
tf.random.set_seed(93820981)
p.input.cur_iter_in_seed = False
p.input.bucket_batch_limit = [
b * 2 / num_splits for b in p.input.bucket_batch_limit
]
with cluster_factory.ForTestingWorker(gpus=num_splits, do_eval=True):
mdl = p.Instantiate()
metrics = mdl.FPropDefaultTheta()[0]
self.evaluate(tf.global_variables_initializer())
return self.evaluate(metrics['loss'])
res1, res2 = Run(1), Run(2)
self.assertAllClose(res1[0], res2[0])
self.assertAllEqual(res1[1], res2[1])
def testInference(self):
def _CreateModelParamsForTest():
p = model.AsrModel.Params()
p.name = 'test_config'
# Encoder params.
ep = p.encoder
ep.input_shape = [None, None, 80, 1]
ep.lstm_cell_size = 16
ep.num_lstm_layers = 2
ep.conv_filter_shapes = [(3, 3, 1, 32), (3, 3, 32, 32)]
ep.conv_filter_strides = [(2, 2), (2, 2)]
ep.num_conv_lstm_layers = 0
# Initialize decoder params.
dp = p.decoder
dp.rnn_cell_dim = 16
dp.rnn_layers = 2
dp.source_dim = ep.lstm_cell_size * 2
# Use functional while based unrolling.
dp.use_while_loop_based_unrolling = False
p.input = input_generator.AsrInput.Params()
ip = p.input
ip.frame_size = 80
ip.append_eos_frame = True
ip.pad_to_max_seq_length = False
return p
with self.session(
use_gpu=False, graph=tf.Graph()) as sess, self.SetEval(True):
p = _CreateModelParamsForTest()
mdl = p.Instantiate()
subgraphs = mdl.Inference()
self.assertIn('default', subgraphs)
fetches, feeds = subgraphs['default']
self.assertIn('wav', feeds)
for name in ['hypotheses', 'scores', 'src_frames', 'encoder_frames']:
self.assertIn(name, fetches)
with open(
test_helper.test_src_dir_path('tools/testdata/gan_or_vae.16k.wav'),
'rb') as f:
wav = f.read()
self.evaluate(tf.global_variables_initializer())
fetches = sess.run(fetches, {feeds['wav']: wav})
self.assertAllEqual((1, p.decoder.beam_search.num_hyps_per_beam),
fetches['hypotheses'].shape)
self.assertAllEqual((1, p.decoder.beam_search.num_hyps_per_beam),
fetches['scores'].shape)
self.assertAllEqual((1, 314, p.encoder.input_shape[2], 1),
fetches['src_frames'].shape)
self.assertAllEqual((80, 1, 2 * p.encoder.lstm_cell_size),
fetches['encoder_frames'].shape)
if __name__ == '__main__':
tf.test.main()
| python |
#!/usr/bin/env python3
# Copyright 2021 Cloudera, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from requests.auth import HTTPBasicAuth
from common import *
import base64
import json
import requests
from datetime import datetime
from collections import OrderedDict
username_column_encoded = base64.b64encode(bytes(cf_name + ":" + username_column, 'utf-8'))
message_column_encoded = base64.b64encode(bytes(cf_name + ":" + message_column, 'utf-8'))
created_time_column_encoded = base64.b64encode(bytes(cf_name + ":" + created_time, 'utf-8'))
# Delete table if it exists
request = requests.get(baseurl + "/" + table_name + "/schema",
auth=HTTPBasicAuth(DB_USER, DB_PASS))
if is_successful(request):
request = requests.delete(baseurl + "/" + table_name + "/schema",
auth=HTTPBasicAuth(DB_USER, DB_PASS))
if is_successful(request):
print("Deleted table " + table_name)
else:
print("Error out. Status code was " + str(request.status_code) + "\n" + request.text)
# Create Table
content = '<?xml version="1.0" encoding="UTF-8"?>'
content += '<TableSchema name="' + table_name + '">'
content += ' <ColumnSchema name="' + cf_name + '" />'
content += '</TableSchema>'
request = requests.post(baseurl + "/" + table_name + "/schema", data=content,
headers={"Content-Type": "text/xml", "Accept": "text/xml"},
auth=HTTPBasicAuth(DB_USER, DB_PASS))
if is_successful(request):
print("Created table " + table_name)
else:
print("Error out while creating table. Status code was " + str(request.status_code) + "\n" + request.text)
quit()
def get_current_time():
now = datetime.now() # current date and time
date_time = now.strftime("%m/%d/%Y, %H:%M:%S")
return date_time
rows = []
jsonOutput = {"Row": rows}
print("Writing data to " + table_name)
for i in range(0, 20):
rowKey = username + "-" + str(i)
rowKeyEncoded = base64.b64encode(bytes(rowKey, 'utf-8'))
usernameEncoded = base64.b64encode(bytes(username + "-" + str(i), 'utf-8'))
currentTime = get_current_time()
currentTimeEncoded = base64.b64encode(bytes(currentTime, 'utf-8'))
testMessage = "test message" + str(i)
testMessageEncoded = base64.b64encode(bytes(testMessage, 'utf-8'))
cell = OrderedDict([
("key", rowKeyEncoded.decode('utf-8')),
("Cell",
[
{"column": message_column_encoded.decode('utf-8'), "$": testMessageEncoded.decode('utf-8')},
{"column": username_column_encoded.decode('utf-8'), "$": usernameEncoded.decode('utf-8')},
{"column": created_time_column_encoded.decode('utf-8'), "$": currentTimeEncoded.decode('utf-8')},
])
])
print("Row key: " + rowKey + "; Username: " +
rowKey + "; " + "Message: " + testMessage + "; Created time: " + currentTime)
rows.append(cell)
request = requests.post(baseurl + "/" + table_name + "/" + rowKey, data=json.dumps(jsonOutput),
headers={"Content-Type": "application/json", "Accept": "application/json"},
auth=HTTPBasicAuth(DB_USER, DB_PASS))
if is_successful(request):
print("Successfully added messages for " + table_name)
else:
print("Error out while loading data. Status code was " + str(request.status_code) + "\n" + request.text)
quit()
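# A hypothetical follow-up (not in the original script): the same REST endpoint can
# read a row back with a GET; column names and values come back base64-encoded:
#
#     row = requests.get(baseurl + "/" + table_name + "/" + rowKey,
#                        headers={"Accept": "application/json"},
#                        auth=HTTPBasicAuth(DB_USER, DB_PASS))
#     print(json.loads(row.text))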
| python |
# by amounra 0216 : http://www.aumhaa.com
# written against Live 9.6 release on 021516
from __future__ import absolute_import, print_function
import Live
import math
from ableton.v2.base import inject, listens
from ableton.v2.control_surface import ControlSurface, ControlElement, Layer, Skin, PrioritizedResource, Component, ClipCreator, DeviceBankRegistry
from ableton.v2.control_surface.elements import ButtonMatrixElement
from ableton.v2.control_surface.components import M4LInterfaceComponent, SessionRingComponent, SessionNavigationComponent, SessionComponent, TransportComponent, DeviceComponent, ViewControlComponent
from ableton.v2.control_surface.components.mixer import simple_track_assigner
from aumhaa.v2.base import initialize_debug
from aumhaa.v2.control_surface import SendLividSysexMode
from aumhaa.v2.control_surface.elements import MonoEncoderElement, MonoBridgeElement
from aumhaa.v2.control_surface.elements.mono_button import *
from aumhaa.v2.control_surface.components import DeviceNavigator, MonoMixerComponent
from aumhaa.v2.livid import LividControlSurface, LividRGB
from .Map import *
debug = initialize_debug()
MIDI_NOTE_TYPE = 0
MIDI_CC_TYPE = 1
MIDI_PB_TYPE = 2
MIDI_MSG_TYPES = (MIDI_NOTE_TYPE, MIDI_CC_TYPE, MIDI_PB_TYPE)
MIDI_NOTE_ON_STATUS = 144
MIDI_NOTE_OFF_STATUS = 128
MIDI_CC_STATUS = 176
MIDI_PB_STATUS = 224
class GuitarWing(LividControlSurface):
_sysex_id = 20
_model_name = 'GuitarWing'
def __init__(self, *a, **k):
super(GuitarWing, self).__init__(*a, **k)
self._skin = Skin(GuitarWingColors)
with self.component_guard():
self._setup_controls()
self._setup_m4l_interface()
self._setup_session_control()
self._setup_mixer_control()
self._setup_device_control()
self._setup_transport_control()
self._setup_view_control()
def _setup_controls(self):
is_momentary = True
optimized = True
resource = PrioritizedResource
self._button = [MonoButtonElement(is_momentary = is_momentary, msg_type = MIDI_NOTE_TYPE, channel = CHANNEL, identifier = BUTTONS[index], name = 'Button_' + str(index), script = self, skin = self._skin, optimized_send_midi = optimized, resource_type = resource, monobridge = self._monobridge) for index in range(10)]
self._fader = [MonoEncoderElement(msg_type = MIDI_CC_TYPE, channel = CHANNEL, identifier = SLIDERS[index], name = 'Fader_' + str(index), num = index, script = self, optimized_send_midi = optimized, resource_type = resource, monobridge = self._monobridge) for index in range(3)]
self._fader_button = [MonoEncoderElement(msg_type = MIDI_NOTE_TYPE, channel = CHANNEL, identifier = SLIDERS[index], name = 'Fader_Button_' + str(index), num = index, script = self, optimized_send_midi = optimized, resource_type = resource, monobridge = self._monobridge) for index in range(3)]
self._ccs = [MonoEncoderElement(msg_type = MIDI_CC_TYPE, channel = CHANNEL, identifier = CCS[index], name = 'CCs_' + str(index), num = index, script = self, optimized_send_midi = optimized, resource_type = resource, monobridge = self._monobridge) for index in range(4)]
self._pad = [MonoButtonElement(is_momentary = is_momentary, msg_type = MIDI_NOTE_TYPE, channel = CHANNEL, identifier = PADS[index], name = 'Pad_' + str(index), script = self, skin = self._skin, optimized_send_midi = optimized, resource_type = resource, monobridge = self._monobridge) for index in range(5)]
self._padCC = [MonoEncoderElement(msg_type = MIDI_CC_TYPE, channel = CHANNEL, identifier = PADS[index], name = 'PadCC_' + str(index), num = index, script = self, optimized_send_midi = optimized, resource_type = resource, monobridge = self._monobridge) for index in range(5)]
self._accel = [MonoEncoderElement(msg_type = MIDI_CC_TYPE, channel = CHANNEL, identifier = ACCELS[index], name = 'Accel_' + str(index), num = index, script = self, optimized_send_midi = optimized, resource_type = resource, monobridge = self._monobridge) for index in range(3)]
self._parameter_control_matrix = ButtonMatrixElement(rows = [ [ self._fader[0], self._fader[1], self._fader[2], self._accel[2], self._ccs[0], self._ccs[1], self._ccs[2], self._ccs[3] ]])
self._scene_launch_matrix = ButtonMatrixElement(rows = [self._pad[:4]])
def _setup_session_control(self):
self._session_ring = SessionRingComponent(num_tracks = 1, num_scenes = 4, tracks_to_use = lambda : self.song.visible_tracks + self.song.return_tracks)
self._session_ring.set_enabled(False)
self._session = SessionComponent(session_ring = self._session_ring, auto_name = True)
hasattr(self._session, '_enable_skinning') and self._session._enable_skinning()
self._session.layer = Layer(scene_launch_buttons = self._scene_launch_matrix)
self._session_navigation =SessionNavigationComponent(name = 'SessionNavigation', session_ring = self._session_ring)
self._session_navigation._horizontal_banking.scroll_up_button.color = 'Session.NavigationButtonOn'
self._session_navigation._horizontal_banking.scroll_down_button.color = 'Session.NavigationButtonOn'
self._session_navigation.layer = Layer(left_button = self._button[1], right_button = self._button[0])
self._session_navigation.set_enabled(True)
def _setup_mixer_control(self):
self._mixer = MonoMixerComponent(name = 'Mixer', tracks_provider = self._session_ring, track_assigner = simple_track_assigner, invert_mute_feedback = True, auto_name = True, enable_skinning = True)
self.song.view.selected_track = self._mixer.channel_strip(0)._track
def _setup_transport_control(self):
self._transport = TransportComponent()
self._transport.layer = Layer(play_button = self._button[6],
loop_button = self._button[7],
seek_backward_button = self._button[8],
record_button = self._button[9])
self._transport.set_enabled(True)
def _setup_device_control(self):
self._device = DeviceComponent(name = 'Device_Component', device_provider = self._device_provider, device_bank_registry = DeviceBankRegistry())
self._device.layer = Layer(parameter_controls = self._parameter_control_matrix)
self._device.set_enabled(True)
def _setup_m4l_interface(self):
self._m4l_interface = M4LInterfaceComponent(controls=self.controls, component_guard=self.component_guard)
self.get_control_names = self._m4l_interface.get_control_names
self.get_control = self._m4l_interface.get_control
self.grab_control = self._m4l_interface.grab_control
self.release_control = self._m4l_interface.release_control
def _setup_view_control(self):
self._view_control = ViewControlComponent()
self._view_control.layer = Layer(prev_track_button = self._button[1], next_track_button = self._button[0])
# a
| python |
# <Copyright 2019, Argo AI, LLC. Released under the MIT license.>
"""Collection of utility functions for Matplotlib."""
from typing import Any, Dict, List, Optional, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
from descartes.patch import PolygonPatch
from matplotlib.animation import FuncAnimation
from matplotlib.lines import Line2D
from shapely.geometry import LineString, Polygon
def draw_polygon_mpl(
ax: plt.Axes, polygon: np.ndarray, color: Union[Tuple[float, float, float], str], linewidth: Optional[float] = None
) -> None:
"""Draw a polygon.
The polygon's first and last point must be the same (repeated).
Args:
ax: Matplotlib axes instance to draw on
polygon: Array of shape (N, 2) or (N, 3)
        color: Tuple of shape (3,) representing the RGB color, or a single character color string, e.g. 'b'
        linewidth: Optional width for the polygon edges
    """
if linewidth is None:
ax.plot(polygon[:, 0], polygon[:, 1], color=color)
else:
ax.plot(polygon[:, 0], polygon[:, 1], color=color, linewidth=linewidth)
def draw_polygonpatch_matplotlib(points: Any, color: Union[Tuple[float, float, float], str]) -> None:
"""Draw a PolygonPatch.
Args:
points: Unused argument
        color: Tuple of shape (3,) representing the RGB color, or a single character color string, e.g. 'b'
"""
fig = plt.figure(1, figsize=(10, 10), dpi=90)
ax = fig.add_subplot(111)
    exterior = [(0, 0), (0, 0.5), (0.5, 0.5), (0.5, 0), (0, 0)]
    interior = [(0.2, 0.3), (0.3, 0.3), (0.3, 0.4), (0.2, 0.4)]
    polygon = Polygon(exterior, [interior])
patch = PolygonPatch(polygon, facecolor=color, alpha=0.5, zorder=2)
ax.add_patch(patch)
def draw_lane_polygons(
ax: plt.Axes, lane_polygons: np.ndarray, color: Union[Tuple[float, float, float], str] = "y"
) -> None:
"""Draw a lane using polygons.
Args:
ax: Matplotlib axes
lane_polygons: Array of (N,) objects, where each object is a (M,3) array
        color: Tuple of shape (3,) representing the RGB color, or a single character color string, e.g. 'b'
"""
for i, polygon in enumerate(lane_polygons):
ax.plot(polygon[:, 0], polygon[:, 1], color=color, alpha=0.3, zorder=1)
def plot_bbox_2D(
ax: plt.Axes, pts: np.ndarray, color: Union[Tuple[float, float, float], str], linestyle: str = "-"
) -> None:
"""Draw a bounding box.
2D bbox vertices should be arranged as::
0----1
| |
2----3
i.e. the connectivity is 0->1, 1->3, 3->2, 2->0
Args:
ax: Matplotlib axes
pts: Array of shape (4, 2) representing the 4 points of the bounding box.
        color: Tuple of shape (3,) representing the RGB color, or a single character color string, e.g. 'b'
linestyle: The linestyle to use
"""
ax.plot(pts[0:2, 0], pts[0:2, 1], c=color, linestyle=linestyle)
ax.plot(pts[2:4, 0], pts[2:4, 1], c=color, linestyle=linestyle)
ax.plot(pts[np.array([1, 3]), 0], pts[np.array([1, 3]), 1], c=color, linestyle=linestyle)
ax.plot(pts[np.array([0, 2]), 0], pts[np.array([0, 2]), 1], c=color, linestyle=linestyle)
def animate_polyline(polyline: np.ndarray, axes_margin: int = 5, show_plot: bool = True) -> None:
"""Draw and animate a polyline on a plot.
Args:
polyline: Array of shape (N, 2) representing the points of the line
axes_margin: How much margin for the axes
show_plot: Whether to show the plot after rendering it
"""
xmin = np.amin(polyline[:, 0]) - axes_margin
xmax = np.amax(polyline[:, 0]) + axes_margin
ymin = np.amin(polyline[:, 1]) - axes_margin
ymax = np.amax(polyline[:, 1]) + axes_margin
fig, ax = plt.subplots()
xdata, ydata = [], []
(ln,) = plt.plot([], [], "ro", animated=True)
def init() -> Tuple[Line2D]:
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
return (ln,)
def update(frame: List[Any]) -> Tuple[Line2D]:
xdata.append(frame[0])
ydata.append(frame[1])
ln.set_data(xdata, ydata)
return (ln,)
ani = FuncAnimation(fig, update, frames=polyline, init_func=init, blit=True)
if show_plot:
plt.show()
def plot_lane_segment_patch(
polygon_pts: np.ndarray, ax: plt.Axes, color: Union[Tuple[float, float, float], str] = "y", alpha: float = 0.3
) -> None:
"""Plot a lane segment using a PolygonPatch.
Args:
polygon_pts: Array of shape (N, 2) representing the points of the polygon
ax: Matplotlib axes
        color: Tuple of shape (3,) representing the RGB color, or a single character color string, e.g. 'b'
alpha: the opacity of the lane segment
"""
polygon = Polygon(polygon_pts)
patch = PolygonPatch(polygon, facecolor=color, edgecolor=color, alpha=alpha, zorder=2)
ax.add_patch(patch)
def plot_nearby_centerlines(
lane_centerlines: Dict[Any, Any], ax: plt.Axes, nearby_lane_ids: List[int], color: Union[Tuple[int, int, int], str]
) -> None:
"""Plot centerlines.
Args:
lane_centerlines: Python dictionary where key is lane ID, value is object describing the lane
ax: Matplotlib axes
nearby_lane_ids: List of integers representing lane IDs
        color: Tuple of shape (3,) representing the RGB color, or a single character color string, e.g. 'b'
"""
for curr_lane_id in nearby_lane_ids:
centerline = lane_centerlines[curr_lane_id]["centerline"]
ax.plot(centerline[:, 0], centerline[:, 1], color=color, linestyle="--", alpha=0.4)
def visualize_centerline(centerline: LineString) -> None:
"""Visualize the computed centerline.
Args:
centerline: Sequence of coordinates forming the centerline
"""
line_coords = list(zip(*centerline))
lineX = line_coords[0]
lineY = line_coords[1]
plt.plot(lineX, lineY, "--", color="grey", alpha=1, linewidth=1, zorder=0)
plt.text(lineX[0], lineY[0], "s")
plt.text(lineX[-1], lineY[-1], "e")
plt.axis("equal")
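# A minimal, hedged usage sketch of draw_polygon_mpl above (the square is illustrative only):
#   fig, ax = plt.subplots()
#   square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]])  # first point repeated last
#   draw_polygon_mpl(ax, square, color="b", linewidth=2)
#   plt.show()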
| python |
from fastapi.testclient import TestClient
from app.main import app
client = TestClient(app)
def test_valid_input():
"""Return 200 Success when input is valid."""
response = client.post(
'/predict',
json={
'title': 'Water bike',
'blurb': 'A bike that floats',
'goal': '5000',
'launch_date': '08/06/2020',
'deadline': '10/20/2020',
'category': 'sports'
}
)
    body = response.json()
    assert response.status_code == 200
def test_invalid_input():
"""Return 422 Validation Error when x1 is negative."""
response = client.post(
'/predict',
json={
'title': 'Water bike',
'blurb': 'A bike that floats',
'goal': '5000',
'launch_date': '08/06/2020',
'deadline': '10/20/2020',
'category': 'sports'
}
)
body = response.json()
| python |
encode,decode=lambda s:''.join(c//200*"🫂"+c%200//50*"💖"+c%50//10*"✨"+c%10//5*"🥺"+c%5*","+(c==0)*"❤️"+"👉👈"for c in s.encode()),lambda s:bytes([200*(c:=b.count)("🫂")+50*c("💖")+10*c("✨")+5*c("🥺")+c(",")for b in s.split("👉👈")[:-1]]).decode()
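# The expression above implements the emoji "bottom" encoding: each byte is decomposed
# greedily into 🫂 (200), 💖 (50), ✨ (10), 🥺 (5) and , (1), a zero byte is written as ❤️,
# and 👉👈 terminates every byte group; decode simply counts the glyphs in each group.
# A minimal, hedged round-trip check (illustrative, not part of the original file):
if __name__ == "__main__":
    assert decode(encode("hi")) == "hi"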
| python |
# -*- coding: utf-8 -*-
"""Sweep config interface."""
from .cfg import SweepConfig, schema_violations_from_proposed_config
from .schema import fill_validate_schema, fill_parameter, fill_validate_early_terminate
__all__ = [
"SweepConfig",
"schema_violations_from_proposed_config",
"fill_validate_schema",
"fill_parameter",
"fill_validate_early_terminate",
]
| python |
from typing import Callable
from fastapi import FastAPI
from app.db.init_db import init_db, create_engine
def create_startup_handler(app: FastAPI, db_url: str) -> Callable:
async def startup() -> None:
engine = create_engine(db_url)
await init_db(engine)
app.state.alchemy_engine = engine
return startup
def create_shutdown_handler(app: FastAPI) -> Callable:
async def shutdown() -> None:
await app.state.alchemy_engine.dispose()
return shutdown
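# A minimal, hedged usage sketch of wiring these handlers into an application
# (the database URL below is illustrative only):
#   app = FastAPI()
#   app.add_event_handler("startup", create_startup_handler(app, "postgresql+asyncpg://localhost/app"))
#   app.add_event_handler("shutdown", create_shutdown_handler(app))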
| python |
__author__ = 'socialmoneydev'
from jsonBase import JsonBase
from programlimit import ProgramLimit
from programinterestrate import ProgramInterestRate
class ProgramChecking(JsonBase):
def isHashedPayload(self):
return True
def __init__(self):
self.category = None
self.type = None
self.balanceLimit = None
self.interestRates = []
self.isExternalWithdrawEnabled = None
self.isInterestEnabled = None
self.isRecurringContributionEnabled = None
self.perTransactionDepositLimit = None
self.perTransactionWithdrawLimit = None
def fromDict(self, dct, classDefs):
classDefs = classDefs or dict()
classDefs['interestRates'] = ProgramInterestRate
classDefs['perTransactionWithdrawLimit'] = ProgramLimit
classDefs['perTransactionDepositLimit'] = ProgramLimit
super(ProgramChecking, self).fromDict(dct, classDefs)
| python |
#!/usr/local/bin/python3.5 -u
answer = 1 + 7 * 7 - 8
print(answer)
| python |
__version__ = '0.1.5'
name = "drf_scaffold"
| python |
def count_prime_factors(n, c):
    # accumulate the prime factorization of n into c
    # (argument `c` must be a collections.Counter)
if n<2:
return
m=n
i=2
while i<=m:
while m%i==0:
m//=i
c[i]+=1
i+=1
from collections import Counter
n=int(input())
d=Counter()
for i in range(1,n+1):
    count_prime_factors(i,d)
ans=1
mod=10**9+7
for v in d.values():
ans*=v+1
ans%=mod
print(ans)
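# Note on the computation above: the loop accumulates the prime factorization of n!
# (by factoring every i <= n into d), and the product of (exponent + 1) over all primes
# is the number of divisors of n!, reported modulo 10**9 + 7.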
| python |
#from time import sleep
class SessionHelper():
def __init__(self, app):
self.app = app
def login(self, user_email, password):
driver = self.app.driver
self.app.open_page()
#driver.find_element_by_id("email").click()
driver.find_element_by_id("email").send_keys(user_email)
driver.find_element_by_id("password").send_keys(password)
# driver.find_element_by_id("password").click()
driver.find_element_by_xpath("//input[@value='SIGN IN']").click()
def logout(self):
driver = self.app.driver
driver.find_element_by_id("c1-user-text").click()
driver.find_element_by_id("c1-menu-logout").click()
#driver.getCurrentUrl()
def ensure_logout(self):
driver = self.app.driver
if self.is_logged_in():
self.logout()
def is_logged_in(self):
driver = self.app.driver
#sleep(1)
return len(driver.find_elements_by_id("c1-user-text")) > 0
def is_logged_in_as(self, user_email):
driver = self.app.driver
return driver.find_element_by_id("c1-user-text").get_attribute("title") == user_email
def ensure_login(self, user_email, password):
#driver = self.app.driver
if self.is_logged_in():
if self.is_logged_in_as(user_email):
return
else:
self.logout()
self.login(user_email, password) | python |
#!/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nmigen import Shape
from nmigen.hdl.rec import Record, DIR_FANIN, DIR_FANOUT
class _Endpoint:
"""Abstract base class for Sinks and Sources."""
def __init__(self, payload_type, name, src_loc_at):
self.payload_type = payload_type
self._record = Record([
("valid", Shape(), DIR_FANOUT),
("ready", Shape(), DIR_FANIN),
("last", Shape(), DIR_FANOUT),
("payload", payload_type, DIR_FANOUT),
], src_loc_at=2+src_loc_at, name=name)
self.valid = self._record.valid
self.ready = self._record.ready
self.last = self._record.last
self.payload = self._record.payload
def is_transferring(self):
"""Returns an expression that is true when a transfer takes place."""
return (self.valid & self.ready)
class Source(_Endpoint):
"""A stream source.
Parameters
----------
payload_type: Shape(N) or Layout
The payload transferred from this Source.
name: str
Base for signal names.
Attributes:
-----------
payload_type: Shape(N) or Layout
valid: Signal(1), out
ready: Signal(1), in
last: Signal(1), out
payload: Signal(N) or Record, out
"""
def __init__(self, payload_type, name=None, src_loc_at=0):
super().__init__(payload_type, name, src_loc_at)
def connect(self, sink):
"""Returns a list of statements that connects this source to a sink.
Parameters:
sink: This Sink to which to connect.
"""
assert isinstance(sink, Sink)
return self._record.connect(sink._record)
class Sink(_Endpoint):
"""A stream sink
Parameters
----------
payload: Signal(N) or Record
The payload transferred to this Sink.
name: str
Base for signal names.
Attributes:
-----------
payload_type: Shape(N) or Layout
valid: Signal(1), in
ready: Signal(1), out
last: Signal(1), in
payload: Signal(N) or Record, in
"""
def __init__(self, payload_type, name=None, src_loc_at=0):
super().__init__(payload_type, name, src_loc_at)
def glue_sources(source_in: Source, source_out: Source):
"""Combinatorially glues two sources together.
source_in is combinatorially glued to source_out. This is useful when
exposing a submodule's Source as part of the interface of the current
module.
The two sources must have identical payload types.
Parameters:
source_in:
The source that forms part of the submodule's interface.
source_out:
The source that forms part of the current module's interface.
Result:
A sequence of statements that connects the two sources.
"""
# Checking to catch simple mistakes
assert isinstance(source_in, Source)
assert isinstance(source_out, Source)
assert source_in.payload_type == source_out.payload_type
return [
source_in.ready.eq(source_out.ready),
source_out.valid.eq(source_in.valid),
source_out.last.eq(source_in.last),
source_out.payload.eq(source_in.payload),
]
def glue_sinks(sink_in: Sink, sink_out: Sink):
"""Combinatorially glues two sinks together.
sink_in is combinatorially glued to sink_out. This is useful when
exposing a submodule's Sink as part of the interface of the current
module.
The two sinks must have identical payload types.
Parameters:
sink_in:
The sink that forms part of the current module's interface.
sink_out:
The sink that forms part of the submodule's interface.
Result:
A sequence of statements that connects the two sinks.
"""
# Checking to catch simple mistakes
assert isinstance(sink_in, Sink)
assert isinstance(sink_out, Sink)
assert sink_in.payload_type == sink_out.payload_type
return [
sink_in.ready.eq(sink_out.ready),
sink_out.valid.eq(sink_in.valid),
sink_out.last.eq(sink_in.last),
sink_out.payload.eq(sink_in.payload),
]
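# A minimal, hedged usage sketch (names are illustrative; not part of this module).
# Inside an nMigen Elaboratable one might declare a Source on a producer and a Sink on
# a consumer with the same payload type, then wire them combinatorially:
#   self.source = Source(unsigned(8))
#   self.sink = Sink(unsigned(8))
#   m.d.comb += self.source.connect(self.sink)
# connect() returns the statements driving valid/last/payload forward and ready backward.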
| python |
# vim: set tabstop=4 shiftwidth=4 expandtab
##############################################################################
# Written by: Brian G. Merrell <[email protected]>
# Date: 12/03/2008
# Description: helpprovider.py wrapper script
# Used by the helpprovider-*.py tests
##############################################################################
'''Application wrapper for helpprovider.py'''
from strongwind import *
from helpers import *
import sys
# class to represent the main window.
class HelpProviderFrame(accessibles.Frame):
STREET_TIP = "Enter the street address in this text box."
CITY_TIP = "Enter the city here."
STATE_TIP = "Enter the state in this text box."
ZIP_TIP = "Enter the zip code here."
def __init__(self, accessible):
super(HelpProviderFrame, self).__init__(accessible)
self.text_boxes = self.findAllTexts(None)
try:
self.street_text_box = self.text_boxes[3]
self.city_text_box = self.text_boxes[2]
self.state_text_box = self.text_boxes[1]
self.zip_text_box = self.text_boxes[0]
except IndexError, e:
print "Could not find all the expected text boxes"
print e
sys.exit(1)
def assert_tooltip_appeared(self, message):
procedurelogger.action("Verify that a tooltip appears and that it has the correct message. Also verify that no other tooltip accessibles are found")
procedurelogger.expectedResult("Tooltip appears and reads: \"%s\"" % message)
# verify that we can only find one tooltip
tooltips = self.app.findAllToolTips(None)
assert len(tooltips) == 1, "Only one tooltip accessible should exist"
# verify that the tooltip has the message we expect
tooltip = tooltips[0]
assert tooltip.name == message, \
"The tooltip does not have the expected message"
# check the state of the tooltip just for fun
statesCheck(tooltip, "ToolTip")
def assert_descriptions(self):
# Make sure that the accessible description for each text box matches
# the tooltip message for that text box. This could be done from
# assert_tooltip_appeared, but this allows a lot of tests to run even
# if this assertion fails
for text_box in self.text_boxes:
procedurelogger.action("Click in %s" % text_box)
text_box.mouseClick()
self.keyCombo("F1")
sleep(config.SHORT_DELAY)
procedurelogger.expectedResult("A tooltip appears for %s" % \
text_box)
tooltip = self.app.findAllToolTips(None)[0]
#BUG487859, COMMENTING OUT TEST BECAUSE BUG IS AN ENHANCEMENT
#procedurelogger.action("Verify that the accessible description for the text box matches the text box's tooltip message.")
#procedurelogger.expectedResult("The accessible description \"%s\" matches the tooltip message \"%s\"" % (text_box.description, tooltip.name))
#assert text_box.description == tooltip.name
#END BUG487859
# close sample application after running the test
def quit(self):
self.altF4()
| python |
import warnings
import numpy as np
from scipy._lib.six import callable, string_types
from scipy._lib.six import xrange
from scipy.spatial import _distance_wrap
from scipy.linalg import norm
# the metric functions referenced below (minkowski, mahalanobis, ...) are not defined in
# this module; pull them from scipy.spatial.distance so the callable/test_* branches can run
from scipy.spatial.distance import (minkowski, wminkowski, seuclidean, mahalanobis,
                                    euclidean, sqeuclidean, braycurtis, canberra,
                                    cityblock, correlation, hamming, jaccard,
                                    chebyshev, yule, matching, dice, kulsinski,
                                    rogerstanimoto, russellrao, sokalsneath,
                                    sokalmichener)
import MyTimer
_SIMPLE_CDIST = {}
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None):
timer = MyTimer.MyTimerCLS()
timer.refresh('enter cidst')
XA = np.asarray(XA, order='c')
XB = np.asarray(XB, order='c')
timer.refresh('asarray')
# The C code doesn't do striding.
XA = _copy_array_if_base_present(_convert_to_double(XA))
XB = _copy_array_if_base_present(_convert_to_double(XB))
timer.refresh('_copy_array_if_base_present')
s = XA.shape
sB = XB.shape
timer.refresh('get shape')
if len(s) != 2:
raise ValueError('XA must be a 2-dimensional array.')
if len(sB) != 2:
raise ValueError('XB must be a 2-dimensional array.')
if s[1] != sB[1]:
raise ValueError('XA and XB must have the same number of columns '
'(i.e. feature dimension.)')
timer.refresh('error check')
mA = s[0]
mB = sB[0]
n = s[1]
timer.refresh('get dim')
dm = np.zeros((mA, mB), dtype=np.double)
timer.refresh(' np.zeros ')
if callable(metric):
if metric == minkowski:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = minkowski(XA[i, :], XB[j, :], p)
elif metric == wminkowski:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = wminkowski(XA[i, :], XB[j, :], p, w)
elif metric == seuclidean:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = seuclidean(XA[i, :], XB[j, :], V)
elif metric == mahalanobis:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = mahalanobis(XA[i, :], XB[j, :], V)
else:
for i in xrange(0, mA):
for j in xrange(0, mB):
dm[i, j] = metric(XA[i, :], XB[j, :])
timer.refresh(' if callable ')
print 'cool'
elif isinstance(metric, string_types):
mstr = metric.lower()
timer.refresh('else')
try:
validate, cdist_fn = _SIMPLE_CDIST[mstr]
XA = validate(XA)
XB = validate(XB)
cdist_fn(XA, XB, dm)
return dm
except KeyError:
pass
timer.refresh(' try')
if mstr in ['hamming', 'hamm', 'ha', 'h']:
if XA.dtype == bool:
XA = _convert_to_bool(XA)
XB = _convert_to_bool(XB)
_distance_wrap.cdist_hamming_bool_wrap(XA, XB, dm)
else:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
_distance_wrap.cdist_hamming_wrap(XA, XB, dm)
elif mstr in ['jaccard', 'jacc', 'ja', 'j']:
if XA.dtype == bool:
XA = _convert_to_bool(XA)
XB = _convert_to_bool(XB)
_distance_wrap.cdist_jaccard_bool_wrap(XA, XB, dm)
else:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
_distance_wrap.cdist_jaccard_wrap(XA, XB, dm)
elif mstr in ['minkowski', 'mi', 'm', 'pnorm']:
timer.refresh('before _convert_to_double')
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
timer.refresh('_convert_to_double')
_distance_wrap.cdist_minkowski_wrap(XA, XB, dm, p)
timer.refresh('after minkowski')
elif mstr in ['wminkowski', 'wmi', 'wm', 'wpnorm']:
timer.refresh('before _convert_to_double')
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
timer.refresh('_convert_to_double')
w = _convert_to_double(w)
_distance_wrap.cdist_weighted_minkowski_wrap(XA, XB, dm, p, w)
elif mstr in ['seuclidean', 'se', 's']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
if V is not None:
V = np.asarray(V, order='c')
if V.dtype != np.double:
raise TypeError('Variance vector V must contain doubles.')
if len(V.shape) != 1:
raise ValueError('Variance vector V must be '
'one-dimensional.')
if V.shape[0] != n:
raise ValueError('Variance vector V must be of the same '
'dimension as the vectors on which the '
'distances are computed.')
# The C code doesn't do striding.
VV = _copy_array_if_base_present(_convert_to_double(V))
else:
VV = np.var(np.vstack([XA, XB]), axis=0, ddof=1)
_distance_wrap.cdist_seuclidean_wrap(XA, XB, VV, dm)
elif mstr in ['cosine', 'cos']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
_cosine_cdist(XA, XB, dm)
elif mstr in ['correlation', 'co']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
XA -= XA.mean(axis=1)[:, np.newaxis]
XB -= XB.mean(axis=1)[:, np.newaxis]
_cosine_cdist(XA, XB, dm)
elif mstr in ['mahalanobis', 'mahal', 'mah']:
XA = _convert_to_double(XA)
XB = _convert_to_double(XB)
if VI is not None:
VI = _convert_to_double(np.asarray(VI, order='c'))
VI = _copy_array_if_base_present(VI)
else:
m = mA + mB
if m <= n:
# There are fewer observations than the dimension of
# the observations.
raise ValueError("The number of observations (%d) is too "
"small; the covariance matrix is "
"singular. For observations with %d "
"dimensions, at least %d observations "
"are required." % (m, n, n + 1))
X = np.vstack([XA, XB])
V = np.atleast_2d(np.cov(X.T))
del X
VI = np.linalg.inv(V).T.copy()
# (u-v)V^(-1)(u-v)^T
_distance_wrap.cdist_mahalanobis_wrap(XA, XB, VI, dm)
elif metric == 'test_euclidean':
dm = cdist(XA, XB, euclidean)
elif metric == 'test_seuclidean':
if V is None:
V = np.var(np.vstack([XA, XB]), axis=0, ddof=1)
else:
V = np.asarray(V, order='c')
dm = cdist(XA, XB, lambda u, v: seuclidean(u, v, V))
elif metric == 'test_sqeuclidean':
dm = cdist(XA, XB, lambda u, v: sqeuclidean(u, v))
elif metric == 'test_braycurtis':
dm = cdist(XA, XB, braycurtis)
elif metric == 'test_mahalanobis':
if VI is None:
X = np.vstack([XA, XB])
V = np.cov(X.T)
VI = np.linalg.inv(V)
X = None
del X
else:
VI = np.asarray(VI, order='c')
VI = _copy_array_if_base_present(VI)
# (u-v)V^(-1)(u-v)^T
dm = cdist(XA, XB, (lambda u, v: mahalanobis(u, v, VI)))
elif metric == 'test_canberra':
dm = cdist(XA, XB, canberra)
elif metric == 'test_cityblock':
dm = cdist(XA, XB, cityblock)
elif metric == 'test_minkowski':
dm = cdist(XA, XB, minkowski, p=p)
elif metric == 'test_wminkowski':
dm = cdist(XA, XB, wminkowski, p=p, w=w)
elif metric == 'test_correlation':
dm = cdist(XA, XB, correlation)
elif metric == 'test_hamming':
dm = cdist(XA, XB, hamming)
elif metric == 'test_jaccard':
dm = cdist(XA, XB, jaccard)
elif metric == 'test_chebyshev' or metric == 'test_chebychev':
dm = cdist(XA, XB, chebyshev)
elif metric == 'test_yule':
dm = cdist(XA, XB, yule)
elif metric == 'test_matching':
dm = cdist(XA, XB, matching)
elif metric == 'test_dice':
dm = cdist(XA, XB, dice)
elif metric == 'test_kulsinski':
dm = cdist(XA, XB, kulsinski)
elif metric == 'test_rogerstanimoto':
dm = cdist(XA, XB, rogerstanimoto)
elif metric == 'test_russellrao':
dm = cdist(XA, XB, russellrao)
elif metric == 'test_sokalsneath':
dm = cdist(XA, XB, sokalsneath)
elif metric == 'test_sokalmichener':
dm = cdist(XA, XB, sokalmichener)
else:
raise ValueError('Unknown Distance Metric: %s' % mstr)
else:
raise TypeError('2nd argument metric must be a string identifier '
'or a function.')
timer.refresh('before return')
return dm, timer
| python |
class Color(object):
RESET = '\x1b[0m'
BLACK = 0
RED = 1
GREEN = 2
YELLOW = 3
BLUE = 4
MAGENTA = 5
CYAN = 6
WHITE = 7
NORMAL = 0
BOLD = 1
@staticmethod
def to_color_string(string,
foreground = 7,
background = None,
style = 1):
style = '\x1b[0%sm' % style
foreground = '\x1b[3%sm' % foreground
background = '' if background is None else '\x1b[4%sm' % background
preset = style + foreground + background
colored = preset + string + Color.RESET
return colored
def warn(string):
colored = Color.to_color_string(string, foreground = Color.YELLOW)
return colored | python |
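# A small, hedged usage sketch of the helpers above (assumes an ANSI-capable terminal):
if __name__ == "__main__":
    print(Color.to_color_string("error", foreground=Color.RED, style=Color.BOLD))
    print(warn("careful"))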
import os
import unittest
import pytest
from github import GithubException
from ogr import GithubService
from ogr.abstract import PRStatus, IssueStatus
from ogr.persistent_storage import PersistentObjectStorage
from ogr.exceptions import GithubAPIException
DATA_DIR = "test_data"
PERSISTENT_DATA_PREFIX = os.path.join(
os.path.dirname(os.path.realpath(__file__)), DATA_DIR
)
class GithubTests(unittest.TestCase):
def setUp(self):
self.token = os.environ.get("GITHUB_TOKEN")
self.user = os.environ.get("GITHUB_USER")
test_name = self.id() or "all"
persistent_data_file = os.path.join(
PERSISTENT_DATA_PREFIX, f"test_github_data_{test_name}.yaml"
)
PersistentObjectStorage().storage_file = persistent_data_file
if PersistentObjectStorage().is_write_mode and (
not self.user or not self.token
):
raise EnvironmentError("please set GITHUB_TOKEN GITHUB_USER env variables")
self.service = GithubService(token=self.token)
self.ogr_project = self.service.get_project(
namespace="packit-service", repo="ogr"
)
self.ogr_fork = self.service.get_project(
namespace="packit-service", repo="ogr", is_fork=True
)
self.hello_world_project = self.service.get_project(
namespace="packit-service", repo="hello-world"
)
self.not_forked_project = self.service.get_project(
namespace="fedora-modularity", repo="fed-to-brew"
)
def tearDown(self):
PersistentObjectStorage().dump()
class Comments(GithubTests):
def test_pr_comments(self):
pr_comments = self.ogr_project.get_pr_comments(9)
assert pr_comments
assert len(pr_comments) == 2
assert pr_comments[0].comment.endswith("fixed")
assert pr_comments[1].comment.startswith("LGTM")
def test_pr_comments_reversed(self):
pr_comments = self.ogr_project.get_pr_comments(9, reverse=True)
assert pr_comments
assert len(pr_comments) == 2
assert pr_comments[0].comment.startswith("LGTM")
def test_pr_comments_filter(self):
pr_comments = self.ogr_project.get_pr_comments(9, filter_regex="fixed")
assert pr_comments
assert len(pr_comments) == 1
assert pr_comments[0].comment.startswith("@TomasTomecek")
pr_comments = self.ogr_project.get_pr_comments(
9, filter_regex="LGTM, nicely ([a-z]*)"
)
assert pr_comments
assert len(pr_comments) == 1
assert pr_comments[0].comment.endswith("done!")
def test_pr_comments_search(self):
comment_match = self.ogr_project.search_in_pr(9, filter_regex="LGTM")
assert comment_match
assert comment_match[0] == "LGTM"
comment_match = self.ogr_project.search_in_pr(
9, filter_regex="LGTM, nicely ([a-z]*)"
)
assert comment_match
assert comment_match[0] == "LGTM, nicely done"
class GenericCommands(GithubTests):
def test_description(self):
description = self.ogr_project.get_description()
assert description.startswith("One Git library to Rule")
def test_branches(self):
branches = self.ogr_project.get_branches()
assert branches
assert set(branches) == {"master"}
def test_git_urls(self):
urls = self.ogr_project.get_git_urls()
assert urls
assert len(urls) == 2
assert "git" in urls
assert "ssh" in urls
assert urls["git"] == "https://github.com/packit-service/ogr.git"
assert urls["ssh"].endswith("[email protected]:packit-service/ogr.git")
def test_username(self):
        # changed to just check the length, because the exact value depends on who regenerated the data files
assert len(self.service.user.get_username()) > 3
def test_email(self):
test_str = self.service.user.get_email()
assert test_str
assert len(test_str) > 0
assert "@" in test_str
assert "." in test_str
def test_get_file(self):
file_content = self.ogr_project.get_file_content(".git_archival.txt")
assert file_content
assert isinstance(file_content, str)
assert "ref-names:" in file_content
def test_nonexisting_file(self):
with self.assertRaises(FileNotFoundError):
self.ogr_project.get_file_content(".blablabla_nonexisting_file")
def test_parent_project(self):
assert self.ogr_fork.parent.namespace == "packit-service"
assert self.ogr_fork.parent.repo == "ogr"
@unittest.skip("get_commit_flags not implemented")
def test_commit_flags(self):
flags = self.ogr_project.get_commit_flags(
commit="29ca3caefc781b4b41245df3e01086ffa4b4639e"
)
assert isinstance(flags, list)
assert len(flags) == 0
def test_get_sha_from_tag(self):
assert (
self.ogr_project.get_sha_from_tag("0.0.1")
== "29ca3caefc781b4b41245df3e01086ffa4b4639e"
)
with pytest.raises(GithubAPIException) as ex:
self.ogr_project.get_sha_from_tag("future")
assert "not found" in str(ex.value)
def test_get_tag_from_tag_name(self):
tag = self.ogr_project.get_tag_from_tag_name("0.0.1")
assert tag.name == "0.0.1"
assert tag.commit_sha == "29ca3caefc781b4b41245df3e01086ffa4b4639e"
def test_get_tag_from_nonexisting_tag_name(self):
assert not self.ogr_project.get_tag_from_tag_name("future")
def test_get_owners(self):
owners = self.ogr_project.get_owners()
assert ["packit-service"] == owners
def test_issue_permissions(self):
users = self.ogr_project.who_can_close_issue()
assert "lachmanfrantisek" in users
issue = self.ogr_project.get_issue_info(1)
assert self.ogr_project.can_close_issue("lachmanfrantisek", issue)
assert not self.ogr_project.can_close_issue("marusinm", issue)
def test_pr_permissions(self):
users = self.ogr_project.who_can_merge_pr()
assert "lachmanfrantisek" in users
assert self.ogr_project.can_merge_pr("lachmanfrantisek")
assert not self.ogr_project.can_merge_pr("marusinm")
class Issues(GithubTests):
def test_issue_list(self):
issue_list = self.ogr_fork.get_issue_list()
assert isinstance(issue_list, list)
assert not issue_list
issue_list_all = self.ogr_project.get_issue_list(status=IssueStatus.all)
assert issue_list_all
assert len(issue_list_all) >= 45
issue_list_closed = self.ogr_project.get_issue_list(status=IssueStatus.closed)
assert issue_list_closed
assert len(issue_list_closed) >= 35
issue_list = self.ogr_project.get_issue_list()
assert issue_list
assert len(issue_list) >= 3
def test_issue_info(self):
issue_info = self.ogr_project.get_issue_info(issue_id=4)
assert issue_info
assert issue_info.title.startswith("Better name")
assert issue_info.status == IssueStatus.closed
def test_issue_labels(self):
labels = self.ogr_project.get_issue_labels(issue_id=4)
assert not labels
self.ogr_project.add_issue_labels(issue_id=4, labels=["test_lb1", "test_lb2"])
labels = self.ogr_project.get_issue_labels(issue_id=4)
assert len(labels) == 2
assert labels[0].name == "test_lb1"
assert labels[1].name == "test_lb2"
class PullRequests(GithubTests):
def test_pr_list(self):
pr_list = self.ogr_fork.get_pr_list()
assert isinstance(pr_list, list)
pr_list_all = self.ogr_project.get_pr_list(status=PRStatus.all)
assert pr_list_all
assert len(pr_list_all) >= 75
pr_list_closed = self.ogr_project.get_pr_list(status=PRStatus.closed)
assert pr_list_closed
assert len(pr_list_closed) >= 70
closed_pr_numbers = []
for closed_pr in pr_list_closed:
closed_pr_numbers.append(closed_pr.id)
assert 93 in closed_pr_numbers
pr_list_merged = self.ogr_project.get_pr_list(status=PRStatus.merged)
assert pr_list_merged
assert len(pr_list_merged) >= 1
closed_pr_numbers = []
for closed_pr in pr_list_merged:
closed_pr_numbers.append(closed_pr.id)
assert 93 not in closed_pr_numbers
pr_list = self.ogr_project.get_pr_list()
assert pr_list
assert len(pr_list) >= 1
def test_pr_info(self):
pr_info = self.ogr_project.get_pr_info(pr_id=1)
assert pr_info
assert pr_info.title == "WIP: API"
assert pr_info.status == PRStatus.merged
def test_all_pr_commits(self):
commits = self.ogr_project.get_all_pr_commits(pr_id=1)
assert len(commits) == 3
assert commits[0] == "431f4a7c5cce24c3035b17c5131a3918ab989bd0"
assert commits[2] == "5d6cc05d30ef0a0d69bb42bdcaad187408a070b0"
def test_update_pr_info(self):
pr_info = self.ogr_project.get_pr_info(pr_id=1)
orig_title = pr_info.title
orig_description = pr_info.description
self.ogr_project.update_pr_info(
pr_id=1, title="changed", description="changed description"
)
pr_info = self.ogr_project.get_pr_info(pr_id=1)
assert pr_info.title == "changed"
assert pr_info.description == "changed description"
self.ogr_project.update_pr_info(
pr_id=1, title=orig_title, description=orig_description
)
pr_info = self.ogr_project.get_pr_info(pr_id=1)
assert pr_info.title == orig_title
assert pr_info.description == orig_description
def test_pr_labels(self):
labels = self.ogr_project.get_pr_labels(pr_id=1)
assert not labels
self.ogr_project.add_pr_labels(pr_id=1, labels=["test_lb1", "test_lb2"])
labels = self.ogr_project.get_pr_labels(pr_id=1)
assert len(labels) == 2
assert labels[0].name == "test_lb1"
assert labels[1].name == "test_lb2"
class Releases(GithubTests):
def test_get_release(self):
release = self.hello_world_project.get_release(tag_name="0.4.1")
assert release.title == "test"
assert release.body == "testing release"
def test_get_releases(self):
releases = self.ogr_project.get_releases()
assert releases
assert len(releases) >= 9
def test_create_release(self):
count_before = len(self.hello_world_project.get_releases())
release = self.hello_world_project.create_release(
tag="0.5.0", name="test", message="testing release"
)
count_after = len(self.hello_world_project.get_releases())
assert release.tag_name == "0.5.0"
assert release.title == "test"
assert release.body == "testing release"
assert count_before + 1 == count_after
def test_edit_release(self):
release = self.hello_world_project.get_release(tag_name="0.1.0")
origin_name = release.title
origin_message = release.body
release.edit_release(
name=f"{origin_name}-changed", message=f"{origin_message}-changed"
)
assert release.title == f"{origin_name}-changed"
assert release.body == f"{origin_message}-changed"
def test_latest_release(self):
release = self.ogr_project.get_latest_release()
assert release.tag_name == "0.5.0"
assert release.title == "0.5.0"
assert "New Features" in release.body
class Forks(GithubTests):
def test_fork(self):
assert self.ogr_fork.is_fork is True
fork_description = self.ogr_fork.get_description()
assert fork_description
@unittest.skip(
"not working with yaml file because it check exception within setup"
)
def test_nonexisting_fork(self):
self.ogr_nonexisting_fork = self.service.get_project(
repo="omfeprkfmwpefmwpefkmwpeofjwepof", is_fork=True
)
with self.assertRaises(GithubException) as ex:
self.ogr_nonexisting_fork.get_description()
s = str(ex.value.args)
assert "Not Found" in s
assert "404" in s
def test_get_fork(self):
fork = self.ogr_project.get_fork()
assert fork
assert fork.get_description()
def test_is_fork(self):
assert not self.ogr_project.is_fork
is_forked = self.ogr_project.is_forked()
assert isinstance(is_forked, bool)
# `is True` is here on purpose: we want to be sure that .is_forked() returns True object
# because Tomas had his crazy ideas and wanted to return GitProject directly,
# stop that madman
assert is_forked is True
fork = self.ogr_project.get_fork(create=False)
assert fork
assert fork.is_fork
def test_create_fork(self):
not_existing_fork = self.not_forked_project.get_fork(create=False)
assert not not_existing_fork
assert not self.not_forked_project.is_forked()
old_forks = self.not_forked_project.service.user.get_forks()
self.not_forked_project.fork_create()
assert self.not_forked_project.get_fork().get_description()
assert self.not_forked_project.is_forked()
new_forks = self.not_forked_project.service.user.get_forks()
assert len(old_forks) == len(new_forks) - 1
| python |
# django
from django import forms
# local django
from exam.models import CustomExam
from exam.validators import CustomExamValidator
class CreateCustomExams(forms.ModelForm):
"""
Form to create a custom exam.
"""
description = forms.CharField(widget=forms.Textarea)
class Meta:
# Define model to form.
model = CustomExam
fields = ('description', 'name',)
def clean(self):
"""
Get Custom Exam fields.
"""
description = self.cleaned_data.get('description')
name = self.cleaned_data.get('name')
# Verify validations in form.
self.validator_all(description, name)
def validator_all(self, description, name):
"""
Checks validator in all fields.
"""
validator = CustomExamValidator()
# Fields common all users.
validator.validator_name(name)
validator.validator_description(description)
| python |
# Generated by Django 2.2 on 2020-08-09 06:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('coupons', '0003_coupon_max_discount'),
]
operations = [
migrations.AlterField(
model_name='coupon',
name='max_discount',
field=models.DecimalField(decimal_places=2, default=100, max_digits=6),
),
]
| python |
import threading
from Queue import Empty, Full
from multiprocessing import Process, Queue, Value
import datetime
import os
import zmq
import logging
from logging import handlers
from platformData import *
from BEMOSSThread import BThread, BProcess
from commandProcessor import processCommand
import cgitb
cgitb.enable() #gives more detailed traceback
main_logger = logging.getLogger("filelogger")
main_logger.level = logging.DEBUG
console_logger = logging.getLogger("consolelogger")
console_logger.level = logging.INFO
fileHandler = handlers.RotatingFileHandler(filename="BEMOSS.log",maxBytes=50000000,backupCount=10) #50 MB limit
consoleHandler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s;%(levelname)s;%(message)s",
"%Y-%m-%d %H:%M:%S")
fileHandler.setFormatter(formatter)
main_logger.handlers = [fileHandler]
console_logger.handlers = [consoleHandler]
main_logger.propagate = False
console_logger.propagate = False
changeLogFilterQ = Queue(10)
def handleLogging():
filterheader = ""
while True:
source, header, level, message = logQueue.get()
message = header +" :" + message
try:
newfilter = changeLogFilterQ.get(False)
except Empty:
pass
else:
filterheader = newfilter
main_logger.log(level,message)
if filterheader:
if header.startswith(filterheader):
console_logger.log(level,"filtering:" + filterheader + ": " + message)
else:
console_logger.log(level, message)
def handleCommands(threadLock,stopFlag):
    # create and bind the reply socket once; re-binding the same address on every
    # loop iteration would raise "Address already in use"
    print "Creating Socket"
    context = zmq.Context()
    rep_socket = context.socket(zmq.REP)
    rep_socket.bind(address)
    while True:
        # Wait for next request from client
        message = rep_socket.recv()
print message
if message == "Exit":
stopFlag.Value = 1
break
splitmessage = message.split(" ")
if len(splitmessage) == 2 and splitmessage[0] == "filterlog": #update the console log filtering
changeLogFilterQ.put(splitmessage[1])
print("Filter requested:" + splitmessage[1])
rep_socket.send(str("Filter Requested"))
continue
with threadLock:
try:
reply = processCommand(message)
except Exception as ex:
reply = "Problem executing command: " + str(type(ex)) + " " + str(ex)
else:
print "Command Processed: " + message
if not reply:
reply = ""
rep_socket.send(str(reply))
print "Exiting handle commands Thread"
stopFlag = Value('i',0)
threadLock = threading.Lock()
command_line_thread = BThread(target=handleCommands,args=(threadLock,stopFlag))
command_line_thread.id = -1
command_line_thread.name = "commandHandler"
command_line_thread.daemon = True
command_line_thread.start()
logging_thread = BThread(target=handleLogging)
logging_thread.id = -1
logging_thread.name = "loggingHandler"
logging_thread.daemon = True
logging_thread.start()
start_time = datetime.datetime.now()
print "****BEMOSS started****"
print os.getpid()
mainThread = threading.current_thread()
mainThread.name = "MainBEMOSSThread"
mainThread.id = 0
counter = 0
while not stopFlag.value:
#check if there is any new messages in the outQueue of the agents
try:
source,destinations,topic,message = outQueue.get(True,1)
for destination in destinations:
if destination in inQueues_dict:
try: #for each destination, put the message in the destination's inQueue
inQueues_dict[destination].put((source, topic,message), False)
except Full:
_ = inQueues_dict[destination].get() #if destination inQueue is full, remove old, and put
inQueues_dict[destination].put((source, topic, message), False)
print(destination + " QueueFull")
raise
elif destination == "platformmanager":
with threadLock:
processCommand(topic + ' ' + message)
except Empty:
#continue
# counter +=1
# if counter == 10:
# counter = 0
# h = hpy()
# print "\nPrinting Memory Usage"
# info= h.heap()
# print info.byvia
pass
time_diff = datetime.datetime.now() - start_time
# if time_diff > datetime.timedelta(minutes=20):
# break
# time.sleep(0.1)
print "BEMOSS exited"
| python |
from tabulate import tabulate
table = [['one','two','three'],['four','five','six'],['seven','eight','nine']]
print(tabulate(table, tablefmt='html'))
"""Generate Report Function"""
with open('example.log') as f:
lines = f.readlines()
print lines
print(lines[2])
HTML_file= open("Report.html","w+")
HTML_file.write("<html>\n <table border=1>\n <tr>\n <td>"+lines[2]+"</td>\n </tr> \n </table>\n </html>")
print(tabulate(lines, tablefmt='html'))
| python |
'''
Environment simulators.
'''
from models.simulator import POMDPSimulator
from models.simulator_momdp import MOMDPSimulator
from models.tiger import TigerPOMDP
from models.rock_sample import RockSamplePOMDP
from models.tools.belief import DiscreteBelief
| python |
"""
Uma matriz de confusão. Não confundir com uma tabela de confusão.
A matrix de confusão possui mais do que uma duas linhas e duas colunas,
por isso difere da tabela de confusão, que possui duas linhas e duas colunas
Para criar a matriz de confusão escolhi o formato de dictionary da seguinte maneira:
O dict
- O primeiro nível do dictionary uma linha da matriz de confusão.
"""
class ConfusionMatrix:
BETA = 1
def __init__(self, model, test_set):
confusion_hash = {}
possible_classes = test_set.get_uniq_classes()
        # {
        #   "sim": { "sim": 3, "nao": 2 },  # when "sim" was predicted, 3 really were "sim" and 2 should have been "nao"
        #   "nao": { "sim": 2, "nao": 1 }
        # }
for klass in possible_classes:
confusion_hash[klass] = {}
for klass_2 in possible_classes:
confusion_hash[klass][klass_2] = 0
for example in test_set.examples:
correct_klass = example.get_class()
predicted_klass = model.predict(example)
confusion_hash[predicted_klass][correct_klass] += 1
self.classes = possible_classes
self.confusion_hash = confusion_hash
def predictions_for(self, klass):
return self.confusion_hash[klass].copy()
def possible_classes(self):
return self.classes.copy()
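# A minimal, hedged usage sketch (model and test_set follow the interfaces assumed above):
#   cm = ConfusionMatrix(model, test_set)
#   correct = sum(cm.predictions_for(k)[k] for k in cm.possible_classes())
#   total = sum(sum(cm.predictions_for(k).values()) for k in cm.possible_classes())
#   accuracy = correct / float(total)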
| python |
## process_rootroopnft.py
# first let's just check how many tweets it grabbed.
with open("rootroopnft.txt", "r") as fid:
line = fid.read()
# end with open
line = line.split("Tweet(url=")
print("line[0]: ", line[0])
print("line[-1]: ", line[-1])
last_date = line[-1].split("date=datetime.datetime(")[1].split(", tzinfo=datetime.timezone.utc),")[0]
print("last_date: ", last_date) # returned 2021, 11, 23, 23, 32, 3 (also the oldest tweet I was able to fetch)
print("len line: ", len(line)) # returned 1484
| python |
import cv2
import numpy as np
class ColorTrack():
def __init__(self):
pass
def detect_green(self,frame):
return self.detect_color(frame,np.array([33,80,40]),np.array([102, 255, 255]))
def detect_red(self,frame):
return self.detect_color(frame,np.array([78, 43, 46]), np.array([99, 255, 255]))
def detect_color(self,frame,lower_bound,uper_bound):
imgHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(imgHSV, lower_bound, uper_bound)
kernelOpen = np.ones((5, 5))
kernelClose = np.ones((20, 20))
maskOpen = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelOpen)
maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)
maskFinal = maskClose
conts, h = cv2.findContours(maskFinal.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cv2.drawContours(imgHSV, conts, -1, (255, 0, 0), 3)
max_x = 0
max_y = 0
max_w = 0
max_h = 0
max_area = 0
for i in range(len(conts)):
x, y, w, h = cv2.boundingRect(conts[i])
if w * h > max_area:
max_x = x
max_y = y
max_w = w
max_h = h
max_area = w * h
return max_x, max_y, max_w, max_h,max_area
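# A minimal, hedged usage sketch (assumes OpenCV can open the default camera):
if __name__ == "__main__":
    tracker = ColorTrack()
    cap = cv2.VideoCapture(0)
    ok, frame = cap.read()
    if ok:
        x, y, w, h, area = tracker.detect_green(frame)
        print("largest green blob:", x, y, w, h, area)
    cap.release()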
| python |
from __future__ import print_function, absolute_import
import sys
import subprocess
from distutils.errors import DistutilsPlatformError
import semantic_version
class Binding:
"""
Binding Options
"""
# https://github.com/PyO3/PyO3
PyO3 = 0
# https://github.com/dgrunwald/rust-cpython
RustCPython = 1
# Bring your own binding
NoBinding = 2
# Build executable
Exec = 3
class Strip:
"""
Strip Options
"""
# do not strip symbols
No = 0
# strip debug symbols
Debug = 1
# strip all symbos
All = 2
def cpython_feature(ext=True, binding=Binding.PyO3):
version = sys.version_info
if binding in (Binding.NoBinding, Binding.Exec):
return ()
elif binding is Binding.PyO3:
if (2, 7) < version < (2, 8):
if ext:
return ("pyo3/python2", "pyo3/extension-module")
else:
return ("pyo3/python2",)
elif version > (3, 4):
if ext:
return ("pyo3/python3", "pyo3/extension-module")
else:
return ("pyo3/python3",)
else:
raise DistutilsPlatformError("Unsupported python version: %s" % sys.version)
elif binding is Binding.RustCPython:
if (2, 7) < version < (2, 8):
if ext:
return ("cpython/python27-sys", "cpython/extension-module-2-7")
else:
return ("cpython/python27-sys",)
elif (3, 3) < version:
if ext:
return ("cpython/python3-sys", "cpython/extension-module")
else:
return ("cpython/python3-sys",)
else:
raise DistutilsPlatformError("Unsupported python version: %s" % sys.version)
else:
raise DistutilsPlatformError('Unknown Binding: "{}" '.format(binding))
def get_rust_version():
try:
output = subprocess.check_output(["rustc", "-V"])
if isinstance(output, bytes):
output = output.decode("latin-1")
return semantic_version.Version(output.split(" ")[1], partial=True)
except (subprocess.CalledProcessError, OSError):
raise DistutilsPlatformError("Can not find Rust compiler")
except Exception as exc:
raise DistutilsPlatformError("Can not get rustc version: %s" % str(exc))
| python |
# imports
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from bs4 import BeautifulSoup
import time
import re
import csv
# easy function for viewing list
def printlist(items):
    length = len(items)
    for i in range(length):
        print(items[i])
#url for page with links to all sas Viya procs by Viya product
base_url='https://documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.5&docsetId=allprodsproc&docsetTarget=p1o1v16by0iotvn10m0jzzv9i3y8.htm&locale=en#'
#retrieve the html from the list of all sas procs by product
driver = webdriver.Safari()
driver.get(base_url)
time.sleep(10)
soup = BeautifulSoup(driver.page_source,"lxml")
driver.close()
#print(soup)
# Build the collect list: Product | Procedure | Procedure_Short | Procedure_Link
bowl = soup.findAll(['h2','p'],attrs={'class':['xisDoc-title','xisDoc-paragraph']})
vcollect = []
vproduct = []
for spoon in bowl:
if spoon.name=='h2' and "SAS Products" not in spoon.text:
vproduct.append(spoon.text.strip())
if spoon.name=='p' and vproduct:
block = spoon.find('a')
if block:
link = block.get('href')
proc = ' '.join(block.text.split())
proc_short = proc.replace(': ',' ') # template shows up as template: because it has multiple links
proc_short = proc_short.split(' ',1)[0]
vcollect.append([vproduct[-1], proc, proc_short, link.strip()])
#keep the list of links for products and procedures in vdriver.csv
header=["Product","Procedure","Procedure_Short","Procedure_Link"]
with open("Projects/PROC overview/vdriver.csv", "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(vcollect)
f.close
#remove the few cases where a product starts by listing another product (not a proc): as in "includes contents of product..."
#store these separately for linking Viya and 9.4 Product clusters
prodlink = []
for idx, item in enumerate(vcollect):
    if item[1] in vproduct:
prodlink.append(vcollect[idx])
del vcollect[idx]
#keep the list of links between 9.4 and viya products in prodlink.csv
header=["Product","Procedure","Procedure_Short","Procedure_Link"]
with open("Projects/PROC overview/prodlink.csv", "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(prodlink)
f.close
# url with viya products with action sets
base_url='https://documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.5&docsetId=allprodsactions&docsetTarget=actionSetsByProduct.htm&locale=en'
#retrieve the html from the list of all sas procs by product
driver = webdriver.Safari()
driver.get(base_url)
time.sleep(10)
soup = BeautifulSoup(driver.page_source,"lxml")
driver.close()
#print(soup)
# Build the collect list: Product | Procedure | Procedure_Short | Procedure_Link
bowl = soup.findAll('div',attrs='xisDoc-toc_1 ng-scope')
#printlist(bowl)
adriver = []
for spoon in bowl:
adriver.append([spoon.text,spoon.find('a').get('href')])
#printlist(adriver)
#keep the list of links for actions in adriver.csv
header=["Product","Product_Link"]
with open("Projects/PROC overview/adriver.csv", "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(adriver)
f.close
# cycle through each product with actions and get list of actions by product - save to acollect.csv
driver = webdriver.Safari()
acollect = [] # Product | ActionSet | ActionSet_Describe | ActionSet_Link | ActionSet_LinkText
for row in adriver:
driver.get(row[1])
time.sleep(10)
action_soup = BeautifulSoup(driver.page_source,"lxml")
bowl = action_soup.findAll('tr')
for spoon in bowl:
sip = spoon.findAll('td')
if len(sip) == 3:
acollect.append([row[0],sip[1].text.strip(),' '.join(sip[2].text.split()),sip[0].find('a').get('href').strip(),' '.join(sip[0].text.split())])
#print(' '.join(sip[0].text.split()),sip[0].find('a').get('href').strip(),sip[1].text.strip(),' '.join(sip[2].text.split()))
driver.close()
#keep the list of links for actions in acollect.csv
header=["Product","ActionSet","ActionSet_Describe","ActionSet_Link","ActionSet_LinkText"]
with open("Projects/PROC overview/acollect.csv", "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(acollect)
f.close
#url for page with links to all sas procs by product
#base_url='https://documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=allprodsproc&docsetTarget=p1vzipzy6l8so0n1gbbh3ae63czb.htm&locale=en'
base_url='https://documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.5&docsetId=allprodsproc&docsetTarget=p1vzipzy6l8so0n1gbbh3ae63czb.htm&locale=en'
#retrieve the html from the list of all sas procs by product
driver = webdriver.Safari()
driver.get(base_url)
time.sleep(10)
soup = BeautifulSoup(driver.page_source,"lxml")
driver.close()
#print(soup)
# Build the collect list: Product | Procedure | Procedure_Short | Procedure_Link
bowl = soup.findAll(['h2','p'],attrs={'class':['xisDoc-title','xisDoc-paragraph']})
collect = []
product = []
for spoon in bowl:
#print('line - ', spoon)
if spoon.name=='h2' and "SAS Products" not in spoon.text:
product.append(spoon.text.strip())
if spoon.name=='p' and product:
block = spoon.find('a')
if block:
link = block.get('href')
proc = ' '.join(block.text.split())
proc_short = proc.replace(': ',' ') # template shows up as template: because it has multiple links
proc_short = proc_short.split(' ',1)[0]
collect.append([product[-1], proc, proc_short, link.strip()])
#remove the few cases where a product starts by listing another product (not a proc): as in "includes contents of product..."
for idx, item in enumerate(collect):
if item[1] in product:
del collect[idx]
#print(collect)
#keep the list of links for products and procedures in driver.csv
header=["Product","Procedure","Procedure_Short","Procedure_Link"]
with open("Projects/PROC overview/driver.csv", "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(collect)
f.close
# cycle through products, visit pages, look for links to overview and comparisons
#build a list of procedures
procedures = []
for row in collect:
if row[2] not in procedures:
procedures.append(row[2])
#printlist(procedures)
#function to see check if link is for desired purpose and if it needs stump url
def check_addstump(link,stump):
link=link.strip()
if link.startswith('http'):
return link
else:
return stump + link
# cycle through procedure links, check for overview and contrasted links: Collect = Product | Procedure | Procedure_Short | Procedure_Link | Overview_Link | Compared_Link
comp_stump='https://documentation.sas.com'
driver = webdriver.Safari()
#collect = collect[393:397] #subset for testing
#collect = collect[290:296] #subset for testing
for row in collect:
driver.get(row[3])
time.sleep(10)
proc_soup = BeautifulSoup(driver.page_source,"lxml")
for proc_link in proc_soup.find_all('a'):
if ("Overview" in proc_link.text) and proc_link.get('href'):
row.append(check_addstump(proc_link.get('href'),comp_stump))
if len(row) != 5:
row.append('')
for proc_link in proc_soup.find_all('a'):
comps=["Contrasted","Compared"]
if any(comp in proc_link.text for comp in comps) and proc_link.get('href'):
row.append(check_addstump(proc_link.get('href'),comp_stump))
if len(row) !=6:
row.append('')
#printlist(collect)
#keep the incompete collect list to run again from here:
header=["Product","Procedure","Procedure_Short","Procedure_Link","Overview_Link","Compared_Link"]
with open("Projects/PROC overview/precollect.csv", "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(collect)
f.close
# get list of procs mentioned on overview/compared to pages when they exist: Collect = Product | Procedure | Procecure_Short | Procedure_Link | Overview_Link | Compared_Link | Compared_PROCS (list)
header=["Product","Procedure","Procedure_Short","Procedure_Link","Overview_Link","Compared_Link",'Compared_PROCS']
with open("Projects/PROC overview/collect.csv", "w", newline="") as f:
writer = csv.writer(f)
writer.writerow(header)
f.close
for row in collect:
row.append('')
regex = r"\b[A-Z][A-Z]+\b"
compared_procs = []
if row[5]: # get compared PROCs
driver.get(row[5])
time.sleep(10)
comp_soup = BeautifulSoup(driver.page_source,"lxml")
for comp_link in comp_soup.find_all('p'):
for match in re.finditer(regex, comp_link.text):
if (match.group() not in compared_procs) and (match.group() in procedures) and (match.group() != row[2]): #not already found, is in full list, not the current proc
compared_procs.append(match.group())
row[6]=match.group()
with open("Projects/PROC overview/collect.csv","a") as f:
writer = csv.writer(f)
writer.writerow(row)
if row[4]: # get overview PROCs - only keep ones not already covered in compared
driver.get(row[4])
time.sleep(15)
comp_soup = BeautifulSoup(driver.page_source,"lxml")
for comp_link in comp_soup.find_all('p'):
for match in re.finditer(regex, comp_link.text):
if (match.group() not in compared_procs) and (match.group() in procedures) and (match.group() != row[2]): #not already found, is in full list, not the current proc
compared_procs.append(match.group())
row[6]=match.group()
with open("Projects/PROC overview/collect.csv","a") as f:
writer = csv.writer(f)
writer.writerow(row)
if not compared_procs:
with open("Projects/PROC overview/collect.csv","a") as f:
writer = csv.writer(f)
writer.writerow(row)
driver.quit()
#printlist(collect)
| python |
"""The SquonkServer class handles get, post and delete requests against
the squonk base_url using the SquonkAuth class to refresh the
authentication token when required.
"""
import requests
import json
import logging
from email.policy import default
from collections import namedtuple
try:
from .SquonkAuth import SquonkAuth
except:
from SquonkAuth import SquonkAuth
# The search result.
# A namedtuple.
SearchResult = namedtuple('SearchResult', 'status_code message json')
class SquonkException(Exception):
"""A basic exception used by the Squonk API
"""
pass
class SquonkServer:
def __init__(self, auth, base_url):
# general settings
self._base_url = base_url
self._auth = auth
logging.debug('SquonkServer created:'+self._base_url)
# set a request
def send(self,type,request,form_data=None):
# Always try to refresh the access token.
# The token is only refreshed if it is close to expiry.
self._auth.check_token()
token = self._auth.get_token()
url = str(self._base_url + '/' + request)
logging.debug('SEND:' + type + ' ' + url)
response = None
if type == 'get':
headers = {'Authorization': str('bearer ' + token) }
response = requests.get(url, headers=headers, verify=True, allow_redirects=True)
else:
if type == 'post':
headers = {'Authorization': str('bearer ' + token), 'Content-Type': 'multipart/form'}
response = requests.post(url, files=form_data, headers = headers )
else:
if type == 'delete':
headers = {'Authorization': str('bearer ' + token) }
response = requests.delete(url, headers=headers, verify=True, allow_redirects=True)
else:
raise SquonkException('type must be get, post or delete')
status_code = response.status_code
logging.debug('GOT response '+str(status_code))
if not response.status_code in [200, 201]:
if response.status_code == 404:
print(response.text)
else:
print(response.content)
return response
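# A minimal, hedged usage sketch (SquonkAuth construction details are assumed, not shown here):
#   auth = SquonkAuth(...)
#   server = SquonkServer(auth, 'https://squonk.example.org/rest/v1')
#   response = server.send('get', 'services')
#   if response.status_code == 200:
#       data = response.json()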
| python |
# -*- coding: utf-8 -*-
import logging
from mathutils import Vector
class BlockDef(object):
class _BlockItem(object):
def __init__(self, name="", color=(0, 0, 0), block_def=(35, None)):
self._name = name
self._color = color
self._block_def = block_def
@property
def color(self):
return self._color
@property
def block_def(self):
return self._block_def
BLOCK_LIST = (
_BlockItem(
"White Wool",
Vector((0.95, 0.95, 0.95)),
(35, None)
),
_BlockItem(
"Orange Wool",
Vector((0.92, 0.53, 0.25)),
(35, 1)
),
_BlockItem(
"Magenta Wool",
Vector((0.73, 0.31, 0.77)),
(35, 2)
),
_BlockItem(
"Light Blue Wool",
Vector((0.43, 0.55, 0.81)),
(35, 3)
),
_BlockItem(
"Yellow Wool",
Vector((0.77, 0.71, 0.11)),
(35, 4)
),
_BlockItem(
"Lime Wool",
Vector((0.23, 0.75, 0.18)),
(35, 5)
),
_BlockItem(
"Pink Wool",
Vector((0.84, 0.54, 0.62)),
(35, 6)
),
_BlockItem(
"Grey Wool",
Vector((0.26, 0.26, 0.26)),
(35, 7)
),
_BlockItem(
"Light Grey Wool",
Vector((0.62, 0.65, 0.65)),
(35, 8)
),
_BlockItem(
"Cyan Wool",
Vector((0.15, 0.46, 0.59)),
(35, 9)
),
_BlockItem(
"Purple Wool",
Vector((0.53, 0.23, 0.80)),
(35, 10)
),
_BlockItem(
"Blue Wool",
Vector((0.15, 0.20, 0.60)),
(35, 11)
),
_BlockItem(
"Brown Wool",
Vector((0.22, 0.30, 0.09)),
(35, 12)
),
_BlockItem(
"Green Wool",
Vector((0.22, 0.30, 0.09)),
(35, 13)
),
_BlockItem(
"Red Wool",
Vector((0.65, 0.17, 0.16)),
(35, 14)
),
_BlockItem(
"Black Wool",
Vector((0, 0, 0)),
(35, 15)
),
_BlockItem(
"White Stained Clay",
Vector((0.77, 0.65, 0.60)),
(159, None)
),
_BlockItem(
"Orange Stained Clay",
Vector((0.60, 0.31, 0.14)),
(159, 1)
),
_BlockItem(
"Magenta Stained Clay",
Vector((0.56, 0.33, 0.40)),
(159, 2)
),
_BlockItem(
"Light Blue Stained Clay",
Vector((0.44, 0.42, 0.54)),
(159, 3)
),
_BlockItem(
"Yellow Stained Clay",
Vector((0.69, 0.49, 0.13)),
(159, 4)
),
_BlockItem(
"Lime Stained Clay",
Vector((0.38, 0.44, 0.20)),
(159, 5)
),
_BlockItem(
"Pink Stained Clay",
Vector((0.63, 0.30, 0.31)),
(159, 6)
),
_BlockItem(
"Gray Stained Clay",
Vector((0.22, 0.16, 0.14)),
(159, 7)
),
_BlockItem(
"Light Gray Stained Clay",
Vector((0.53, 0.42, 0.38)),
(159, 8)
),
_BlockItem(
"Cyan Stained Clay",
Vector((0.34, 0.35, 0.36)),
(159, 9)
),
_BlockItem(
"Purple Stained Clay",
Vector((0.44, 0.25, 0.31)),
(159, 10)
),
_BlockItem(
"Blue Stained Clay",
Vector((0.27, 0.22, 0.33)),
(159, 11)
),
_BlockItem(
"Brown Stained Clay",
Vector((0.28, 0.19, 0.13)),
(159, 12)
),
_BlockItem(
"Green Stained Clay",
Vector((0.29, 0.32, 0.16)),
(159, 13)
),
_BlockItem(
"Red Stained Clay",
Vector((0.56, 0.24, 0.18)),
(159, 14)
),
_BlockItem(
"Black Stained Clay",
Vector((0.13, 0.08, 0.06)),
(159, 15)
),
_BlockItem(
"Stone",
Vector((0.47, 0.47, 0.47)),
(1, None)
),
_BlockItem(
"Polished Granite",
Vector((0.63, 0.44, 0.38)),
(1, 2)
),
_BlockItem(
"Oak Wood Plank",
Vector((0.66, 0.53, 0.34)),
(5, None)
),
_BlockItem(
"Spruce Wood Plank",
Vector((0.46, 0.34, 0.20)),
(5, 1)
),
_BlockItem(
"Birch Wood Plank",
Vector((0.79, 0.73, 0.49)),
(5, 2)
),
_BlockItem(
"Jungle Wood Plank",
Vector((0.64, 0.46, 0.31)),
(5, 3)
),
_BlockItem(
"Acacia Wood Plank",
Vector((0.59, 0.32, 0.17)),
(5, 4)
),
_BlockItem(
"Sand",
Vector((0.83, 0.78, 0.60)),
(12, None)
),
_BlockItem(
"Red Sand",
Vector((0.63, 0.32, 0.12)),
(12, 1)
),
_BlockItem(
"Sponge",
Vector((0.78, 0.78, 0.31)),
(19, None)
),
_BlockItem(
"Sandstone",
Vector((0.88, 0.85, 0.64)),
(24, None)
),
_BlockItem(
"Gold Block",
Vector((0.99, 0.99, 0.36)),
(41, None)
),
_BlockItem(
"Iron Block",
Vector((0.93, 0.93, 0.93)),
(42, None)
),
)
@staticmethod
def find_nearest_color_block(target_color):
min_dist = 10
min_index = 0
logging.debug("Target_color: {}".format(target_color.to_tuple()))
for i, block in enumerate(BlockDef.BLOCK_LIST):
dist = (block.color - target_color).length
logging.debug(" i = {}, dist = {}".format(i, dist))
if dist < min_dist:
min_index = i
min_dist = dist
logging.debug(" min_index is '{}'".format(min_index))
return BlockDef.BLOCK_LIST[min_index]
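
# Minimal usage sketch (illustrative; assumes this runs where mathutils is
# available, e.g. inside Blender). The target colour is an arbitrary example.
#
#     target = Vector((0.6, 0.2, 0.2))
#     block = BlockDef.find_nearest_color_block(target)
#     print(block.block_def)   # (block_id, data_value) tuple, e.g. (35, 14)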
| python |
from flask_wtf import FlaskForm
from wtforms import StringField, DateField, SubmitField
from wtforms.validators import DataRequired
class QuestionsForm(FlaskForm):
class Meta:
csrf = False
    # Example of defining a field. An in-depth description of the available
    # field types and options can be found in the WTForms documentation.
# field_name = FieldType(label, description="some description", validators=[])
question1 = StringField("Question 1", description="This is the form description for question number 1", validators=[DataRequired()])
question2 = StringField("q2", validators=[DataRequired()])
question3 = StringField("q3", validators=[DataRequired()])
question4 = StringField("q4", validators=[DataRequired()])
question5 = StringField("q5", validators=[DataRequired()])
question6 = StringField("q6", validators=[DataRequired()])
    question7 = StringField("q7", validators=[DataRequired()])
| python
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from scipy.special import factorial2
class Hermite:
def __init__(self, num_pol = 5):
self.h = []
def h0(x): return torch.ones_like(x)
self.h.append(h0)
def h1(x): return x
self.h.append(h1)
def h2(x): return (x**2 - 1)/np.sqrt(np.math.factorial(2))
self.h.append(h2)
def h3(x): return (x**3 - 3*x)/np.sqrt(np.math.factorial(3))
self.h.append(h3)
def h4(x): return (x**4 - 6*(x**2) + 3)/np.sqrt(np.math.factorial(4))
self.h.append(h4)
def h5(x): return (x**5 - 10*x**3 + 15*x)/np.sqrt(np.math.factorial(5))
self.h.append(h5)
def h6(x): return (x**6 - 15*x**4 + 45*x**2 - 15)/np.sqrt(np.math.factorial(6))
self.h.append(h6)
def h7(x): return (x**7 - 21*x**5 + 105*x**3 - 105*x)/np.sqrt(np.math.factorial(7))
self.h.append(h7)
def h8(x): return (x**8 - 28*x**6 + 210*x**4 - 420*x**2 + 105)/np.sqrt(np.math.factorial(8))
self.h.append(h8)
def h9(x): return (x**9 - 36*x**7 + 378*x**5 - 1260*x**3 + 945*x)/np.sqrt(np.math.factorial(9))
self.h.append(h9)
def h10(x): return (x**10 - 45*x**8 + 630*x**6 - 3150*x**4 + 4725*x**2 - 945)/np.sqrt(np.math.factorial(10))
self.h.append(h10)
        # self.bn1 = nn.BatchNorm2d(in_planes)  # disabled: `in_planes` is not defined in this scope
def get_initializations(self, num_pol = 5, copy_fun = 'relu'):
k = []
if copy_fun == 'relu':
for n in range(num_pol):
if n == 0:
k.append(1.0/np.sqrt(2*np.pi))
#k.append(0.0)
#k.append(0.3821)
elif n == 1:
k.append(1.0/2)
#k.append(0.0)
#k.append(0.3775)
elif n == 2:
k.append(1.0/np.sqrt(4*np.pi))
#k.append(0.0)
#k.append(0.5535)
elif n > 2 and n % 2 == 0:
#c = 1.0 * np.math.factorial(np.math.factorial(n-3))**2 / np.sqrt(2*np.pi*np.math.factorial(n))
c = 1.0 * factorial2(n-3)**2 / np.sqrt(2*np.pi*np.math.factorial(n))
k.append(c)
#k.append(0.0)
#k.append(-0.4244)
elif n >= 2 and n % 2 != 0:
k.append(0.0)
#k.append(0.2126)
#k.append(0.0655)
return k
def get_vars(self, num_pol = 5, copy_fun = 'relu', seed = 1, dtype = torch.float32):
torch.manual_seed(seed)
if copy_fun == 'relu':
k = self.get_initializations(num_pol, copy_fun)
p = 0.00001*torch.randn(num_pol, requires_grad=True) + torch.tensor(k)
p_param = torch.nn.Parameter(p)
return p_param
def hermite(self, x, k, num_pol = 5):
evals = 0.0
for i in range(num_pol):
#print('this', i)
#print('a', k[i])
#print('b', self.h[i](x))
#print('a*b', k[i]*self.h[i](x))
#eval_c = k[i]*self.h[i](x)
#if np.isnan(eval_c):
# eval_c = 0.
evals += k[i]*self.h[i](x)
return evals
def hermitePreBN(self, x, k, num_pol = 5):
evals = []
for i in range(num_pol):
evals.append(k[i]*self.h[i](x))
#print('this', i)
#print('a', k[i])
#print('b', self.h[i](x))
#print('a*b', k[i]*self.h[i](x))
#eval_c = k[i]*self.h[i](x)
#if np.isnan(eval_c):
# eval_c = 0.
return evals
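
# Minimal usage sketch (illustrative): evaluate a Hermite-series activation with
# ReLU-like initial coefficients on a random tensor. The polynomial count below
# is an arbitrary choice.
if __name__ == "__main__":
    act = Hermite()
    coeffs = act.get_vars(num_pol=5, copy_fun='relu')  # trainable coefficients
    x = torch.randn(4, 8)
    y = act.hermite(x, coeffs, num_pol=5)              # weighted sum of Hermite polynomials
    print(y.shape)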
| python |
import numpy as np
import math
import cv2
class PSNR():
def __init__(self, range=1):
self.range = range
def __call__(self, img1, img2):
mse = np.mean((img1 - img2) ** 2)
return 20 * math.log10(self.range / math.sqrt(mse))
class SSIM():
def __init__(self, range=1):
self.range = range
def __call__(self, img1, img2):
if not img1.shape == img2.shape:
raise ValueError("Input images must have the same dimensions.")
if img1.ndim == 2: # Grey or Y-channel image
return self._ssim(img1, img2)
elif img1.ndim == 3:
if img1.shape[2] == 3:
ssims = []
for i in range(3):
                    ssims.append(self._ssim(img1[..., i], img2[..., i]))
return np.array(ssims).mean()
elif img1.shape[2] == 1:
return self._ssim(np.squeeze(img1), np.squeeze(img2))
else:
raise ValueError("Wrong input image dimensions.")
def _ssim(self, img1, img2):
C1 = (0.01 * self.range) ** 2
C2 = (0.03 * self.range) ** 2
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
mu1_sq = mu1 ** 2
mu2_sq = mu2 ** 2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1 ** 2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2 ** 2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (
(mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
)
return ssim_map.mean()
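
# Minimal usage sketch (illustrative): compare an image with a noisy copy.
# Pixel values are assumed to be floats in [0, 1], matching the default range=1.
if __name__ == "__main__":
    img = np.random.rand(64, 64, 3)
    noisy = np.clip(img + 0.05 * np.random.randn(64, 64, 3), 0.0, 1.0)
    print("PSNR:", PSNR(range=1)(img, noisy))
    print("SSIM:", SSIM(range=1)(img, noisy))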
| python |
from excursion.sampler import *
from excursion.models import ExcursionModel, SKLearnGP
from excursion.acquisition import *
from excursion.excursion import ExcursionProblem, ExcursionResult
# # move this into the excursion result, unless we add scikit learn implementation # #
def build_result(details: ExcursionProblem, acquisition, **kwargs):
if kwargs['device'] == 'skcpu':
X_pointsgrid = details.X_pointsgrid
true_y = details.functions[0](X_pointsgrid)
else:
raise NotImplementedError("Only supports device 'SKCPU'")
acquisition = acquisition # What if they passed in their own acq, then there is no string here.
return ExcursionResult(ndim=details.ndim, thresholds=details.thresholds, true_y=true_y,
invalid_region=details.invalid_region, X_pointsgrid=details.X_pointsgrid,
X_meshgrid=details.X_meshgrid, rangedef=details.rangedef)
def build_sampler(generator: str or SampleGenerator, **kwargs):
"""Build a default random sample generator.
For the special generator called "random" the return value is None.
Parameters
----------
generator : "random", "latin_sample", "latin_hypercube" or SampleGenerator instance"
Should inherit from `skopt.sampler.SampleGenerator`.
kwargs : dict
Extra parameters provided to the generator at init time.
"""
if generator is None:
generator = "random"
elif isinstance(generator, str):
generator = generator.lower()
allowed_generator = ["random"]
if generator not in allowed_generator:
raise ValueError("Valid strings for the generator parameter "
" are: 'latin', 'latin_hypercube', or 'random' not "
"%s." % generator)
elif not isinstance(generator, SampleGenerator):
raise ValueError("generator has to be a SampleGenerator or str."
"Got %s" % (str(type(generator))))
if isinstance(generator, str):
if generator == "random":
generator = RandomChoice()
generator.set_params(**kwargs)
return generator
def build_acquisition_func(acq_function: str or AcquisitionFunction, **kwargs):
"""Build an acquisition function.
For the special acq_function called "random" the return value is None.
Parameters
----------
function : "MES", "PES", or AcquisitionFunction instance"
Should inherit from `skopt.sampler.SampleGenerator`.
kwargs : dict
Extra parameters provided to the acq_function at init time.
"""
if acq_function is None:
acq_function = "PES"
elif isinstance(acq_function, str):
acq_function = acq_function.lower()
allowed_acq_funcs = ["pes"]
if acq_function not in allowed_acq_funcs:
raise ValueError("Valid strings for the acq_function parameter "
" are: %s, not %s." % (",".join(allowed_acq_funcs), acq_function))
elif not isinstance(acq_function, AcquisitionFunction):
raise TypeError("acq_function has to be an AcquisitionFunction. Got %s" % (str(type(acq_function))))
if isinstance(acq_function, str):
if acq_function == "pes":
acq_function = SKPES()
acq_function.set_params(**kwargs)
return acq_function
def build_model(model: str or ExcursionModel, rangedef, **kwargs):
"""
Build an acquisition function.
For the special acq_function called "random" the return value is None.
Parameters
----------
model : "GPyTorchGP", "GridGP", or ExcursionModel instance"
Should inherit from `excursion.models.ExcursionModel`.
kwargs : dict
Extra parameters provided to the acq_function at init time.
"""
if model is None:
model = "sklearngp"
elif isinstance(model, str):
model = model.lower()
allowed_models = ["sklearngp"]
if model not in allowed_models:
raise ValueError("Valid strings for the model parameter are: 'SKLearnGP' not %s." % model)
elif not isinstance(model, ExcursionModel):
raise TypeError("model has to be an ExcursionModel or str. Got %s" % (str(type(model))))
if isinstance(model, str):
if model == "sklearngp":
model = SKLearnGP(ndim=len(rangedef))
model.set_params(**kwargs)
return model
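
# Minimal usage sketch (illustrative; assumes an ExcursionProblem instance
# `problem` with a populated `rangedef` attribute):
#
#     sampler = build_sampler("random")
#     acq = build_acquisition_func("PES")
#     model = build_model("SKLearnGP", problem.rangedef)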
| python |
def settings():
    # Prompt the user for the extensions to organize and for the copy/search options
    extensions = []
    key = "Y"
    while (key != "N"):
        extension = str(input("Enter an extension to search and organize: ")).strip().replace(".", "").lower()
extensions.append(extension)
key = str(input("Continue? Y/N: ")).strip().upper()
answer = str(input("Would you like to copy all files? Yes/No: "))
available_copy = True if answer.strip().lower() == "yes" else False
answer = str(input("Would you like to search recursively? Yes/No: "))
recursiveSearch = True if answer.strip().lower() == "yes" else False
return extensions, available_copy, recursiveSearch
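
# Minimal usage sketch (interactive; run this file directly to try it):
if __name__ == "__main__":
    exts, copy_files, recursive = settings()
    print("Extensions:", exts)
    print("Copy files:", copy_files, "| Recursive search:", recursive)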
| python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'resources/treeDialog.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_TreeDialog(object):
def setupUi(self, TreeDialog):
TreeDialog.setObjectName("TreeDialog")
TreeDialog.resize(912, 804)
self.gridLayout = QtWidgets.QGridLayout(TreeDialog)
self.gridLayout.setObjectName("gridLayout")
self.groups = QtWidgets.QComboBox(TreeDialog)
self.groups.setObjectName("groups")
self.gridLayout.addWidget(self.groups, 0, 0, 1, 1)
self.showButton = QtWidgets.QPushButton(TreeDialog)
self.showButton.setObjectName("showButton")
self.gridLayout.addWidget(self.showButton, 0, 1, 1, 1)
self.lineEdit = QtWidgets.QLineEdit(TreeDialog)
self.lineEdit.setObjectName("lineEdit")
self.gridLayout.addWidget(self.lineEdit, 0, 2, 1, 1)
self.label = QtWidgets.QLabel(TreeDialog)
self.label.setText("")
self.label.setObjectName("label")
self.gridLayout.addWidget(self.label, 0, 3, 1, 1)
self.treeView = QtWidgets.QTreeView(TreeDialog)
self.treeView.setObjectName("treeView")
self.gridLayout.addWidget(self.treeView, 1, 0, 1, 3)
self.tableWidget = QtWidgets.QTableWidget(TreeDialog)
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(0)
self.tableWidget.setRowCount(0)
self.gridLayout.addWidget(self.tableWidget, 1, 3, 1, 3)
self.buttonBox = QtWidgets.QDialogButtonBox(TreeDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 6)
self.checkBox = QtWidgets.QCheckBox(TreeDialog)
self.checkBox.setObjectName("checkBox")
self.gridLayout.addWidget(self.checkBox, 0, 5, 1, 1)
self.retranslateUi(TreeDialog)
self.buttonBox.accepted.connect(TreeDialog.accept)
self.buttonBox.rejected.connect(TreeDialog.reject)
QtCore.QMetaObject.connectSlotsByName(TreeDialog)
def retranslateUi(self, TreeDialog):
_translate = QtCore.QCoreApplication.translate
TreeDialog.setWindowTitle(_translate("TreeDialog", "Dialog"))
self.showButton.setText(_translate("TreeDialog", "Show"))
self.checkBox.setText(_translate("TreeDialog", "Select all"))
| python |
# Create a program that reads something and shows its primitive type and all possible information about it.
print('=-'*7, 'DESAFIO 4', '=-'*7)
n = input('Digite algo: ')
print('O tipo primitivo desse valor é {}.'.format(type(n)))  # Room for improvement!
print('Só tem espaços? {}'.format(n.isspace()))
print('É um número? {}'.format(n.isnumeric()))
print('É alfanumérico? {}'.format(n.isalnum()))
print('É alfabético? {}'.format(n.isalpha()))
print('Está em minúsculas? {}'.format(n.islower()))
print('Está em maiúsculas? {}'.format(n.isupper()))
print('Está capitalizada? {}'.format(n.istitle()))
| python |
import sys
import math
import json
class Point:
def __init__(self, x, y, z, index):
self.x = x
self.y = y
self.z = z
self.index = index
def getDist(a, b):
return math.sqrt((a.x - b.x)*(a.x - b.x) + (a.y - b.y)*(a.y - b.y) + (a.z - b.z)*(a.z - b.z))
# the submission file is expected as the last command-line argument
filename = sys.argv[-1]
input_file = open('../problems/input/tsp.txt', 'r')
pts = []
for line in input_file:
l = line.split()
index = int(l[0])
x = int(l[1])
y = int(l[2])
z = int(l[3])
pts.append(Point(x, y, z, index))
nums = []
with open(filename) as fileIn:
for line in fileIn:
for w in line.split(' '):
if len(w) > 0:
try:
nums.append(int(w))
except ValueError:
print(json.dumps({"isError": True, "message": "There was a problem with your submission. Fix your file and try again"}))
sys.exit(-1)
for a in nums:
if a > 500 or a < 1:
print(json.dumps({"isError": True, "message": "There was a problem with your submission. Fix your file and try again"}))
sys.exit(-1)
beenTo = []
for a in range(0, 500):
beenTo.append(False)
dist = 0.0
for a in range(1, len(nums)):
if beenTo[nums[a] - 1]:
print(json.dumps({"isError": True, "message": "There was a problem with your submission. Fix your file and try again"}))
sys.exit(-1)
beenTo[nums[a] - 1] = True
b = a - 1
dist += getDist(pts[nums[b] - 1], pts[nums[a] - 1])
if beenTo[nums[0] - 1]:
print(json.dumps({"isError": True, "message": "There was a problem with your submission. Fix your file and try again"}))
sys.exit(-1)
beenTo[nums[0] - 1] = True
dist += getDist(pts[nums[0] - 1], pts[nums[-1] - 1])
for a in beenTo:
if not(a):
print(json.dumps({"isError": True, "message": "There was a problem with your submission. Fix your file and try again"}))
sys.exit(-1)
print(json.dumps({"isError": False, "score": dist, "message": "You got a score of " + str(dist) + "!"}))
| python |
from scipy import interp
import numpy as np
from itertools import cycle
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
import matplotlib.pyplot as plt
def plot_roc_pr(
y_pred : np.ndarray,
y_test : np.ndarray
) -> None:
"""
Plots the ROC + Precision recall curves for
"""
n_classes = y_test.shape[1]
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_pred.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
# First aggregate all false positive rates
lw =2
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
fig, (ax1, ax2) = plt.subplots(1, 2,figsize=(15,6))
ax1.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
color='gold', linestyle=':', linewidth=4)
ax1.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
color='deeppink', linestyle=':', linewidth=4)
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
for i, color in zip(range(n_classes), colors):
ax1.plot(fpr[i], tpr[i], color=color, lw=lw,
label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
ax1.plot([0, 1], [0, 1], 'k--', lw=lw)
ax1.set_xlim([0.0, 1.0])
ax1.set_ylim([0.0, 1.05])
ax1.set_xlabel('False Positive Rate')
ax1.set_ylabel('True Positive Rate')
ax1.set_title('Extension of Receiver operating characteristic to multi-class')
ax1.legend(loc="lower left")
# ax1.show()
# setup plot details
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_pred[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_pred[:, i])
# A "micro-average": quantifying score on all classes jointly
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_pred.ravel())
average_precision["micro"] = average_precision_score(y_test, y_pred,
average="micro")
colors = cycle(['navy', 'turquoise', 'darkorange', 'cornflowerblue', 'teal'])
# plt.figure(figsize=(7, 8))
f_scores = np.linspace(0.2, 0.8, num=4)
lines = []
labels = []
    for f_score in f_scores:
        x = np.linspace(0.01, 1)
        y = f_score * x / (2 * x - f_score)
        l, = ax2.plot(x[y >= 0], y[y >= 0], color='gray', alpha=0.2)
    lines.append(l)
    labels.append('iso-f1 curves')
    l, = ax2.plot(recall["micro"], precision["micro"], color='gold', lw=2)
    lines.append(l)
    labels.append('micro-average Precision-recall (area = {0:0.2f})'
                  ''.format(average_precision["micro"]))
for i, color in zip(range(n_classes), colors):
l, = ax2.plot(recall[i], precision[i], color=color, lw=2)
lines.append(l)
labels.append('Precision-recall for class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
# fig = plt.gcf()
# fig.subplots_adjust(bottom=0.25)
ax2.set_xlim([0.0, 1.0])
ax2.set_ylim([0.0, 1.05])
ax2.set_xlabel('Recall')
ax2.set_ylabel('Precision')
ax2.set_title('Extension of Precision-Recall curve to multi-class')
ax2.legend(lines, labels) #, loc=(0, -.38), prop=dict(size=14))
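
# Minimal usage sketch (illustrative): random scores for a 3-class problem with
# one-hot encoded labels.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    y_true = np.eye(3)[rng.randint(0, 3, size=200)]  # one-hot labels, shape (200, 3)
    y_score = rng.rand(200, 3)                       # fake classifier scores
    plot_roc_pr(y_score, y_true)
    plt.show()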
| python |
class withdelta(object):
"""
Wraps any object into the `value` property, and adds a `delta` floating point property
    that can be used to store extra information, such as the percentage of
    improvement over a different value.
All the attributes are forwarded to `value`, except for `value` and `delta`. This means
that you can call any weird method on withdelta, and this will reflect the implementation
of the current object stored in `value`.
Use val_of to quickly unwrap any object from its withdelta wrapper.
"""
value = None
delta = None
def __getattr__(self, name):
if name in ['value', 'delta']:
return super(withdelta, self).__getattr__(name)
else:
return getattr(self.value, name)
def __setattr__(self, name, value):
if name in ['value', 'delta']:
super(withdelta, self).__setattr__(name, value)
else:
setattr(self.value, name, value)
def __repr__(self):
return 'withdelta(' + str(self.value) + ', ' + str(self.delta) + ')'
def __init__(self, obj, delta = float('NaN')):
self.value = obj
self.delta = delta
def val_of(obj):
"""
Returns `obj.value` if obj is a withdelta instance, otherwise just obj.
"""
return obj.value if isinstance(obj, withdelta) else obj
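
# Minimal usage sketch (illustrative):
if __name__ == "__main__":
    best = withdelta(3.14, delta=0.12)   # wrap a value, annotate a 12% improvement
    print(best, best.delta)              # withdelta(3.14, 0.12) 0.12
    print(val_of(best) + 1)              # unwrap before doing arithmetic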
| python |
class Dummy(object):
def purge(self, path):
pass
| python |
"""
cluster_graph.py
ClusterGraph are a class for tracking all possible smirks decorators in a group
(or cluster) of molecular fragments. Moving forward these will be used to
find the minimum number of smirks decorators that are required to have a
set of smirks patterns that maintain a given clustering of fragments.
"""
import networkx as nx
from functools import total_ordering
from chemper.graphs.single_graph import SingleGraph
from chemper.graphs.environment import ChemicalEnvironment as CE
from chemper.mol_toolkits import mol_toolkit
@total_ordering
class ClusterGraph(SingleGraph):
"""
ChemPerGraphs are a graph based class for storing atom and bond information.
They use the chemper.mol_toolkits Atoms, Bonds, and Mols
"""
@total_ordering
class AtomStorage:
"""
AtomStorage tracks information about an atom
"""
def __init__(self, atoms=None, label=None):
"""
Parameters
----------
atoms : ChemPer Atom or list of ChemPer Atoms
this is one or more atoms whose information should be stored
label : int
SMIRKS index (:n) for writing SMIRKS
if the value is less than zero it is used for storage purposes
only as SMIRKS can only be written with positive integer indices
"""
self.decorators = set()
if atoms is not None:
# check if this is a single atom
if 'Atom' in str(type(atoms)):
atoms = [atoms]
# otherwise it should be iterable
for atom in atoms:
self.decorators.add(self.make_atom_decorators(atom))
self.label = label
def __lt__(self, other):
"""
Overrides the default implementation
This method was primarily written for making SMIRKS patterns predictable.
If atoms are sortable, then the SMIRKS patterns are always the same making
tests easier to write. However, the specific sorting was created to also make SMIRKS
output as human readable as possible, that is to at least make it easier for a
human to see how the indexed atoms are related to each other.
It is typically easier for humans to read SMILES/SMARTS/SMIRKS with less branching (indicated with ()).
For example in:
[C:1]([H])([H])~[N:2]([C])~[O:3]
it is easier to see that the atoms C~N~O are connected in a "line" instead of:
[C:1]([N:2]([O:3])[C])([H])[H]
which is equivalent, but with all the () it is hard for a human to read the branching
Parameters
----------
other : AtomStorage
Returns
-------
is_less_than : boolean
self is less than other
"""
# if either smirks index is None, then you can't directly compare
# make a temporary index that is negative if it was None
self_index = self.label if self.label is not None else -1000
other_index = other.label if other.label is not None else -1000
# if either index is greater than 0, the one that is largest should go at the end of the list
if self_index > 0 or other_index > 0:
return self_index < other_index
# Both SMIRKS indices are not positive or None so compare the SMIRKS patterns instead
return self.as_smirks() < other.as_smirks()
def __eq__(self, other): return self.as_smirks() == other.as_smirks() and self.label == other.label
def __hash__(self): return id(self)
def __str__(self): return self.as_smirks()
def make_atom_decorators(self, atom):
"""
extract information from a ChemPer Atom that would be useful in a smirks
parameters
----------
atom : ChemPer atom object
returns
-------
decorators : tuple of str
tuple of all possible decorators for this atom
"""
aromatic = 'a' if atom.is_aromatic() else 'A'
charge = atom.formal_charge()
if charge >= 0:
charge = '+%i' % charge
else:
charge = '%i' % charge
min_ring_size = atom.min_ring_size()
if min_ring_size == 0:
ring = '!r'
else:
ring = 'r%i' % min_ring_size
return (
'#%i' % atom.atomic_number(),
'H%i' % atom.hydrogen_count(),
'X%i' % atom.connectivity(),
'x%i' % atom.ring_connectivity(),
ring,
charge,
aromatic,
)
def as_smirks(self, compress=False):
"""
Parameters
----------
compress : boolean
should decorators common to all sets be combined
for example '#6X4,#7X3;+0!r...'
Returns
-------
smirks : str
how this atom would be represented in a SMIRKS string
with the minimal combination of SMIRKS decorators
"""
if len(self.decorators) == 0:
if self.label is None or self.label <= 0:
return '[*]'
return '[*:%i]' % self.label
if compress and len(self.decorators) > 1:
base_smirks = self._compress_smirks()
else:
base_smirks = ','.join(sorted([''.join(l) for l in self.decorators]))
if self.label is None or self.label <= 0:
return '[%s]' % base_smirks
return '[%s:%i]' % (base_smirks, self.label)
def _sort_decs(self, dec_set, wild=True):
"""
Parameters
----------
dec_set : list like
single set of atom decorators
wild : boolean
insert * for decorator lists with no #n decorator
Returns
-------
sorted_dec_set : list
same set of decorators sorted with atomic number or * first
"""
temp_dec_set = list(dec_set)
atom_num = [i for i in temp_dec_set if '#' in i]
if len(atom_num) == 0 and wild:
atom_num = ["*"]
temp_dec_set = set(temp_dec_set) - set(atom_num)
aro = [i for i in temp_dec_set if 'a' in i.lower()]
temp_dec_set = set(temp_dec_set) - set(aro)
return atom_num + sorted(list(temp_dec_set)) + aro
def _compress_smirks(self):
"""
Returns
-------
smirks : str
This SMIRKS is compressed with all common decorators and'd to
the end of the pattern
"""
set_decs = [set(d) for d in self.decorators]
ands = set_decs[0]
for d_set in set_decs:
ands = ands & d_set
# check for atomic number in the "ands"
atomic = [a for a in ands if '#' in a]
if len(atomic) == 1:
# remove from and
ands.remove(atomic[0])
# put in all sets
for s in set_decs:
s.add(atomic[0])
or_sets = [self._sort_decs(d.difference(ands)) for d in set_decs]
ors = [''.join(o) for o in or_sets]
# add commas between ors
base = ','.join(sorted(ors))
# add and decorators
if len(ands) > 0:
base += ';'+ ';'.join(self._sort_decs(ands, wild=False))
return base
def add_atom(self, atom):
"""
Expand current AtomStorage by adding information about
a new ChemPer Atom
Parameters
----------
atom : ChemPer Atom
"""
self.decorators.add(self.make_atom_decorators(atom))
def compare_atom(self, atom):
"""
Compares decorators in this AtomStorage with the provided
ChemPer atom. The decorators are compared separately and
the highest score is returned. For example,
if this storage had two sets of decorators
- #7H1X3x0!r+0A
- #6H1X4x0!r+0A
and the input atom would have the decorators:
            - #6H1X3x2r6+0a
The score is calculated by finding the number of decorators
in common which would be
- #7H1X3x0!r+0A and #6H1X3x2r6+0a
have 3 decorators in common (H1,X3,+0)
- #6H1X4x0!r+0A and #6H1X3x2r6+0a
also have 3 decorators in common (#6, H1, +0)
However, we weight atoms with the same atomic number as more
similar by dividing the score by 10 if the atomic numbers do
not agree. Therefore the final scores will be:
- 0.3 for #7H1X3x0!r+0A
- 3 for #6H1X4x0!r+0A
The highest score for any set of decorators is returned so
3 is the returned score in this example.
Parameters
----------
atom : ChemPer Atom
Returns
-------
score : float
A score describing how similar the input atom is to any set of
decorators currently in this storage, based on its SMIRKS decorators.
This score ranges from 0 to 7. 7 comes from the number of decorators
on any atom, if this atom matches perfectly with one of the
                current decorator sets then 7 decorators agree. However, if the atomic
                number doesn't agree, then that set of decorators is considered
                less ideal; thus, if the atomic numbers don't agree, then the score
                is given by the number of other decorators divided by 10.
If the current storage is empty, then the score is given as 7
since any atom matches a wildcard atom.
"""
            # If decorators is empty (no known atom information), return 7 (current max)
if len(self.decorators) == 0:
return 7
score = 0
decs = self.make_atom_decorators(atom)
for ref in self.decorators:
# get atomic number for this set of decorators
current = len(set(ref) & set(decs))
# if atomic numbers don't agree, get the number of common decorators / 10
# if there are no matching atomic numbers, priority should still be given
# when the current atom matches stored decorators most closely
if ref[0] != decs[0]:
current = current / 10.0
if current > score:
score = current
return score
@total_ordering
class BondStorage:
"""
BondStorage tracks information about a bond
"""
def __init__(self, bonds=None, label=None):
"""
Parameters
----------
bonds : list of ChemPer Bonds
this is one or more bonds whose information should be stored
label : a label for the object, it can be anything
unlike atoms, bonds in smirks don't have labels
so this is only used for labeling the object if wanted
"""
self.order = set()
self.ring = set()
self.order_dict = {1:'-', 1.5:':', 2:'=', 3:'#'}
if bonds is not None:
if 'Bond' in str(type(bonds)):
bonds = [bonds]
for bond in bonds:
self.order.add(bond.get_order())
self.ring.add(bond.is_ring())
self.label = label
def __str__(self): return self.as_smirks()
def __lt__(self, other):
if self.as_smirks() == other.as_smirks():
return self.label < other.label
return self.as_smirks() < other.as_smirks()
def __eq__(self, other):
            return self.label == other.label and self.as_smirks() == other.as_smirks()
def __hash__(self): return id(self)
def as_smirks(self):
"""
Returns
-------
smirks : str
how this bond would be represented in a SMIRKS string
using only the required number of
"""
if len(self.order) == 0:
order = '~'
else:
order = ','.join([self.order_dict.get(o, '~') for o in sorted(list(self.order))])
# the ring set has booleans, if the length of the set is 1 then only ring (@) or non-ring (!@)
            # bonds have been added to this storage and we AND that decorator to the end of the bond
if len(self.ring) == 1:
if list(self.ring)[0]:
return order+';@'
else:
return order+';!@'
return order
def add_bond(self, bond):
"""
Expand current BondStorage by adding information about
a new ChemPer Bond
Parameters
----------
bond : ChemPer Bond
"""
self.order.add(bond.get_order())
self.ring.add(bond.is_ring())
def compare_bond(self, bond):
"""
Parameters
----------
bond : ChemPer Bond
bond you want to compare to the current storage
Returns
-------
score : int (0,1,2)
A score describing how similar the input bond is to any set of decorators currently
in this storage, based on its SMIRKS decorators.
1 for the bond order +
1 base on if this is a ring bond
"""
score = 0
if bond.get_order() in self.order or len(self.order) == 0:
score += 1
# the ring set has booleans, if the length of the set is 1 then only ring or non-ring
            # bonds have been added to this storage. That is the only time the ring contributes to the score
if len(self.ring) == 1 and list(self.ring)[0] == bond.is_ring():
score += 1
return score
# Initiate ClusterGraph
def __init__(self, mols=None, smirks_atoms_lists=None, layers=0):
"""
Initialize a SingleGraph from a molecule and list of indexed atoms
For the example, imagine we wanted to get a SMIRKS that
would match the carbon-carbon bonds in ethane and propane.
The carbon atoms are have indices (0,1) in ethane and (0,1) and (1,2)
in propane. For this example, we will assume we also want to include
the atoms one bond away from the indexed atoms (1 layer away).
Parameters
----------
mols : list of molecules (optional)
default = None (makes an empty graph)
these can be ChemPer Mols or molecule objects from
any supported toolkit (currently OpenEye or RDKit)
smirks_atoms_lists : list of list of tuples (optional)
default = None (must be paired with mols=None)
There is a list of tuples for each molecule, where each tuple specifies
a molecular fragment using the atoms' indices.
In the ethane and propane example, the `smirks_atoms_lists` would be
[ [ (0,1) ], [ (0,1), (1,2) ] ]
with one carbon-carbon bond in ethane and two carbon-carbon bonds in propane
layers : int (optional)
default = 0
layers specifies how many bonds away from the indexed atoms should be included in the
the SMIRKS patterns.
Instead of an int, the string 'all' would lead to all atoms in the molecules
being included in the SMIRKS (not recommended)
"""
SingleGraph.__init__(self)
self.mols = list()
self.smirks_atoms_lists = list()
self.layers = layers
self._symmetry_funct = self._no_symmetry
if mols is not None:
temp_mols = [mol_toolkit.Mol(m) for m in mols]
if len(temp_mols) != len(smirks_atoms_lists):
raise Exception('Number of molecules and smirks dictionaries should be equal')
for idx, mol in enumerate(temp_mols):
self.add_mol(mol, smirks_atoms_lists[idx])
def as_smirks(self, compress=False):
"""
Parameters
----------
compress : boolean
returns the shorter version of atom SMIRKS patterns
that is atoms have decorators "anded" to the end rather than listed
in each set that are OR'd together.
For example "[#6AH2X3x0!r+0,#6AH1X3x0!r+0:1]-;!@[#1AH0X1x0!r+0]"
compresses to: "[#6H2,#6H1;AX3x0!r+0:1]-;!@[#1AH0X1x0!r+0]"
Returns
-------
SMIRKS : str
a SMIRKS string matching the exact atom and bond information stored
"""
# The atom compression is different, but otherwise this is the
# same function as the parent class (SingleGraph)
return SingleGraph.as_smirks(self, compress)
def get_symmetry_funct(self, sym_label):
"""
Determine the symmetry function that should be used
when adding atoms to this graph.
For example, imagine a user is trying to make a
SMIRKS for all of the C-H bonds in methane. In most
toolkits the index for the carbon is 0 and the hydrogens are 1,2,3,4.
The final SMIRKS should have the form [#6AH4X4x0!r+0:1]-;!@[#1AH0X1x0!r+0]
no matter what order the atoms are input into ClusterGraph.
So if the user provides (0,1), (0,2), (3,0), (4,0) ClusterGraph
should figure out that the carbons in (3,0) and (4,0) should be in
the atom index :1 place like they were in the first set of atoms.
Bond atoms in (1,2) or (2,1) are symmetric, for angles its (1,2,3) or (3,2,1)
for proper torsions (1,2,3,4) or (4,3,2,1) and for
improper torsions (1,2,3,4), (3,2,1,4), (4,2,1,3).
For any other fragment type the atoms will be added to the graph in
the order they are provided since the symmetry function is unknown.
# TODO: In theory you could generalize this for generic linear fragments
# where those with an odd number of atoms behave like angles and an
# even number behave like proper torsions, however I think that is
# going to be outside the scope of ChemPer for the foreseeable future.
Parameters
----------
sym_label : str or None
type of symmetry, options which will change the way symmetry is
handled in the graph are "bond", "angle", "ProperTorsion", and "ImproperTorsion"
Returns
-------
symmetry_funct : function
returns the function that should be used to handle the appropriate symmetry
"""
if sym_label is None:
return self._no_symmetry
if sym_label.lower() == 'bond':
return self._bond_symmetry
if sym_label.lower() == 'angle':
return self._angle_symmetry
if sym_label.lower() == 'propertorsion':
return self._proper_torsion_symmetry
if sym_label.lower() == 'impropertorsion':
return self._improper_torsion_symmetry
return self._no_symmetry
def add_mol(self, input_mol, smirks_atoms_list):
"""
Expand the information in this graph by adding a new molecule
Parameters
----------
input_mol : ChemPer Mol
smirks_atoms_list : list of tuples
This is a list of tuples with atom indices [ (indices), ... ]
"""
mol = mol_toolkit.Mol(input_mol)
if len(smirks_atoms_list) == 0:
return
if len(self.mols) == 0:
self._add_first_smirks_atoms(mol, smirks_atoms_list[0])
self._symmetry_funct = self.get_symmetry_funct(CE(self.as_smirks()).get_type())
self._add_mol(mol, smirks_atoms_list[1:])
else:
self._add_mol(mol, smirks_atoms_list)
self.mols.append(mol)
self.smirks_atoms_lists.append(smirks_atoms_list)
def _add_first_smirks_atoms(self, mol, smirks_atoms):
"""
private function for adding the first molecule to an empty ClusterGraph
add_mol calls this if the graph is empty
Parameters
----------
mol : ChemPer Mol
smirks_atoms : tuple
tuple of atom indices for the first atoms to add to the graph. i.e. (0, 1)
"""
atom_dict = dict()
for key, atom_index in enumerate(smirks_atoms, 1):
atom_dict[atom_index] = key
atom1 = mol.get_atom_by_index(atom_index)
new_atom_storage = self.AtomStorage([atom1], key)
self._graph.add_node(new_atom_storage)
self.atom_by_label[key] = new_atom_storage
# Check for bonded atoms already in the graph
for neighbor_key in range(len(smirks_atoms), 0, -1):
if neighbor_key not in self.atom_by_label:
continue
# check if atoms are already connected on the graph
neighbor_storage = self.atom_by_label[neighbor_key]
if nx.has_path(self._graph, new_atom_storage, neighbor_storage):
continue
# check if atoms are connected in the molecule
atom2 = mol.get_atom_by_index(smirks_atoms[neighbor_key-1])
bond = mol.get_bond_by_atoms(atom1, atom2)
if bond is not None: # Atoms are connected add edge
bond_smirks = tuple(sorted([neighbor_key, key]))
bond_storage = self.BondStorage([bond], bond_smirks)
self.bond_by_label[bond_smirks] = bond_storage
self._graph.add_edge(new_atom_storage,
neighbor_storage,
bond=bond_storage)
# for each indexed atoms add unindexed atoms for the number of specified layers
for atom_label, atom_index in enumerate(smirks_atoms, 1):
atom = mol.get_atom_by_index(atom_index)
storage = self.atom_by_label[atom_label]
self._add_layers(mol, atom, storage, self.layers, atom_dict, is_first=True)
def _add_layers(self, mol, atom, storage, layers, idx_dict, is_first=False):
"""
Parameters
----------
mol : ChemPer Mol
molecule containing provided atom
atom : ChemPer Atom
storage: AtomStorage
corresponding to the ChemPer Atom provided
layers : int or 'all'
number of layers left to add (or all)
idx_dict : dict
form {atom index: label} for this smirks_list in this molecule
"""
# if layers is 0 there are no more atoms to add so end the recursion
if layers == 0:
return
# find atom neighbors that are not already included in SMIRKS indexed atoms
atom_neighbors = [(a, mol.get_bond_by_atoms(a,atom)) for a in atom.get_neighbors() \
if a.get_index() not in idx_dict]
# get the smirks indices already added to the storage
# This includes all previous layers since the idx_dict is updated as you go
storage_labels = [e for k,e in idx_dict.items()]
# similar to atoms find neighbors already in the graph that haven't already been used
storage_neighbors = [(s, self.get_connecting_bond(s, storage)) for s in self.get_neighbors(storage) \
if s.label not in storage_labels]
new_pairs = list()
# if this is the first set of atoms added, just make a new
# storage for all neighboring atoms
if is_first:
min_smirks = storage.label * 10
if min_smirks > 0:
min_smirks = min_smirks * -1
for a, b in atom_neighbors:
new_bond_smirks = tuple(sorted([storage.label, min_smirks]))
adding_new_storage = self.add_atom(a,b,storage,
min_smirks, new_bond_smirks)
idx_dict[a.get_index()] = min_smirks
self.atom_by_label[min_smirks] = adding_new_storage
min_smirks -= 1
new_pairs.append((a, adding_new_storage))
else: # this isn't the first set of atoms so you need to
# pair up the atoms with their storage
pairs = self.find_pairs(atom_neighbors, storage_neighbors)
for new_atom, new_bond, new_storage_atom, new_storage_bond in pairs:
# if no storage is paired to this atom skip it
if new_storage_atom is None:
continue
# if there is no atom paired to a storage remove that branch
if new_atom is None:
self.remove_atom(new_storage_atom)
continue
# add atom and bond information to the storage
new_storage_atom.add_atom(new_atom)
new_storage_bond.add_bond(new_bond)
new_pairs.append((new_atom, new_storage_atom))
idx_dict[new_atom.get_index()] = new_storage_atom.label
# Repeat for the extra layers
if layers == 'all':
new_layers = 'all'
else:
new_layers = layers - 1
if new_layers == 0:
return
for new_atom, new_storage in new_pairs:
self._add_layers(mol, new_atom, new_storage, new_layers, idx_dict, is_first)
def find_pairs(self, atoms_and_bonds, storages):
"""
Find pairs is used to determine which current AtomStorage from storages
atoms should be paired with.
This function takes advantage of the maximum scoring function in networkx
to find the pairing with the highest "score".
Scores are determined using functions in the atom and bond storage objects
that compare those storages to the new atom or bond.
        If there are fewer atoms than storages then the unmatched storages are
        paired with None (and unmatched atoms are likewise paired with None).
Parameters
----------
atoms_and_bonds : list of tuples in form (ChemPer Atom, ChemPer Bond, ...)
storages: list of tuples in form (AtomStorage, BondStorage, ...)
Tuples can be of any length as long as they are the same, so for example, in
a bond you might only care about the outer atoms for comparison so you would compare
(atom1,) and (atom2,) with (atom_storage1,) and (atom_storage2,)
However, in a torsion, you might want the atoms and bonds for each outer bond
so in that case you would compare
(atom1, bond1, atom2) and (atom4, bond3, atom3)
with the corresponding storage objects.
Returns
-------
pairs : list of lists
pairs of atoms and storage objects that are most similar,
these lists always come in the form (all atom/bonds, all storage objects)
for the bond example above you might get
[ [atom1, storage1], [atom2, storage2] ]
for the torsion example you might get
[ [atom4, bond4, atom3, atom_storage1, bond_storage1, atom_storage2],
[atom1, bond1, atom2, atom_storage4, bond_storage3, atom_storage3]
"""
        # store paired sets of atoms/bonds and corresponding storages
pairs = list()
# check for odd cases
combo = atoms_and_bonds + storages
# 1. both lists are empty
if len(combo) == 0:
return pairs
nones = [None] * len(combo[0])
# 2. no atom/bond storage
if len(atoms_and_bonds) == 0:
for storage_set in storages:
pairs.append(nones + list(storage_set))
return pairs
# 3. no storages
if len(storages) == 0:
for atom_set in atoms_and_bonds:
pairs.append(list(atom_set) + nones)
return pairs
g = nx.Graph()
atom_dict = dict()
storage_dict = dict()
# create a bipartite graph with atoms/bonds on one side
for idx, atom_set in enumerate(atoms_and_bonds):
g.add_node(idx+1, bipartite=0)
atom_dict[idx+1] = atom_set
# and atom/bond storage objects on the other
for idx, storage_set in enumerate(storages):
g.add_node((idx*-1)-1, bipartite=1)
storage_dict[(idx*-1)-1] = storage_set
# Fill in the weight on each edge of the graph using the compare_atom/bond functions
for a_idx, atom_set in atom_dict.items():
for s_idx, storage_set in storage_dict.items():
# sum up score for every entry in the atom and storage set
score = 0
for sa, a in zip(storage_set, atom_set):
if isinstance(sa, self.BondStorage):
score += sa.compare_bond(a)
else:
score += sa.compare_atom(a)
# score can't be zero so save score+1
g.add_edge(a_idx,s_idx,weight=score+1)
# calculate maximum matching, that is the pairing of atoms/bonds to
        # storage objects that leads to the highest overall score
matching = nx.algorithms.max_weight_matching(g,maxcardinality=False)
# track the atoms assigned a paired storage object
pair_set = set()
# store all pairs
for idx_1, idx_2 in matching:
pair_set.add(idx_1)
pair_set.add(idx_2)
if idx_1 in atom_dict:
atom_set = atom_dict[idx_1]
storage_set = storage_dict[idx_2]
else:
atom_set = atom_dict[idx_2]
storage_set = storage_dict[idx_1]
pairs.append(list(atom_set) + list(storage_set))
# check for missing atom storages
for a_idx, atom_set in atom_dict.items():
if a_idx not in pair_set:
pairs.append(list(atom_set) + nones)
# check for missing atoms
for s_idx, storage_set in storage_dict.items():
if s_idx not in pair_set:
pairs.append(nones + list(storage_set))
return pairs
def _add_mol(self, mol, smirks_atoms_list):
"""
private function for adding a new molecule
This is used by add_mol if the graph is not empty, allowing the user to
not have to track if the graph already has information before adding molecules
Parameters
----------
mol : any Mol
        smirks_atoms_list : list of tuples
            This is a list of tuples of atom indices of the form [ (indices), ... ];
            each atom (by index) in a tuple is added to the relevant
            AtomStorage by its SMIRKS index
"""
for smirks_atoms in smirks_atoms_list:
atom_dict = dict()
sorted_smirks_atoms = self._symmetry_funct(mol, smirks_atoms)
for key, atom_index in enumerate(sorted_smirks_atoms, 1):
atom_dict[atom_index] = key
atom1 = mol.get_atom_by_index(atom_index)
self.atom_by_label[key].add_atom(atom1)
for neighbor_key, neighbor_index in enumerate(sorted_smirks_atoms, 1):
# check for connecting bond
atom2 = mol.get_atom_by_index(neighbor_index)
bond = mol.get_bond_by_atoms(atom1, atom2)
if bond is not None and (neighbor_key, key) in self.bond_by_label:
bond_smirks = tuple(sorted([neighbor_key, key]))
self.bond_by_label[bond_smirks].add_bond(bond)
for atom_label, atom_index in enumerate(sorted_smirks_atoms, 1):
atom = mol.get_atom_by_index(atom_index)
storage = self.atom_by_label[atom_label]
self._add_layers(mol, atom, storage, self.layers, atom_dict)
def _no_symmetry(self, mol, smirks_atoms):
"""
No change is made to the atom order for this molecule
"""
return smirks_atoms
def _bond_symmetry(self, mol, smirks_atoms):
"""
Returns a tuple of two atom indices in the order that
leads to the atoms that match with previously stored atoms.
Parameters
-----------
mol : ChemPer Mol
smirks_atoms : two tuple
tuple of atom indices
Returns
--------
ordered_smirks_atoms : two tuple
tuple of atom indices as they should be added to the graph
"""
# pair atoms and bonds
atom1 = mol.get_atom_by_index(smirks_atoms[0])
atom2 = mol.get_atom_by_index(smirks_atoms[1])
# Find potential storages for those atoms and bonds
atoms_and_bonds = [(atom1,), (atom2,)]
storages = [
(self.atom_by_label[1],),
(self.atom_by_label[2],)
]
pairs = self.find_pairs(atoms_and_bonds, storages)
ordered_smirks_atoms = [p[0].get_index() for p in sorted(pairs, key=lambda x: x[1].label)]
return tuple(ordered_smirks_atoms)
def _angle_symmetry(self, mol, smirks_atoms):
"""
Returns a tuple of three atom indices in the order that
leads to the atoms that match with previously stored atoms.
Parameters
-----------
mol : ChemPer Mol
smirks_atoms : three tuple
tuple of atom indices
Returns
--------
ordered_smirks_atoms : three tuple
tuple of atom indices as they should be added to the graph
"""
# get all three atoms
atom1 = mol.get_atom_by_index(smirks_atoms[0])
atom2 = mol.get_atom_by_index(smirks_atoms[1])
atom3 = mol.get_atom_by_index(smirks_atoms[2])
# get both bonds
bond1 = mol.get_bond_by_atoms(atom1, atom2)
bond2 = mol.get_bond_by_atoms(atom2, atom3)
if None in (bond1, bond2):
return smirks_atoms
# save atom and bond pairs that could be reordered
atoms_and_bonds = [(atom1, bond1), (atom3, bond2)]
# find current atom and bond storage
storages = [
(self.atom_by_label[1], self.bond_by_label[(1,2)]),
(self.atom_by_label[3], self.bond_by_label[(2,3)])
]
pairs = self.find_pairs(atoms_and_bonds, storages)
order = [p[0].get_index() for p in sorted(pairs, key=lambda x: x[2].label)]
return tuple((order[0], smirks_atoms[1], order[1]))
def _proper_torsion_symmetry(self, mol, smirks_atoms):
"""
Returns a tuple of four atom indices for a proper torsion
reordered to match with previously stored atoms.
Parameters
-----------
mol : ChemPer Mol
smirks_atoms : four tuple
tuple of atom indices
Returns
--------
ordered_smirks_atoms : four tuple
tuple of atom indices as they should be added to the graph
"""
# get all four atoms
atom1 = mol.get_atom_by_index(smirks_atoms[0])
atom2 = mol.get_atom_by_index(smirks_atoms[1])
atom3 = mol.get_atom_by_index(smirks_atoms[2])
atom4 = mol.get_atom_by_index(smirks_atoms[3])
# get two relevant bonds
bond1 = mol.get_bond_by_atoms(atom1, atom2)
bond3 = mol.get_bond_by_atoms(atom3, atom4)
if None in (bond1, bond3):
return smirks_atoms
# make pairs
atoms_and_bonds = [ (atom2, bond1, atom1), (atom3, bond3, atom4) ]
# get atom and bond storages
storages = [
(self.atom_by_label[2], self.bond_by_label[(1,2)], self.atom_by_label[1]),
(self.atom_by_label[3], self.bond_by_label[(3,4)], self.atom_by_label[4])
]
pairs = self.find_pairs(atoms_and_bonds, storages)
order = [p[0].get_index() for p in sorted(pairs, key=lambda x: x[3].label)]
if order[0] == smirks_atoms[1]:
return smirks_atoms
temp = list(smirks_atoms)
temp.reverse()
return tuple(temp)
def _improper_torsion_symmetry(self, mol, smirks_atoms):
"""
Returns a tuple of four atom indices for an improper torsion
reordered to match with previously stored atoms.
Parameters
-----------
mol : ChemPer Mol
smirks_atoms : four tuple
tuple of atom indices
Returns
--------
ordered_smirks_atoms : four tuple
tuple of atom indices as they should be added to the graph
"""
# get all four atoms
atom1 = mol.get_atom_by_index(smirks_atoms[0])
atom2 = mol.get_atom_by_index(smirks_atoms[1])
atom3 = mol.get_atom_by_index(smirks_atoms[2])
atom4 = mol.get_atom_by_index(smirks_atoms[3])
# get all three bonds
bond1 = mol.get_bond_by_atoms(atom1, atom2)
bond2 = mol.get_bond_by_atoms(atom2, atom3)
bond3 = mol.get_bond_by_atoms(atom2, atom4)
if None in (bond1, bond2, bond3):
return smirks_atoms
# make pairs of atoms and bonds to be reordered
atoms_and_bonds = [
(atom1, bond1), (atom3, bond2), (atom4, bond3)
]
# find current atom and bond storages
storages = [
(self.atom_by_label[1], self.bond_by_label[(1,2)]),
(self.atom_by_label[3], self.bond_by_label[(2,3)]),
(self.atom_by_label[4], self.bond_by_label[(2,4)])
]
pairs = self.find_pairs(atoms_and_bonds, storages)
order = [p[0].get_index() for p in sorted(pairs, key=lambda x: x[2].label)]
return tuple((order[0], smirks_atoms[1], order[1], order[2]))
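
# Minimal usage sketch (illustrative; assumes an RDKit or OpenEye backend is
# available to chemper's mol_toolkit, and reuses the ethane/propane example from
# the class docstring; `MolFromSmiles` is assumed from chemper's mol_toolkit API):
#
#     mols = [mol_toolkit.MolFromSmiles('CC'), mol_toolkit.MolFromSmiles('CCC')]
#     smirks_atoms_lists = [[(0, 1)], [(0, 1), (1, 2)]]
#     graph = ClusterGraph(mols, smirks_atoms_lists, layers=1)
#     print(graph.as_smirks(compress=True))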
| python |
from abc import ABC
from typing import Any
class IWord(ABC):
command: Any
class Word(IWord):
def __init__(self, command=None):
self.command = command
self.address = 0
def dump(self): return self.command.dump()
@property
def original(self): return self.command.original
def set_instance_params(self, **kwargs): self.command.set_instance_params(**kwargs)
def execute(self): return self.command.execute()
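
# Minimal usage sketch (illustrative; EchoCommand is a stand-in for any command
# object exposing the attributes this wrapper forwards to):
#
#     class EchoCommand:
#         original = "ECHO"
#         def execute(self): return "echoed"
#         def dump(self): return {"op": "echo"}
#         def set_instance_params(self, **kwargs): pass
#
#     word = Word(EchoCommand())
#     print(word.execute(), word.original)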
| python |
#!/usr/bin/env python3
import argparse
import os
def main(dir):
with open(os.path.join(dir, 'text'), 'w', encoding='utf-8') as out_f:
for line in open(os.path.join(dir, 'text.ort2'), encoding='utf-8'):
key, sent = line.strip().split(None, 1)
if len(sent) > 0 and sent[0] == "*":
sent = sent[1:]
sent = sent.replace("[sta]", " ").replace(" ", " ").replace(" ", " ")
sent = sent.replace("_", "")
print("{} {}".format(key, sent), file=out_f)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('dir')
parser.add_argument('lexicon')
args = parser.parse_args()
main(args.dir)
| python |
import json
import unittest
import urllib.request
from multiprocessing.dummy import Pool
from tests.gunicorn_utils import run_gunicorn
def run_code_in_snekbox(code: str) -> tuple[str, int]:
body = {"input": code}
json_data = json.dumps(body).encode("utf-8")
req = urllib.request.Request("http://localhost:8060/eval")
req.add_header("Content-Type", "application/json; charset=utf-8")
req.add_header("Content-Length", str(len(json_data)))
with urllib.request.urlopen(req, json_data, timeout=30) as response:
response_data = response.read().decode("utf-8")
return response_data, response.status
class IntegrationTests(unittest.TestCase):
def test_memory_limit_separate_per_process(self):
"""
Each NsJail process should have its own memory limit.
The memory used by one process should not contribute to the memory cap of other processes.
See https://github.com/python-discord/snekbox/issues/83
"""
with run_gunicorn():
code = "import time; ' ' * 33000000; time.sleep(0.1)"
processes = 3
args = [code] * processes
with Pool(processes) as p:
results = p.map(run_code_in_snekbox, args)
responses, statuses = zip(*results)
self.assertTrue(all(status == 200 for status in statuses))
self.assertTrue(all(json.loads(response)["returncode"] == 0 for response in responses))
| python |
import numpy as np
import tensorflow as tf
import random as rn
from keras.layers import multiply,concatenate,Embedding
from keras.layers.merge import dot
from keras import backend as K
from keras.models import Sequential
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/fchollet/keras/issues/2280#issuecomment-306959926
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
rn.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
x1 = np.array([]).reshape(0,4)
x1 = np.append(x1,np.array([1,2,3,4]).reshape(1,4),axis=0)
x1 = np.append(x1,np.array([3,4,5,6]).reshape(1,4),axis=0)
x1 = np.append(x1,np.array([5,6,7,8]).reshape(1,4),axis=0)
y1 = np.array([]).reshape(0,4)
y1 = np.append(y1,np.array([7,8,9,10]).reshape(1,4),axis=0)
y1 = np.append(y1,np.array([9,10,11,12]).reshape(1,4),axis=0)
y1 = np.append(y1,np.array([11,12,13,14]).reshape(1,4),axis=0)
print(x1-y1)
x = tf.placeholder(tf.float64, [3, 4])
y = tf.placeholder(tf.float64, [3, 4])
labels = tf.placeholder(tf.float64, [256])
xxx = K.sum(K.square(x-y),1,keepdims=True)
yyy = dot([x,K.transpose(y)],(0,1))
zzz = tf.matmul(tf.transpose(x,perm=[0,1]),tf.transpose(y,perm=[1,0]))
hhh = multiply([x,y])
labels_equal = tf.equal(tf.expand_dims(labels, 0), tf.expand_dims(labels, 1))
labels_not_equal = tf.logical_not(labels_equal)
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
xxxx = sess.run(xxx, feed_dict={x:x1,y:y1})
print(xxxx)
yyyy = sess.run(yyy, feed_dict={x:x1,y:y1})
print(yyyy)
zzzz = sess.run(zzz, feed_dict={x:x1,y:y1})
print(zzzz)
hhhh = sess.run(hhh, feed_dict={x:x1,y:y1})
print(hhhh)
labels_test = sess.run(labels_equal, feed_dict={labels:np.random.randint(256, size=(256))})
labels_test_not_equal = sess.run(labels_not_equal, feed_dict={labels:np.random.randint(256, size=(256))})
print(labels_test)
# Rest of code follows ...
# x = K.variable(value=x1)
# y = K.variable(value=y1)
#
# z = K.dot(x,K.transpose(y))
#
# # Here you need to use K.eval() instead of z.eval() because this uses the backend session
# print(K.eval(z))
# x_batch = K.ones(shape=(32, 20, 1))
# y_batch = K.ones(shape=(32, 30, 20))
# xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=[1, 2])
# print(K.int_shape(xy_batch_dot))
#Lambda(lambda x: K.batch_dot(x, x, axes=(2, 2)), output_shape=lambda s: (s[0], s[1], s[1]))
# def multiply(x,n):
# x_prime = tf.reshape(x, (-1, n, 1))
# x_transpose = tf.transpose(x_prime, perm=[0,2, 1])
# return tf.batch_matmul(x_transpose,x_prime)
# Lambda(lambda x: multiply(x, n), output_shape =(n, n))
model = Sequential()
model.add(Embedding(1000, 64, input_length=10))
# the model will take as input an integer matrix of size (batch, input_length).
# the largest integer (i.e. word index) in the input should be no larger than 999 (vocabulary size).
# now model.output_shape == (None, 10, 64), where None is the batch dimension.
input_array = np.random.randint(1000, size=(32, 10))
model.compile('rmsprop', 'mse')
output_array = model.predict(input_array)
assert output_array.shape == (32, 10, 64) | python |
from __future__ import print_function
import os
import sys
from burlap import ServiceSatchel
from burlap.constants import *
from burlap.decorators import task
class ApacheSatchel(ServiceSatchel):
name = 'apache'
post_deploy_command = 'reload'
templates = [
'{site_template}',
]
@property
def packager_system_packages(self):
mod_lst = []
if self.env.modevasive_enabled:
mod_lst.append('libapache2-mod-evasive')
if self.env.modsecurity_enabled:
mod_lst.append('libapache2-modsecurity')
if self.env.modrpaf_enabled:
mod_lst.append('libapache2-mod-rpaf')
if self.env.visitors_enabled:
#TODO:fix? package removed in Ubuntu 16?
mod_lst.append('visitors')
return {
FEDORA: ['httpd'] + mod_lst,
UBUNTU: ['apache2'] + mod_lst,
(UBUNTU, '12.04'): ['apache2', 'libapache2-mod-wsgi'] + mod_lst,
(UBUNTU, '12.10'): ['apache2', 'libapache2-mod-wsgi'] + mod_lst,
(UBUNTU, '14.04'): ['apache2', 'libapache2-mod-wsgi', 'apache2-utils'] + mod_lst,
(UBUNTU, '14.10'): ['apache2', 'libapache2-mod-wsgi', 'apache2-utils'] + mod_lst,
(UBUNTU, '16.04'): ['apache2', 'libapache2-mod-wsgi', 'apache2-utils'] + mod_lst,
(UBUNTU, '16.10'): ['apache2', 'libapache2-mod-wsgi', 'apache2-utils'] + mod_lst,
}
def set_defaults(self):
self.env.service_commands = {
# START:{
# UBUNTU: 'service network-manager start',
# },
# STOP:{
# UBUNTU: 'service network-manager stop',
# },
# DISABLE:{
# UBUNTU: 'chkconfig network-manager off',
# },
# ENABLE:{
# UBUNTU: 'chkconfig network-manager on',
# },
# RESTART:{
# UBUNTU: 'service network-manager restart',
# },
# STATUS:{
# UBUNTU: 'service network-manager status',
# },
START:{
FEDORA: 'systemctl start httpd.service',
UBUNTU: 'service apache2 start',
},
STOP:{
FEDORA: 'systemctl stop httpd.service',
UBUNTU: 'service apache2 stop',
},
DISABLE:{
FEDORA: 'systemctl disable httpd.service',
UBUNTU: 'chkconfig apache2 off',
(UBUNTU, '14.04'): 'update-rc.d -f apache2 remove',
},
ENABLE:{
FEDORA: 'systemctl enable httpd.service',
UBUNTU: 'chkconfig apache2 on',
(UBUNTU, '14.04'): 'update-rc.d apache2 defaults',
},
RELOAD:{
FEDORA: 'systemctl reload httpd.service',
UBUNTU: 'service apache2 reload',
},
RESTART:{
FEDORA: 'systemctl restart httpd.service',
#UBUNTU: 'service apache2 restart',
# Note, the sleep 5 is necessary because the stop/start appears to
# happen in the background but gets aborted if Fabric exits before
# it completes.
UBUNTU: 'service apache2 restart; sleep 3',
},
}
# An Apache-conf file and filename friendly string that uniquely identifies
# your web application.
self.env.application_name = None
# The Jinja-formatted template file used to render site configurations.
self.env.site_template = 'apache/apache_site.template.conf'
self.env.error_log = '/var/log/apache2/error.log'
self.env.log_level = 'warn'
self.env.auth_basic = False
self.env.auth_basic_authuserfile = '{apache_docroot}/.htpasswd_{apache_site}'
self.env.auth_basic_users = [] # [(user,password)]
# If true, activates a rewrite rule that causes domain.com to redirect
# to www.domain.com.
self.env.enforce_subdomain = True
self.env.ssl = True
self.env.ssl_chmod = 440
# A list of path patterns that should have HTTPS enforced.
self.env.ssl_secure_paths_enforce = True
self.env.ssl_secure_paths = ['/admin/(.*)']
self.env.web_user = 'www-data'
self.env.web_group = 'www-data'
self.env.wsgi_user = 'www-data'
self.env.wsgi_group = 'www-data'
self.env.chmod = 775
self.env.mods_enabled = ['rewrite', 'wsgi', 'ssl']
# The value of the Apache's ServerName field. Usually should be set
# to the domain.
self.env.server_name = None
self.env.server_aliases_template = ''
self.env.docroot = '/usr/local/{apache_application_name}'
self.env.ports_path = '{apache_root}/ports.conf'
self.env.ssl_path = '{apache_root}/ssl'
self.env.domain_with_sub_template = ''
self.env.domain_without_sub_template = ''
self.env.domain_with_sub = None
self.env.domain_without_sub = None
self.env.wsgi_enabled = False
self.env.wsgi_template = 'django/django.template.wsgi'
self.env.wsgi_python_path = None
self.env.wsgi_scriptalias = None
self.env.wsgi_server_memory_gb = 8
self.env.wsgi_processes = 5
self.env.wsgi_threads = 15
self.env.domain_redirect_templates = [] # [(wrong_domain,right_domain)]
self.env.domain_redirects = [] # [(wrong_domain,right_domain)]
self.env.extra_rewrite_rules = ''
self.env.modrpaf_enabled = False
self.env.visitors_enabled = False
self.env.modevasive_enabled = False
self.env.modevasive_DOSEmailNotify = 'admin@localhost'
self.env.modevasive_DOSPageInterval = 1 # seconds
self.env.modevasive_DOSPageCount = 2
self.env.modevasive_DOSSiteCount = 50
self.env.modevasive_DOSSiteInterval = 1 # seconds
self.env.modevasive_DOSBlockingPeriod = 10 # seconds
self.env.modsecurity_enabled = False
self.env.modsecurity_download_url = 'https://github.com/SpiderLabs/owasp-modsecurity-crs/tarball/master'
# OS specific default settings.
self.env.specifics = type(self.genv)()
self.env.specifics[LINUX] = type(self.genv)()
self.env.specifics[LINUX][FEDORA] = type(self.genv)()
self.env.specifics[LINUX][FEDORA].root = '/etc/httpd'
self.env.specifics[LINUX][FEDORA].conf = '/etc/httpd/conf/httpd.conf'
self.env.specifics[LINUX][FEDORA].sites_available = '/etc/httpd/sites-available'
self.env.specifics[LINUX][FEDORA].sites_enabled = '/etc/httpd/sites-enabled'
self.env.specifics[LINUX][FEDORA].log_dir = '/var/log/httpd'
self.env.specifics[LINUX][FEDORA].pid = '/var/run/httpd/httpd.pid'
self.env.specifics[LINUX][UBUNTU] = type(self.genv)()
self.env.specifics[LINUX][UBUNTU].root = '/etc/apache2'
self.env.specifics[LINUX][UBUNTU].conf = '/etc/apache2/httpd.conf'
self.env.specifics[LINUX][UBUNTU].sites_available = '/etc/apache2/sites-available'
self.env.specifics[LINUX][UBUNTU].sites_enabled = '/etc/apache2/sites-enabled'
self.env.specifics[LINUX][UBUNTU].log_dir = '/var/log/apache2'
self.env.specifics[LINUX][UBUNTU].pid = '/var/run/apache2/apache2.pid'
self.env.delete_site_command = None
self.env.manage_httpd_conf = True
self.env.manage_ports_conf = True
self.env.manage_site_conf = True
self.env.ssl_certificates = None
self.env.ssl_certificates_templates = []
# Apache site config files use a similar syntax to our template syntax,
# so instead of having to escape all of Apache's variables, we list them here so
# our templating system knows to not try interpolating them.
self.env.ignored_template_variables = [
'APACHE_LOG_DIR',
'GLOBAL',
'DOCUMENT_ROOT',
'SCRIPT_FILENAME',
'SERVER_NAME',
'REQUEST_URI',
'GROUP',
'Referer',
'User-Agent',
'X-Forwarded-For',
'HTTP:X-Forwarded-Proto',
'HTTPS',
'HTTP',
'HTTP_HOST',
'HTTP_USER_AGENT',
'REMOTE_ADDR',
]
# The local and remote relative directory where the SSL certificates are stored.
self.env.ssl_dir_local = 'ssl'
# An optional segment to insert into the domain, customizable by role.
# Useful for easily keying domain-local.com/domain-dev.com/domain-staging.com.
self.env.locale = ''
self.env.sync_sets = {} # {name:[dict(local_path='static/', remote_path='$AWS_BUCKET:/')]}
# This will be appended to the custom Apache configuration file.
self.env.httpd_conf_append = []
@task
def enable_mod(self, name):
self.sudo('a2enmod %s' % name)
@task
def disable_mod(self, name):
with self.settings(warn_only=True):
self.sudo('a2dismod %s' % name)
@task
def enable_site(self, name):
self.sudo('a2ensite %s' % name)
@task
def disable_site(self, name):
self.sudo('a2dissite %s' % name)
@task
def optimize_wsgi_processes(self):
"""
Based on the number of sites per server and the number of resources on the server,
calculates the optimal number of processes that should be allocated for each WSGI site.
"""
r = self.local_renderer
#r.env.wsgi_processes = 5
r.env.wsgi_server_memory_gb = 8
verbose = self.verbose
all_sites = list(self.iter_sites(site=ALL, setter=self.set_site_specifics))
#(current_mem/current_sites)/current_process = ()
#(16/x)/(8/16) = y
#(16/x)*(16/8) = y
#(16*16)/(num_sites*8) = y
# @task
# def visitors(self, force=0):
# """
# Generates an Apache access report using the Visitors command line tool.
# Requires the APACHE2_VISITORS service to be enabled for the current host.
# """
# if not int(force):
# assert ApacheVisitors.name.upper() in self.genv.services or ApacheVisitors.name.lower() in self.genv.services, \
# 'Visitors has not been configured for this host.'
# self.run('visitors -o text /var/log/apache2/%(apache_application_name)s-access.log* | less' % self.genv)
def create_local_renderer(self):
"""
Instantiates a new local renderer.
Override this to do any additional initialization.
"""
r = super(ApacheSatchel, self).create_local_renderer()
# Dynamically set values based on target operating system.
os_version = self.os_version
apache_specifics = r.env.specifics[os_version.type][os_version.distro]
r.env.update(apache_specifics)
return r
# def iter_certificates(self):
# if self.verbose:
# print('apache_ssl_domain:', self.genv.apache_ssl_domain, file=sys.stderr)
# for cert_type, cert_file_template in self.genv.apache_ssl_certificates_templates:
# if self.verbose:
# print('cert_type, cert_file_template:', cert_type, cert_file_template, file=sys.stderr)
# _local_cert_file = os.path.join(self.genv.apache_ssl_dir_local, cert_file_template % self.genv)
# local_cert_file = self.find_template(_local_cert_file)
# assert local_cert_file, 'Unable to find local certificate file: %s' % (_local_cert_file,)
# remote_cert_file = os.path.join(self.genv.apache_ssl_dir, cert_file_template % self.genv)
# yield cert_type, local_cert_file, remote_cert_file
#
# @task
# def install_ssl(self, site=ALL):
# from burlap.common import iter_sites
# verbose = self.verbose
#
# for site, site_data in iter_sites(site=site, setter=self.set_site_specifics):
#
# site_secure = site+'_secure'
# if site_secure not in self.genv.sites:
# continue
# self.set_site_specifics(site_secure)
#
# self.sudo_or_dryrun('mkdir -p %(apache_ssl_dir)s' % self.genv)
#
# if self.genv.apache_ssl:
# for cert_type, local_cert_file, remote_cert_file in self.iter_certificates():
# if verbose:
# print('='*80)
# print('Installing certificate %s...' % (remote_cert_file,))
# self.put_or_dryrun(
# local_path=local_cert_file,
# remote_path=remote_cert_file,
# use_sudo=True)
#
# self.sudo_or_dryrun('mkdir -p %(apache_ssl_dir)s' % self.genv)
# self.sudo_or_dryrun('chown -R %(apache_web_user)s:%(apache_web_group)s %(apache_ssl_dir)s' % self.genv)
# self.sudo_or_dryrun('chmod -R %(apache_ssl_chmod)s %(apache_ssl_dir)s' % self.genv)
@task
def install_auth_basic_user_file(self, site=None):
"""
Installs users for basic httpd auth.
"""
r = self.local_renderer
hostname = self.current_hostname
target_sites = self.genv.available_sites_by_host.get(hostname, None)
for _site, site_data in self.iter_sites(site=site, setter=self.set_site_specifics):
if self.verbose:
print('~'*80, file=sys.stderr)
print('Site:', _site, file=sys.stderr)
print('env.apache_auth_basic:', r.env.auth_basic, file=sys.stderr)
# Only load site configurations that are allowed for this host.
if target_sites is not None:
assert isinstance(target_sites, (tuple, list))
if _site not in target_sites:
continue
if not r.env.auth_basic:
continue
assert r.env.auth_basic_users, 'No apache auth users specified.'
for username, password in r.env.auth_basic_users:
r.env.auth_basic_username = username
r.env.auth_basic_password = password
r.env.apache_site = _site
r.env.fn = r.format(r.env.auth_basic_authuserfile)
if self.files.exists(r.env.fn):
r.sudo('htpasswd -b {fn} {auth_basic_username} {auth_basic_password}')
else:
r.sudo('htpasswd -b -c {fn} {auth_basic_username} {auth_basic_password}')
@task
def install_auth_basic_user_file_all(self):
self.install_auth_basic_user_file(site='all')
@task
def view_error_log(self):
self.run('tail -f {apache_error_log}')
@task
def sync_media(self, sync_set=None, clean=0, iter_local_paths=0):
"""
Uploads select media to an Apache accessible directory.
"""
#from burlap.dj import render_remote_paths
# Ensure a site is selected.
self.genv.SITE = self.genv.SITE or self.genv.default_site
# apache.get_apache_settings()
#render_remote_paths()
r = self.local_renderer
clean = int(clean)
self.vprint('Getting site data for %s...' % self.genv.SITE)
self.set_site_specifics(self.genv.SITE)
#site_data = self.genv.sites[self.genv.SITE]
#self.genv.update(site_data)
sync_sets = r.env.sync_sets
if sync_set:
sync_sets = [sync_set]
ret_paths = []
for _sync_set in sync_sets:
for paths in r.env.sync_sets[_sync_set]:
#print 'paths:',paths
r.env.sync_local_path = os.path.abspath(paths['local_path'] % self.genv)
if paths['local_path'].endswith('/') and not r.env.sync_local_path.endswith('/'):
r.env.sync_local_path += '/'
if iter_local_paths:
ret_paths.append(r.env.sync_local_path)
continue
r.env.sync_remote_path = paths['remote_path'] % self.genv
if clean:
r.sudo('rm -Rf {apache_sync_remote_path}')
print('Syncing %s to %s...' % (r.env.sync_local_path, r.env.sync_remote_path))
r.env.tmp_chmod = paths.get('chmod', r.env.chmod)
#with settings(warn_only=True):
r.sudo('mkdir -p {apache_sync_remote_path}')
r.sudo('chmod -R {apache_tmp_chmod} {apache_sync_remote_path}')
r.local('rsync -rvz --progress --recursive --no-p --no-g '
'--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" {apache_sync_local_path} {user}@{host_string}:{apache_sync_remote_path}')
r.sudo('chown -R {apache_web_user}:{apache_web_group} {apache_sync_remote_path}')
if iter_local_paths:
return ret_paths
def get_media_timestamp(self):
"""
Called after a deployment to record any data necessary to detect changes
for a future deployment.
"""
from burlap.common import get_last_modified_timestamp
data = 0
for path in self.sync_media(iter_local_paths=1):
data = min(data, get_last_modified_timestamp(path) or data)
#TODO:hash media names and content
if self.verbose:
print('date:', data)
return data
@task
def record_manifest(self):
"""
Called after a deployment to record any data necessary to detect changes
for a future deployment.
"""
manifest = super(ApacheSatchel, self).record_manifest()
manifest['available_sites'] = self.genv.available_sites
manifest['available_sites_by_host'] = self.genv.available_sites_by_host
manifest['media_timestamp'] = self.get_media_timestamp()
return manifest
@task
def configure_modevasive(self):
"""
Installs the mod-evasive Apache module for combating DDOS attacks.
https://www.linode.com/docs/websites/apache-tips-and-tricks/modevasive-on-apache
"""
r = self.local_renderer
if r.env.modevasive_enabled:
self.install_packages()
# Write conf for each Ubuntu version since they don't conflict.
fn = r.render_to_file('apache/apache_modevasive.template.conf')
# Ubuntu 12.04
r.put(
local_path=fn,
remote_path='/etc/apache2/mods-available/mod-evasive.conf',
use_sudo=True)
# Ubuntu 14.04
r.put(
local_path=fn,
remote_path='/etc/apache2/mods-available/evasive.conf',
use_sudo=True)
self.enable_mod('evasive')
else:
# print('self.last_manifest:', self.last_manifest)
# print('a:', self.last_manifest.apache_modevasive_enabled)
# print('b:', self.last_manifest.modevasive_enabled)
if self.last_manifest.modevasive_enabled:
self.disable_mod('evasive')
@task
def configure_modsecurity(self):
"""
Installs the mod-security Apache module.
https://www.modsecurity.org
"""
r = self.local_renderer
if r.env.modsecurity_enabled and not self.last_manifest.modsecurity_enabled:
self.install_packages()
# Write modsecurity.conf.
fn = self.render_to_file('apache/apache_modsecurity.template.conf')
r.put(local_path=fn, remote_path='/etc/modsecurity/modsecurity.conf', use_sudo=True)
# Write OWASP rules.
r.env.modsecurity_download_filename = '/tmp/owasp-modsecurity-crs.tar.gz'
r.sudo('cd /tmp; wget --output-document={apache_modsecurity_download_filename} {apache_modsecurity_download_url}')
r.env.modsecurity_download_top = r.sudo(
"cd /tmp; "
"tar tzf %(apache_modsecurity_download_filename)s | sed -e 's@/.*@@' | uniq" % self.genv)
r.sudo('cd /tmp; tar -zxvf %(apache_modsecurity_download_filename)s' % self.genv)
r.sudo('cd /tmp; cp -R %(apache_modsecurity_download_top)s/* /etc/modsecurity/' % self.genv)
r.sudo('mv /etc/modsecurity/modsecurity_crs_10_setup.conf.example /etc/modsecurity/modsecurity_crs_10_setup.conf')
r.sudo('rm -f /etc/modsecurity/activated_rules/*')
r.sudo('cd /etc/modsecurity/base_rules; '
'for f in * ; do ln -s /etc/modsecurity/base_rules/$f /etc/modsecurity/activated_rules/$f ; done')
r.sudo('cd /etc/modsecurity/optional_rules; '
'for f in * ; do ln -s /etc/modsecurity/optional_rules/$f /etc/modsecurity/activated_rules/$f ; done')
r.env.httpd_conf_append.append('Include "/etc/modsecurity/activated_rules/*.conf"')
            self.enable_mod('security2')  # ModSecurity's Apache module is named 'security2' on Debian/Ubuntu
self.enable_mod('headers')
elif not self.env.modsecurity_enabled and self.last_manifest.modsecurity_enabled:
self.disable_mod('modsecurity')
@task
def configure_modrpaf(self):
"""
Installs the mod-rpaf Apache module.
https://github.com/gnif/mod_rpaf
"""
r = self.local_renderer
if r.env.modrpaf_enabled:
self.install_packages()
self.enable_mod('rpaf')
else:
if self.last_manifest.modrpaf_enabled:
self.disable_mod('mod_rpaf')
@task
def configure_site(self, full=1, site=None, delete_old=0):
"""
Configures Apache to host one or more websites.
"""
from burlap import service
r = self.local_renderer
print('Configuring Apache...', file=sys.stderr)
site = site or self.genv.SITE
if int(delete_old) and site == ALL:
# Delete all existing enabled and available sites.
r.sudo('rm -f {sites_available}/*')
r.sudo('rm -f {sites_enabled}/*')
if r.env.manage_site_conf:
# Run an optional customizable command to clear or delete old sites before writing the new ones.
if r.env.delete_site_command:
r.sudo(r.env.delete_site_command)
for _site, site_data in self.iter_sites(site=site, setter=self.set_site_specifics):
r = self.local_renderer
#r.env.site = site
if self.verbose:
print('-'*80, file=sys.stderr)
print('Site:', _site, file=sys.stderr)
print('-'*80, file=sys.stderr)
r.env.apache_site = _site
r.env.server_name = r.format(r.env.domain_template)
print('r.env.server_name:', r.env.server_name)
# Write WSGI template
if r.env.wsgi_enabled:
r.pc('Writing WSGI template for site %s...' % _site)
r.env.wsgi_scriptalias = r.format(r.env.wsgi_scriptalias)
fn = self.render_to_file(r.env.wsgi_template)
r.env.wsgi_dir = r.env.remote_dir = os.path.split(r.env.wsgi_scriptalias)[0]
r.sudo('mkdir -p {remote_dir}')
r.put(local_path=fn, remote_path=r.env.wsgi_scriptalias, use_sudo=True)
# Write site configuration.
r.pc('Writing site configuration for site %s...' % _site)
from functools import partial
genv = r.collect_genv()
genv['current_hostname'] = self.current_hostname
print('*'*80)
print('apache_wsgi_scriptalias:', genv.apache_wsgi_scriptalias)
print('apache_auth_basic_authuserfile:', self.env.auth_basic_authuserfile)
r.env.auth_basic_authuserfile = r.format(self.env.auth_basic_authuserfile)
fn = self.render_to_file(
self.env.site_template,
extra=genv,
formatter=partial(r.format, ignored_variables=self.env.ignored_template_variables))
r.env.site_conf = _site+'.conf'
r.env.site_conf_fqfn = os.path.join(r.env.sites_available, r.env.site_conf)
r.put(local_path=fn, remote_path=r.env.site_conf_fqfn, use_sudo=True)
self.enable_site(_site)
self.clear_local_renderer()
# Enable modules.
for mod_name in r.env.mods_enabled:
with self.settings(warn_only=True):
self.enable_mod(mod_name)
if int(full):
# Write master Apache configuration file.
if r.env.manage_httpd_conf:
fn = self.render_to_file('apache/apache_httpd.template.conf')
r.put(local_path=fn, remote_path=r.env.conf, use_sudo=True)
# Write Apache listening ports configuration.
if r.env.manage_ports_conf:
fn = self.render_to_file('apache/apache_ports.template.conf')
r.put(local_path=fn, remote_path=r.env.ports_path, use_sudo=True)
r.sudo('chown -R {apache_web_user}:{apache_web_group} {apache_root}')
@task(precursors=['packager', 'user', 'hostname', 'ip'])
def configure(self):
self.configure_modevasive()
self.configure_modsecurity()
self.configure_modrpaf()
self.configure_site(full=1, site=ALL)
self.install_auth_basic_user_file(site=ALL)
self.sync_media()
#self.install_ssl(site=ALL)
apache = ApacheSatchel()
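
# Added usage note (assumption, not from the original module): burlap registers each
# satchel's @task methods as Fabric tasks under the satchel name, so typical shell
# invocations would look like:
#   fab prod apache.configure
#   fab prod apache.configure_site:full=1,site=all
#   fab prod apache.view_error_log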
| python |
import requests
import nels_master_api
def get_nels_ids():
try:
ids = []
response = requests.get(nels_master_api.get_full_url("users/ids" ),auth=(nels_master_api.CLIENT_KEY, nels_master_api.CLIENT_SECRET))
if(response.status_code == requests.codes.ok):
json_response = response.json()
for uid in json_response:
ids.append(uid[u'id'])
return ids
except:
return None
def get_user(nels_id):
try:
response = requests.get(nels_master_api.get_full_url("users/%s" %nels_id ),auth=(nels_master_api.CLIENT_KEY, nels_master_api.CLIENT_SECRET))
if(response.status_code == requests.codes.ok):
return response.json()
except:
return None | python |
import pandas as pd
import math
data = pd.read_csv('data/DATALOG2.CSV', delimiter=",",
names=['date', 'time', 'lat', 'lon', 'vgps', 'velocity', 'course', 'heading', 'pitch', 'roll'])
# data['vhead'] = data['velocity']*math.cos(math.pi/180*(data['course']-data['heading']))
data['drift'] = data.apply(lambda row: math.fabs(row['velocity'] *
math.sin(math.pi / 180 * math.fabs(row['course'] - row['heading']))),
axis=1)
data['vhead'] = data.apply(lambda row: math.fabs(row['velocity'] *
math.cos(math.pi / 180 * (row['course'] - row['heading']))), axis=1)
print(data)
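
# Added sketch (assumption, not in the original script): the same two columns can be
# computed without DataFrame.apply by operating on whole Series with numpy, which is
# usually faster on large logs; column names follow the read_csv call above.
import numpy as np
data['drift_vec'] = np.abs(data['velocity'] * np.sin(np.radians(np.abs(data['course'] - data['heading']))))
data['vhead_vec'] = np.abs(data['velocity'] * np.cos(np.radians(data['course'] - data['heading'])))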
| python |
import logging
import multiprocessing
import multiprocessing_logging
import os
log_level_from_env = os.environ.get('LOGLEVEL', '').upper()
log_format = '%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(funcName)s %(message)s'
log_level = logging.DEBUG if log_level_from_env == 'DEBUG' else logging.INFO
logging.basicConfig(format=log_format, level=log_level)
logger = logging.getLogger(__name__)
mp_logger = multiprocessing.get_logger()
# mp_handler = logging.StreamHandler()
# mp_handler.setLevel(log_level)
# mp_handler.setFormatter(logging.Formatter(log_format))
# mp_logger.addHandler(mp_handler)
# Handle records from parallel processes to the main process so that they are handled correctly.
multiprocessing_logging.install_mp_handler()
def _make_debug_record(message):
fn, lno, func, sinfo = logger.findCaller()
record = logger.makeRecord(logger.name, logging.DEBUG, fn, lno, message, None, None,
func=func, extra=None, sinfo=sinfo)
return record
def debug(message: str):
record = _make_debug_record(message)
logger.handle(record)
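
# Added usage sketch (not part of the original module): records emitted from worker
# processes are funnelled back to the main process by the installed mp handler.
if __name__ == "__main__":
    with multiprocessing.Pool(2) as pool:
        pool.map(debug, ["message from a worker", "another worker message"])
    debug("message from the main process")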
| python |
## Data and Visual Analytics - Homework 4
## Georgia Institute of Technology
## Applying ML algorithms to detect seizure
import numpy as np
import pandas as pd
import time
from sklearn.model_selection import cross_val_score, GridSearchCV, cross_validate, train_test_split
from sklearn.metrics import accuracy_score, classification_report
from sklearn.svm import SVC
from sklearn.linear_model import LinearRegression
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler, normalize
######################################### Reading and Splitting the Data ###############################################
# XXX
# TODO: Read in all the data. Replace the 'xxx' with the path to the data set.
# XXX
data = pd.read_csv('seizure_dataset.csv')
# Separate out the x_data and y_data.
x_data = data.loc[:, data.columns != "y"]
y_data = data.loc[:, "y"]
# The random state to use while splitting the data.
random_state = 100
# XXX
# TODO: Split 70% of the data into training and 30% into test sets. Call them x_train, x_test, y_train and y_test.
# Use the train_test_split method in sklearn with the parameter 'shuffle' set to true and the 'random_state' set to 100.
x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size = 0.3, random_state = random_state)
# XXX
# ############################################### Linear Regression ###################################################
# XXX
# TODO: Create a LinearRegression classifier and train it.
linearReg = LinearRegression().fit(x_train,y_train)
# XXX
# XXX
# TODO: Test its accuracy (on the training set) using the accuracy_score method.
print("For Linear Regression:")
y_predict_train = linearReg.predict(x_train)
y_predict_train_round = [round(k) for k in y_predict_train]
train_score = accuracy_score(y_train, y_predict_train_round)
print(" Accuracy for training set: " + str(train_score))
# TODO: Test its accuracy (on the testing set) using the accuracy_score method.
y_predict_test = linearReg.predict(x_test)
y_predict_test_round = [round(k) for k in y_predict_test]
test_score = accuracy_score(y_test, y_predict_test_round)
print(" Accuracy for testing set: " + str(test_score))
# Note: Use y_predict.round() to get 1 or 0 as the output.
# XXX
# ############################################### Multi Layer Perceptron #################################################
# XXX
# TODO: Create an MLPClassifier and train it.
mlpReg = MLPClassifier().fit(x_train,y_train)
# XXX
# XXX
# TODO: Test its accuracy on the training set using the accuracy_score method.
print("For Multi Layer Perceptron:")
y_predict_train_mlp = mlpReg.predict(x_train)
y_predict_train_mlp_round = [round(k) for k in y_predict_train_mlp]
train_mlp_score = accuracy_score(y_train, y_predict_train_mlp_round)
print(" Accuracy for training set: " + str(train_mlp_score))
# TODO: Test its accuracy on the test set using the accuracy_score method.
y_predict_test_mlp = mlpReg.predict(x_test)
y_predict_test_mlp_round = [round(k) for k in y_predict_test_mlp]
test_mlp_score = accuracy_score(y_test, y_predict_test_mlp_round)
print(" Accuracy for testing set: " + str(test_mlp_score))
# XXX
# ############################################### Random Forest Classifier ##############################################
# XXX
# TODO: Create a RandomForestClassifier and train it.
rfReg = RandomForestClassifier().fit(x_train, y_train)
# XXX
# XXX
# TODO: Test its accuracy on the training set using the accuracy_score method.
print("For Random Forest Classifier:")
y_predict_train_rf = rfReg.predict(x_train)
y_predict_train_rf_round = [round(k) for k in y_predict_train_rf]
train_rf_score = accuracy_score(y_train, y_predict_train_rf_round)
print(" (Default) Accuracy for training set: " + str(train_rf_score))
# TODO: Test its accuracy on the test set using the accuracy_score method.
y_predict_test_rf = rfReg.predict(x_test)
y_predict_test_rf_round = [round(k) for k in y_predict_test_rf]
test_rf_score = accuracy_score(y_test, y_predict_test_rf_round)
print(" (Default) Accuracy for testing set: " + str(test_rf_score))
# -----------------------------------------------------------------------
rfReg_best = RandomForestClassifier(n_estimators=60, max_depth=50).fit(x_train, y_train)
y_predict_train_rf_best = rfReg_best.predict(x_train)
y_predict_train_rf_round_best = [round(k) for k in y_predict_train_rf_best]
train_rf_score_best = accuracy_score(y_train, y_predict_train_rf_round_best)
print(" (Best) Accuracy for training set: " + str(train_rf_score_best))
# TODO: Test its accuracy on the test set using the accuracy_score method.
y_predict_test_rf_best = rfReg_best.predict(x_test)
y_predict_test_rf_round_best = [round(k) for k in y_predict_test_rf_best]
test_rf_score_best = accuracy_score(y_test, y_predict_test_rf_round_best)
print(" (Best) Accuracy for testing set: " + str(test_rf_score_best))
# XXX
# XXX
# TODO: Tune the hyper-parameters 'n_estimators' and 'max_depth'.
# Print the best params, using .best_params_, and print the best score, using .best_score_.
parameters_rf = {'n_estimators':[10, 20, 40, 60, 80, 100, 120, 140],
'max_depth':[6, 8, 10, 30, 50, 75, 100]}
rfReg_tune = RandomForestClassifier()
rlf = GridSearchCV(rfReg_tune, parameters_rf, cv = 10)
rlf.fit(x_train, y_train)
print("    Best parameters after CV:")
print(" "+str(rlf.best_params_))
print(" "+str(rlf.best_score_))
# XXX
# ############################################ Support Vector Machine ###################################################
# XXX
# TODO: Pre-process the data to standardize or normalize it, otherwise the grid search will take much longer
x_train_nor = normalize(x_train)
x_test_nor = normalize(x_test)
# TODO: Create a SVC classifier and train it.
rfReg = SVC(gamma = 'auto').fit(x_train_nor, y_train)
# XXX
# XXX
# TODO: Test its accuracy on the training set using the accuracy_score method.
print("For Support Vector Machine:")
y_predict_train_rf = rfReg.predict(x_train_nor)
y_predict_train_rf_round = [round(k) for k in y_predict_train_rf]
train_rf_score = accuracy_score(y_train, y_predict_train_rf_round)
print(" (Default) Accuracy for training set: " + str(train_rf_score))
# TODO: Test its accuracy on the test set using the accuracy_score method.
y_predict_test_rf = rfReg.predict(x_test_nor)
y_predict_test_rf_round = [round(k) for k in y_predict_test_rf]
test_rf_score = accuracy_score(y_test, y_predict_test_rf_round)
print(" (Default) Accuracy for testing set: " + str(test_rf_score))
# -----------------------------------------------------------
rfReg_best = SVC(gamma = 'auto', kernel='linear', C=0.001).fit(x_train_nor, y_train)
y_predict_train_rf_best = rfReg_best.predict(x_train_nor)
y_predict_train_rf_round_best = [round(k) for k in y_predict_train_rf_best]
train_rf_score_best = accuracy_score(y_train, y_predict_train_rf_round_best)
print(" (Best) Accuracy for training set: " + str(train_rf_score_best))
# TODO: Test its accuracy on the test set using the accuracy_score method.
y_predict_test_rf_best = rfReg_best.predict(x_test_nor)
y_predict_test_rf_round_best = [round(k) for k in y_predict_test_rf_best]
test_rf_score_best = accuracy_score(y_test, y_predict_test_rf_round_best)
print(" (Best) Accuracy for testing set: " + str(test_rf_score_best))
# XXX
# XXX
# TODO: Tune the hyper-parameters 'C' and 'kernel' (use rbf and linear).
# Print the best params, using .best_params_, and print the best score, using .best_score_.
parameters_rf = {'kernel':('linear', 'rbf'), 'C':[0.001, 0.01, 0.1, 1, 10, 100]}
rfReg_tune = SVC(gamma = 'auto')
clf = GridSearchCV(rfReg_tune, parameters_rf, cv = 10, return_train_score=True)
clf.fit(x_train_nor, y_train)
print("    Best parameters after CV:")
print(" "+str(clf.best_params_))
print(" "+str(clf.best_score_))
print("mean training score:")
print(clf.cv_results_['mean_train_score'])
print("mean testing score:")
print(clf.cv_results_['mean_test_score'])
print("mean fit time:")
print(clf.cv_results_['mean_fit_time'])
# XXX
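
# Added sketch (not part of the original homework code): classification_report is imported
# above but never used; this is one way it could summarize the tuned SVC's test predictions.
print(classification_report(y_test, y_predict_test_rf_round_best))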
| python |
#!/usr/bin/env python
from __future__ import absolute_import
import os
import shutil
import time
import datetime
from flask_script import Manager
from modelconvert import create_app
from modelconvert.utils import fs
app = create_app()
manager = Manager(app)
@manager.command
def run():
app.run(threaded=True)
@manager.command
def celeryworker():
"""
Runs celery worker within the Flask app context
"""
from modelconvert.extensions import celery
with app.app_context():
if app.config['DEBUG']:
celery.worker_main(['worker', '-E', '-l', 'DEBUG'])
else:
celery.worker_main(['worker', '-E', '-l', 'INFO'])
#
# FIXME: move this to a celerybeats task
#
@manager.command
def cleanup(longevity=151200, uploads=False):
"""
Removes generated files. Use cleanup -h for more info
"""
download_path = os.path.abspath(app.config["DOWNLOAD_PATH"])
upload_path = os.path.abspath(app.config["UPLOAD_PATH"])
# simple protection against dummies. However it is questionable to
# give them Unix rm command in this case ;)
if not 'tmp/downloads' in download_path or download_path == '/':
print("You are using a non-standard location for the download path.")
print("Please create your own deletion procedure. If your fs is")
print("mounted with mtime support, this command will work fine:\n")
        print(" find /your/path -mtime +30 -exec rm -rf '{}' \\;\n")
exit(-1)
#longevity = 6300 * 24
longevity = int(longevity)
    current_time = time.time()
print("Removing files older than {0}".format(datetime.timedelta(seconds=longevity)))
def _clean(path, longevity):
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
filepath = os.path.join(root, name)
filetime = os.path.getmtime(filepath)
if current_time - filetime > longevity:
print("Removing file %s" % filepath)
os.remove(filepath)
for name in dirs:
dirpath = os.path.join(root, name)
#dirtime = os.path.getmtime(dirpath)
#if current_time - dirtime > longevity:
if not os.listdir(dirpath):
print("Removing directory %s" % dirpath)
os.rmdir(dirpath)
_clean(download_path, longevity)
if uploads:
_clean(upload_path, longevity)
@manager.command
def purge():
""" Kill all files in download paths NOW"""
cleanup(0, uploads=True)
@manager.command
def mkdirs():
"""
Create required directories from settings
"""
dirs = [
app.config['UPLOAD_PATH'],
app.config['DOWNLOAD_PATH'],
]
for directory in dirs:
directory = os.path.abspath(directory)
print("Creating directory {0}".format(directory))
fs.mkdir_p(directory)
if __name__ == "__main__":
manager.run()
| python |
"""Configuration classes for ``varfish-cli case *`` commands."""
import attr
import uuid
import typing
from ..common import CommonConfig
@attr.s(frozen=True, auto_attribs=True)
class CaseConfig:
"""Configuration for the ``varfish-cli case`` command."""
#: Global configuration.
global_config: CommonConfig
@staticmethod
def create(args, global_config, toml_config=None):
# toml_config = toml_config or {}
return CaseConfig(global_config=global_config)
@attr.s(frozen=True, auto_attribs=True)
class CaseListConfig:
"""Configuration for the ``varfish-cli case list`` command."""
#: Case configuration.
case_config: CaseConfig
#: UUID of the case to pull.
project_uuid: uuid.UUID
@staticmethod
def create(args, case_config, toml_config=None):
_ = toml_config
# toml_config = toml_config or {}
return CaseListConfig(case_config=case_config, project_uuid=args.project_uuid)
@attr.s(frozen=True, auto_attribs=True)
class CaseListImportInfoConfig:
"""Configuration for the ``varfish-cli case list-import-info`` command."""
#: Case configuration.
case_config: CaseConfig
#: UUID of the case to pull.
project_uuid: uuid.UUID
#: Optionally, owner to query for.
owner: typing.Optional[str] = None
@staticmethod
def create(args, case_config, toml_config=None):
# toml_config = toml_config or {}
return CaseListImportInfoConfig(
case_config=case_config, project_uuid=args.project_uuid, owner=args.owner
)
@attr.s(frozen=True, auto_attribs=True)
class CaseCreateImportInfoConfig:
"""Configuration for the ``varfish-cli case create-import-info`` command."""
#: Case configuration.
case_config: CaseConfig
#: Suffix to append to the case name.
case_name_suffix: str
#: UUID of the case to pull.
project_uuid: uuid.UUID
#: Path to files to import.
paths: typing.List[str]
#: Regular expression to use for modifying family.
strip_family_regex: str
#: Whether to force resubmittal of old
resubmit: bool
#: Whether to force creation of fresh case import info.
force_fresh: bool
#: Expected genome build.
genomebuild: str
@staticmethod
def create(args, case_config, strip_family_regex, toml_config=None):
_ = toml_config
# toml_config = toml_config or {}
return CaseCreateImportInfoConfig(
case_config=case_config,
project_uuid=args.project_uuid,
paths=args.paths,
strip_family_regex=args.strip_family_regex,
case_name_suffix=args.case_name_suffix,
resubmit=args.resubmit,
force_fresh=args.force_fresh,
genomebuild=args.genomebuild,
)
| python |
import subprocess
import sys
import os
import time
import cProfile
def prepare_io(list_of_files, exe_file, input_path, output_path, job_number):
# read file names
with open(list_of_files, "r") as files_to_read:
list_files = files_to_read.read().split("\n")
job_number = int(job_number) - 1
input_file = list_files[job_number]
output_dir = os.path.join(output_path, input_file).replace(".vcf.gz", "/")
zip_output_path = os.path.join(output_path, input_file).replace(".vcf.gz", ".tar.xz")
to_read = os.path.join(input_path, input_file)
if not os.path.isdir(output_dir):
subprocess.run("mkdir {}".format(output_dir), shell=True, stdout=subprocess.PIPE)
logs_path = os.path.join(output_path, "logs")
profs_path = os.path.join(output_path, "profs")
if not os.path.isdir(output_dir):
subprocess.run("mkdir {}".format(output_dir), shell=True, stdout=subprocess.PIPE)
if not os.path.isdir(logs_path):
subprocess.run("mkdir {}".format(logs_path), shell=True, stdout=subprocess.PIPE)
if not os.path.isdir(profs_path):
subprocess.run("mkdir {}".format(profs_path), shell=True, stdout=subprocess.PIPE)
log_file = open(os.path.join(logs_path, input_file).replace(".vcf.gz", "_logs.txt"), "a")
log_file.write("{} \n".format(input_file))
log_file.flush()
exe = "{} {} {}".format(exe_file, to_read, output_dir)
start = time.time()
if job_number == 0:
# run vcf to tensor -- c++ code
prof = cProfile.Profile()
prof.enable()
subprocess.run(exe, shell=True, stdout=subprocess.PIPE)
end = time.time()
prof.disable()
prof_path = os.path.join(profs_path, input_file).replace(".vcf.gz", "sample.prof")
prof.dump_stats(prof_path)
        elapsed = (end - start) / 3600  # seconds -> hours
log_file.write("{} was done in {} hours \n".format(exe, elapsed))
log_file.flush()
else:
subprocess.run(exe, shell=True, stdout=subprocess.PIPE)
end = time.time()
        elapsed = (end - start) / 3600  # seconds -> hours
log_file.write("{} was done in {} hours \n".format(exe, elapsed))
log_file.flush()
# zip output files
exe_2 = "tar -cjf {} {}".format(zip_output_path, output_dir)
start = time.time()
subprocess.run(exe_2, shell=True, stdout=subprocess.PIPE)
end = time.time()
    elapsed = (end - start) / 3600  # seconds -> hours
log_file.write("{} was done in {} hours \n".format(exe_2, elapsed))
log_file.flush()
# remove residual files
exe_3 = "rsync -a --delete /home/eniktab/LocalBin/empty/ {}".format(output_dir)
log_file.write("{} started \n".format(exe_3))
subprocess.run(exe_3, shell=True, stdout=subprocess.PIPE)
log_file.write("{} was done \n".format(exe_3))
log_file.flush()
log_file.close()
def main(argv):
prepare_io(list_of_files=argv[0], exe_file=argv[1], input_path=argv[2], output_path=argv[3], job_number=argv[4])
if __name__ == "__main__":
main(sys.argv[1:])
| python |
import numpy as np
# Collection of activation functions
# Reference: https://en.wikipedia.org/wiki/Activation_function
class Sigmoid():
def __call__(self, x):
return 1 / (1 + np.exp(-x))
def gradient(self, x):
return self.__call__(x) * (1 - self.__call__(x))
class Softmax():
def __call__(self, x):
e_x = np.exp(x - np.max(x, axis=-1, keepdims=True))
return e_x / np.sum(e_x, axis=-1, keepdims=True)
def gradient(self, x):
p = self.__call__(x)
return p * (1 - p)
class TanH():
def __call__(self, x):
return 2 / (1 + np.exp(-2*x)) - 1
def gradient(self, x):
return 1 - np.power(self.__call__(x), 2)
class ReLU():
def __call__(self, x):
return np.where(x >= 0, x, 0)
def gradient(self, x):
return np.where(x >= 0, 1, 0)
class LeakyReLU():
def __init__(self, alpha=0.2):
self.alpha = alpha
def __call__(self, x):
return np.where(x >= 0, x, self.alpha * x)
def gradient(self, x):
return np.where(x >= 0, 1, self.alpha)
class ELU():
def __init__(self, alpha=0.1):
self.alpha = alpha
def __call__(self, x):
return np.where(x >= 0.0, x, self.alpha * (np.exp(x) - 1))
def gradient(self, x):
return np.where(x >= 0.0, 1, self.__call__(x) + self.alpha)
class SELU():
# Reference : https://arxiv.org/abs/1706.02515,
# https://github.com/bioinf-jku/SNNs/blob/master/SelfNormalizingNetworks_MLP_MNIST.ipynb
def __init__(self):
self.alpha = 1.6732632423543772848170429916717
self.scale = 1.0507009873554804934193349852946
def __call__(self, x):
return self.scale * np.where(x >= 0.0, x, self.alpha*(np.exp(x)-1))
def gradient(self, x):
return self.scale * np.where(x >= 0.0, 1, self.alpha * np.exp(x))
class SoftPlus():
def __call__(self, x):
return np.log(1 + np.exp(x))
def gradient(self, x):
return 1 / (1 + np.exp(-x))
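
# Added sanity-check sketch (not part of the original module): compares each elementwise
# activation's analytic gradient against a central finite difference at a few points.
# Softmax is skipped because its Jacobian is not elementwise.
if __name__ == "__main__":
    x = np.array([-2.0, -0.5, 0.5, 2.0])
    eps = 1e-6
    for act in (Sigmoid(), TanH(), ReLU(), LeakyReLU(), ELU(), SELU(), SoftPlus()):
        numeric = (act(x + eps) - act(x - eps)) / (2 * eps)
        print(type(act).__name__, np.max(np.abs(act.gradient(x) - numeric)))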
| python |
#!/usr/bin/env python3.7
import sys
from blist import blist
from collections import defaultdict
# Solution to the day 9 puzzle from Advent of Code 2018.
# https://adventofcode.com/2018/day/9
def parse_data(filename):
""" Load the data from FILENAME. """
data = list()
with open(filename) as f:
elements = f.readline().rstrip().split(' ')
data = [int(elements[0]), int(elements[6])]
return data
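
# Added usage note (not part of the original script): the input file is a single line such
# as "430 players; last marble is worth 71588 points"; fields 0 and 6 of the split line are
# the player count and the last marble's value. With the sample "9 players; last marble is
# worth 25 points" the loop below reports a high score of 32.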
if __name__ == "__main__":
if len(sys.argv) == 2:
players, marbles = parse_data(sys.argv[1])
current_player = 0
board = blist([0])
current_marble = 0
scores = defaultdict(int)
for i in range(marbles):
marble_value = i + 1
if marble_value % 23 == 0:
current_marble = (current_marble - 7) % len(board)
scores[current_player] += (marble_value
+ board.pop(current_marble))
else:
current_marble = ((current_marble + 1) % len(board)) + 1
board.insert(current_marble,marble_value)
current_player = (current_player + 1) % players
print("For "
+ str(players)
+ " players with "
+ str(marbles)
+ " marbles, the high score is "
+ str(max(scores.values()))
+ ".")
else:
print("Usage: " + sys.argv[0] + " <data-file>")
| python |
import time
import hashlib
import requests
import urllib3
from lxml import etree
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def xdl_proxy(orderno, secret, host, port):
host_port = host + ":" + port
# get sign
timestamp = str(int(time.time()))
string = ""
string = "orderno=" + orderno + "," + "secret=" + secret + "," + "timestamp=" + timestamp
string = string.encode()
md5_string = hashlib.md5(string).hexdigest()
sign = md5_string.upper()
# get auth
auth = "sign=" + sign + "&" + "orderno=" + orderno + "&" + "timestamp=" + timestamp
proxy = { "http": "http://" + host_port, "https": "https://" + host_port}
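    # Added usage note (assumption, not from the original code): callers typically pass
    # ``proxy`` to requests' proxies= argument and send ``auth`` in a Proxy-Authorization
    # header, e.g.
    #   proxy, auth = xdl_proxy(orderno, secret, host, port)
    #   requests.get(url, proxies=proxy, headers={"Proxy-Authorization": auth}, verify=False)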
return proxy, auth | python |
# -*- coding: utf8 -*-
"""
======================================
Project Name: NLP
File Name: utils
Author: czh
Create Date: 2021/8/6
--------------------------------------
Change Activity:
======================================
"""
import torch
import torch.nn as nn
import torch.nn.functional as func
from torch.nn.parameter import Parameter
import numpy as np
class LayerNorm(nn.Module):
def __init__(self, input_dim, cond_dim=0, center=True, scale=True, epsilon=None, conditional=False,
hidden_units=None, hidden_initializer='xaiver'):
"""
:param input_dim: inputs.shape[-1]
:param cond_dim: cond.shape[-1]
:param center:
:param scale:
:param epsilon:
        :param conditional: if True, use a conditional LayerNorm
:param hidden_units:
:param hidden_initializer:
"""
super(LayerNorm, self).__init__()
self.center = center
self.scale = scale
self.conditional = conditional
self.hidden_units = hidden_units
self.hidden_initializer = hidden_initializer
self.epsilon = epsilon or 1e-12
self.input_dim = input_dim
self.cond_dim = cond_dim
if self.center:
self.beta = Parameter(torch.zeros(input_dim))
if self.scale:
self.gamma = Parameter(torch.ones(input_dim))
if self.conditional:
if self.hidden_units is not None:
self.hidden_dense = nn.Linear(in_features=self.cond_dim, out_features=self.hidden_units, bias=False)
if self.center:
self.beta_dense = nn.Linear(in_features=self.cond_dim, out_features=input_dim, bias=False)
if self.scale:
self.gamma_dense = nn.Linear(in_features=self.cond_dim, out_features=input_dim, bias=False)
self.initialize_weights()
def initialize_weights(self):
if self.conditional:
if self.hidden_units is not None:
if self.hidden_initializer == 'normal':
                    torch.nn.init.normal_(self.hidden_dense.weight)
elif self.hidden_initializer == 'xavier': # glorot_uniform
torch.nn.init.xavier_uniform_(self.hidden_dense.weight)
            # Why are these two initialized to zero? To avoid disturbing the original
            # pretrained weights, the two transform matrices can be zero-initialized
            # (a single layer may be zero-initialized; only stacked multi-layer networks
            # should not be), so the model initially behaves exactly like the pretrained one.
if self.center:
torch.nn.init.constant_(self.beta_dense.weight, 0)
if self.scale:
torch.nn.init.constant_(self.gamma_dense.weight, 0)
def forward(self, inputs, cond=None):
"""
        If this is a conditional LayerNorm, cond is not None.
"""
gamma = 1
beta = 0
if self.conditional:
if self.hidden_units is not None:
cond = self.hidden_dense(cond)
            # for _ in range(K.ndim(inputs) - K.ndim(cond)):  # K.ndim: returns the number of axes of a tensor as an integer.
            # TODO: why do the two differ in number of axes, and why is the new dimension added at dim=1?
            # To keep the shapes compatible, cond may be (batch_size, cond_dim).
for _ in range(len(inputs.shape) - len(cond.shape)):
cond = cond.unsqueeze(1) # cond = K.expand_dims(cond, 1)
            # Apply a linear transform to cond before it is added to beta and gamma, so its size matches the input.
if self.center:
beta = self.beta_dense(cond) + self.beta
if self.scale:
gamma = self.gamma_dense(cond) + self.gamma
else:
if self.center:
beta = self.beta
if self.scale:
gamma = self.gamma
outputs = inputs
if self.center:
mean = torch.mean(outputs, dim=-1).unsqueeze(-1)
outputs = outputs - mean
if self.scale:
variance = torch.mean(outputs ** 2, dim=-1).unsqueeze(-1)
            std = (variance + self.epsilon) ** 0.5  # standard deviation
outputs = outputs / std
outputs = outputs * gamma
if self.center:
outputs = outputs + beta
return outputs
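
# Added usage sketch (not part of the original module): the conditional branch expects
# cond of shape (batch_size, cond_dim) and broadcasts it up to the input's rank.
if __name__ == "__main__":
    _cln = LayerNorm(input_dim=8, cond_dim=4, conditional=True)
    _out = _cln(torch.randn(2, 5, 8), cond=torch.randn(2, 4))
    print(_out.shape)  # torch.Size([2, 5, 8])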
def sequence_masking(x: torch.Tensor, mask: torch.Tensor, value=0.0, axis=None):
    """Apply a conditional mask along the sequence dimension.
    mask: a 0/1 matrix of shape (batch_size, seq_len);
    value: the value masked positions are replaced with, may be '-inf' or 'inf';
    axis: the axis of the sequence dimension, defaults to 1;
"""
if mask is None:
return x
else:
if mask.dtype != x.dtype:
mask = mask.to(x.dtype)
if value == '-inf':
value = -1e12
elif value == 'inf':
value = 1e12
if axis is None:
axis = 1
elif axis < 0:
axis = x.ndim + axis
assert axis > 0, 'axis must be greater than 0'
for _ in range(axis - 1):
mask = torch.unsqueeze(mask, 1)
for _ in range(x.ndim - mask.ndim):
mask = torch.unsqueeze(mask, mask.ndim)
return x * mask + value * (1 - mask)
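
# Added example sketch (not part of the original module): masking padded positions of a
# (batch_size, seq_len, hidden) tensor with '-inf' along axis 1, e.g. before a max-pool.
if __name__ == "__main__":
    _x = torch.randn(2, 4, 3)
    _pad_mask = torch.tensor([[1, 1, 0, 0], [1, 1, 1, 0]])
    print(sequence_masking(_x, _pad_mask, value='-inf', axis=1)[0, 2:])  # masked rows ~ -1e12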
def _generate_relative_positions_matrix(length, max_relative_position,
cache=False):
"""Generates matrix of relative positions between inputs."""
if not cache:
range_vec = torch.arange(length)
range_mat = range_vec.repeat(length).view(length, length)
distance_mat = range_mat - torch.t(range_mat)
else:
distance_mat = torch.arange(-length + 1, 1, 1).unsqueeze(0)
distance_mat_clipped = torch.clamp(distance_mat, -max_relative_position, max_relative_position)
final_mat = distance_mat_clipped + max_relative_position
return final_mat
def _generate_relative_positions_embeddings(seq_length, embed_dim, max_relative_position=127):
vocab_size = max_relative_position * 2 + 1
range_vec = torch.arange(seq_length)
range_mat = range_vec.repeat(seq_length).view(seq_length, seq_length)
distance_mat = range_mat - torch.t(range_mat)
distance_mat_clipped = torch.clamp(distance_mat, -max_relative_position, max_relative_position)
final_mat = distance_mat_clipped + max_relative_position
embeddings_table = np.zeros([vocab_size, embed_dim])
for pos in range(vocab_size):
for i in range(embed_dim // 2):
embeddings_table[pos, 2 * i] = np.sin(pos / np.power(10000, 2 * i / embed_dim))
embeddings_table[pos, 2 * i + 1] = np.cos(pos / np.power(10000, 2 * i / embed_dim))
embeddings_table_tensor = torch.tensor(embeddings_table).float()
flat_relative_positions_matrix = final_mat.view(-1)
one_hot_relative_positions_matrix = func.one_hot(flat_relative_positions_matrix,
num_classes=vocab_size).float()
embeddings = torch.matmul(one_hot_relative_positions_matrix, embeddings_table_tensor)
my_shape = list(final_mat.size())
my_shape.append(embed_dim)
embeddings = embeddings.view(my_shape)
# print(embeddings.shape)
return embeddings
# Test:
# print(_generate_relative_positions_embeddings(6, 32, 4)[0, 0, :])
class HandshakingKernel(nn.Module):
"""
    Handshaking kernel from the TPLinker method.
"""
def __init__(self, hidden_size, shaking_type, inner_enc_type):
super().__init__()
self.shaking_type = shaking_type
if shaking_type == "cat":
self.combine_fc = nn.Linear(hidden_size * 2, hidden_size)
elif shaking_type == "cat_plus":
self.combine_fc = nn.Linear(hidden_size * 3, hidden_size)
elif shaking_type == "cln":
self.tp_cln = LayerNorm(hidden_size, hidden_size, conditional=True)
elif shaking_type == "cln_plus":
self.tp_cln = LayerNorm(hidden_size, hidden_size, conditional=True)
self.inner_context_cln = LayerNorm(hidden_size, hidden_size, conditional=True)
self.inner_enc_type = inner_enc_type
if inner_enc_type == "mix_pooling":
self.lamtha = Parameter(torch.rand(hidden_size))
elif inner_enc_type == "lstm":
self.inner_context_lstm = nn.LSTM(hidden_size, hidden_size, num_layers=1, bidirectional=False,
batch_first=True)
def enc_inner_hiddens(self, seq_hiddens, inner_enc_type="lstm"):
# seq_hiddens: (batch_size, seq_len, hidden_size)
def pool(seqence, pooling_type):
if pooling_type == "mean_pooling":
pooling = torch.mean(seqence, dim=-2) # (batch_size, hidden_size)
elif pooling_type == "max_pooling":
pooling, _ = torch.max(seqence, dim=-2) # (batch_size, hidden_size)
elif pooling_type == "mix_pooling":
pooling = self.lamtha * torch.mean(seqence, dim=-2) + (1 - self.lamtha) * torch.max(seqence, dim=-2)[0]
else:
raise ValueError("'pooling_type must be one of the list: "
"['mean_pooling', 'max_pooling', 'mix_pooling']'")
return pooling
if "pooling" in inner_enc_type:
inner_context = torch.stack(
[pool(seq_hiddens[:, :i + 1, :], inner_enc_type) for i in range(seq_hiddens.size()[1])], dim=1)
elif inner_enc_type == "lstm":
inner_context, _ = self.inner_context_lstm(seq_hiddens)
else:
raise ValueError("'inner_enc_type' must be one of the list: "
"['mean_pooling', 'max_pooling', 'mix_pooling', 'lstm']")
return inner_context
def forward(self, seq_hiddens):
"""
seq_hiddens: (batch_size, seq_len, hidden_size)
return:
shaking_hiddenss: (batch_size, (1 + seq_len) * seq_len / 2, hidden_size) (32, 5+4+3+2+1, 5)
"""
seq_len = seq_hiddens.size()[-2]
shaking_hiddens_list = []
for ind in range(seq_len):
hidden_each_step = seq_hiddens[:, ind, :]
visible_hiddens = seq_hiddens[:, ind:, :] # ind: only look back
repeat_hiddens = hidden_each_step[:, None, :].repeat(1, seq_len - ind, 1)
if self.shaking_type == "cat":
shaking_hiddens = torch.cat([repeat_hiddens, visible_hiddens], dim=-1)
shaking_hiddens = torch.tanh(self.combine_fc(shaking_hiddens))
elif self.shaking_type == "cat_plus":
inner_context = self.enc_inner_hiddens(visible_hiddens, self.inner_enc_type)
shaking_hiddens = torch.cat([repeat_hiddens, visible_hiddens, inner_context], dim=-1)
shaking_hiddens = torch.tanh(self.combine_fc(shaking_hiddens))
elif self.shaking_type == "cln":
shaking_hiddens = self.tp_cln(visible_hiddens, repeat_hiddens)
elif self.shaking_type == "cln_plus":
inner_context = self.enc_inner_hiddens(visible_hiddens, self.inner_enc_type)
shaking_hiddens = self.tp_cln(visible_hiddens, repeat_hiddens)
shaking_hiddens = self.inner_context_cln(shaking_hiddens, inner_context)
else:
raise ValueError("'shaking_type' must be one of the list: "
"['cat', 'cat_plus', 'cln', 'cln_plus']")
shaking_hiddens_list.append(shaking_hiddens)
long_shaking_hiddens = torch.cat(shaking_hiddens_list, dim=1)
return long_shaking_hiddens
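
# Added usage sketch (not part of the original module): for seq_len L the kernel flattens
# the upper-triangular pair grid, so the output length is L + (L - 1) + ... + 1 = L * (L + 1) / 2.
if __name__ == "__main__":
    _hk = HandshakingKernel(hidden_size=16, shaking_type="cat", inner_enc_type="lstm")
    print(_hk(torch.randn(2, 5, 16)).shape)  # torch.Size([2, 15, 16])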
class MyMaths:
@staticmethod
def handshaking_len2matrix_size(hsk_len):
matrix_size = int((2 * hsk_len + 0.25) ** 0.5 - 0.5)
return matrix_size
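
# Added check sketch (not part of the original module): handshaking_len2matrix_size inverts
# the triangular-number length n * (n + 1) / 2 of the flattened upper-triangle sequence.
if __name__ == "__main__":
    for _n in (1, 5, 100):
        assert MyMaths.handshaking_len2matrix_size(_n * (_n + 1) // 2) == _n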
class MyMatrix:
@staticmethod
def get_shaking_idx2matrix_idx(matrix_size):
"""
:param matrix_size:
:return: a list mapping shaking sequence points to matrix points
"""
shaking_idx2matrix_idx = [(ind, end_ind) for ind in range(matrix_size) for end_ind in
list(range(matrix_size))[ind:]]
return shaking_idx2matrix_idx
@staticmethod
def get_matrix_idx2shaking_idx(matrix_size):
"""
:param matrix_size:
:return: a matrix mapping matrix points to shaking sequence points
"""
matrix_idx2shaking_idx = [[0 for _ in range(matrix_size)] for _ in range(matrix_size)]
shaking_idx2matrix_idx = MyMatrix.get_shaking_idx2matrix_idx(matrix_size)
for shaking_ind, matrix_ind in enumerate(shaking_idx2matrix_idx):
matrix_idx2shaking_idx[matrix_ind[0]][matrix_ind[1]] = shaking_ind
return matrix_idx2shaking_idx
@staticmethod
def mirror(shaking_seq):
"""
copy upper region to lower region
:param shaking_seq:
:return:
"""
batch_size, handshaking_seq_len, hidden_size = shaking_seq.size()
matrix_size = MyMaths.handshaking_len2matrix_size(handshaking_seq_len)
map_ = MyMatrix.get_matrix_idx2shaking_idx(matrix_size)
mirror_select_ids = [map_[i][j] if i <= j else map_[j][i] for i in range(matrix_size) for j in
range(matrix_size)]
mirror_select_vec = torch.tensor(mirror_select_ids).to(shaking_seq.device)
matrix = torch.index_select(shaking_seq, dim=1, index=mirror_select_vec)
matrix = matrix.view(batch_size, matrix_size, matrix_size, hidden_size)
return matrix
@staticmethod
def upper_reg2seq(ori_tensor):
"""
drop lower triangular part and flat upper triangular part to sequence
:param ori_tensor: (batch_size, matrix_size, matrix_size, hidden_size)
:return: (batch_size, matrix_size + ... + 1, hidden_size)
"""
tensor = ori_tensor.permute(0, 3, 1, 2).contiguous()
uppder_ones = torch.ones([tensor.size()[-2], tensor.size()[-1]]).long().triu().to(ori_tensor.device)
upper_diag_ids = torch.nonzero(uppder_ones.view(-1), as_tuple=False).view(-1)
# flat_tensor: (batch_size, matrix_size * matrix_size, hidden_size)
flat_tensor = tensor.view(tensor.size()[0], tensor.size()[1], -1).permute(0, 2, 1)
tensor_upper = torch.index_select(flat_tensor, dim=1, index=upper_diag_ids)
return tensor_upper
@staticmethod
def lower_reg2seq(ori_tensor):
"""
drop upper triangular part and flat lower triangular part to sequence
:param ori_tensor: (batch_size, matrix_size, matrix_size, hidden_size)
:return: (batch_size, matrix_size + ... + 1, hidden_size)
"""
tensor = ori_tensor.permute(0, 3, 1, 2).contiguous()
lower_ones = torch.ones([tensor.size()[-2], tensor.size()[-1]]).long().tril().to(ori_tensor.device)
lower_diag_ids = torch.nonzero(lower_ones.view(-1), as_tuple=False).view(-1)
# flat_tensor: (batch_size, matrix_size * matrix_size, hidden_size)
flat_tensor = tensor.view(tensor.size()[0], tensor.size()[1], -1).permute(0, 2, 1)
tensor_lower = torch.index_select(flat_tensor, dim=1, index=lower_diag_ids)
return tensor_lower
@staticmethod
def shaking_seq2matrix(sequence):
"""
map sequence tensor to matrix tensor; only upper region has values, pad 0 to the lower region
:param sequence:
:return:
"""
# sequence: (batch_size, seq_len, hidden_size)
batch_size, seq_len, hidden_size = sequence.size()
matrix_size = MyMaths.handshaking_len2matrix_size(seq_len)
map_ = MyMatrix.get_matrix_idx2shaking_idx(matrix_size)
index_ids = [map_[i][j] if i <= j else seq_len for i in range(matrix_size) for j in range(matrix_size)]
sequence_w_ze = func.pad(sequence, (0, 0, 0, 1), "constant", 0)
index_tensor = torch.LongTensor(index_ids).to(sequence.device)
long_seq = torch.index_select(sequence_w_ze, dim=1, index=index_tensor)
return long_seq.view(batch_size, matrix_size, matrix_size, hidden_size)
class SingleSourceHandshakingKernel(nn.Module):
def __init__(self, hidden_size, shaking_type, only_look_after=True, distance_emb_dim=-1):
super().__init__()
self.shaking_types = shaking_type.split("+")
self.only_look_after = only_look_after
cat_length = 0
if "cat" in self.shaking_types:
self.cat_fc = nn.Linear(hidden_size * 2, hidden_size)
cat_length += hidden_size
if "cmm" in self.shaking_types:
self.cat_fc = nn.Linear(hidden_size * 4, hidden_size)
self.guide_fc = nn.Linear(hidden_size, hidden_size)
self.vis_fc = nn.Linear(hidden_size, hidden_size)
cat_length += hidden_size
if "mul" in self.shaking_types:
self.guide_fc = nn.Linear(hidden_size, hidden_size)
self.vis_fc = nn.Linear(hidden_size, hidden_size)
self.mul_fc = nn.Linear(hidden_size, hidden_size)
if "cln" in self.shaking_types:
self.tp_cln = LayerNorm(hidden_size, hidden_size, conditional=True)
cat_length += hidden_size
if "lstm" in self.shaking_types:
assert only_look_after is True
self.lstm4span = nn.LSTM(hidden_size,
hidden_size,
num_layers=1,
bidirectional=False,
batch_first=True)
cat_length += hidden_size
elif "gru" in self.shaking_types:
assert only_look_after is True
self.lstm4span = nn.GRU(hidden_size,
hidden_size,
num_layers=1,
bidirectional=False,
batch_first=True)
cat_length += hidden_size
if "bilstm" in self.shaking_types:
assert only_look_after is True
self.lstm4span = nn.LSTM(hidden_size,
hidden_size // 2,
num_layers=1,
bidirectional=False,
batch_first=True)
self.lstm4span_back = nn.LSTM(hidden_size,
hidden_size // 2,
num_layers=1,
bidirectional=False,
batch_first=True)
cat_length += hidden_size
elif "bigru" in self.shaking_types:
assert only_look_after is True
self.lstm4span = nn.GRU(hidden_size,
hidden_size // 2,
num_layers=1,
bidirectional=False,
batch_first=True)
self.lstm4span_back = nn.GRU(hidden_size,
hidden_size // 2,
num_layers=1,
bidirectional=False,
batch_first=True)
cat_length += hidden_size
if "biaffine" in self.shaking_types:
self.biaffine = nn.Bilinear(hidden_size, hidden_size, hidden_size)
cat_length += hidden_size
self.distance_emb_dim = distance_emb_dim
if distance_emb_dim > 0:
self.dist_emb = nn.Embedding(512, distance_emb_dim)
self.dist_ids_matrix = None # for cache
cat_length += distance_emb_dim
self.aggr_fc = nn.Linear(cat_length, hidden_size)
def forward(self, seq_hiddens):
"""
seq_hiddens: (batch_size, seq_len, hidden_size_x)
return:
if only look after:
            shaking_hiddens: (batch_size, (1 + seq_len) * seq_len / 2, hidden_size); e.g. (32, 5+4+3+2+1, 5)
else:
            shaking_hiddens: (batch_size, seq_len * seq_len, hidden_size)
"""
# seq_len = seq_hiddens.size()[1]
batch_size, seq_len, vis_hidden_size = seq_hiddens.size()
guide = seq_hiddens[:, :, None, :].repeat(1, 1, seq_len, 1)
visible = guide.permute(0, 2, 1, 3)
feature_pre_list = []
if self.only_look_after:
if len({"lstm", "bilstm", "gru", "bigru"}.intersection(self.shaking_types)) > 0:
# batch_size, _, matrix_size, vis_hidden_size = visible.size()
# mask lower triangle part
upper_visible = visible.permute(0, 3, 1, 2).triu().permute(0, 2, 3, 1).contiguous()
# visible4lstm: (batch_size * matrix_size, matrix_size, hidden_size)
visible4lstm = upper_visible.view(batch_size * seq_len, seq_len, -1)
span_pre, _ = self.lstm4span(visible4lstm)
span_pre = span_pre.view(batch_size, seq_len, seq_len, -1)
if len({"bilstm", "bigru"}.intersection(self.shaking_types)) > 0:
# mask upper triangle part
lower_visible = visible.permute(0, 3, 1, 2).tril().permute(0, 2, 3, 1).contiguous()
visible4lstm_back = lower_visible.view(batch_size * seq_len, seq_len, -1)
visible4lstm_back = torch.flip(visible4lstm_back, [1, ])
span_pre_back, _ = self.lstm4span_back(visible4lstm_back)
span_pre_back = torch.flip(span_pre_back, [1, ])
span_pre_back = span_pre_back.view(batch_size, seq_len, seq_len, -1)
span_pre_back = span_pre_back.permute(0, 2, 1, 3)
span_pre = torch.cat([span_pre, span_pre_back], dim=-1)
# drop lower triangle and convert matrix to sequence
# span_pre: (batch_size, shaking_seq_len, hidden_size)
span_pre = MyMatrix.upper_reg2seq(span_pre)
feature_pre_list.append(span_pre)
# guide, visible: (batch_size, shaking_seq_len, hidden_size)
guide = MyMatrix.upper_reg2seq(guide)
visible = MyMatrix.upper_reg2seq(visible)
if "cat" in self.shaking_types:
tp_cat_pre = torch.cat([guide, visible], dim=-1)
tp_cat_pre = torch.relu(self.cat_fc(tp_cat_pre))
feature_pre_list.append(tp_cat_pre)
if "cmm" in self.shaking_types: # cat and multiple
tp_cat_pre = torch.cat([guide, visible,
torch.abs(guide - visible),
torch.mul(self.guide_fc(guide), self.vis_fc(visible))], dim=-1)
tp_cat_pre = torch.relu(self.cat_fc(tp_cat_pre))
feature_pre_list.append(tp_cat_pre)
if "cln" in self.shaking_types:
tp_cln_pre = self.tp_cln(visible, guide)
feature_pre_list.append(tp_cln_pre)
if "biaffine" in self.shaking_types:
biaffine_pre = self.biaffine(guide, visible)
biaffine_pre = torch.relu(biaffine_pre)
feature_pre_list.append(biaffine_pre)
if self.distance_emb_dim > 0:
if self.dist_ids_matrix is None or \
self.dist_ids_matrix.size()[0] != batch_size or \
self.dist_ids_matrix.size()[1] != seq_len: # need to update cached distance ids
t = torch.arange(0, seq_len).to(seq_hiddens.device)[:, None].repeat(1, seq_len)
self.dist_ids_matrix = torch.abs(t - t.permute(1, 0)).long()[None, :, :].repeat(batch_size, 1, 1)
if self.only_look_after: # matrix to handshaking seq
self.dist_ids_matrix = MyMatrix.upper_reg2seq(
self.dist_ids_matrix[:, :, :, None]).view(batch_size, -1)
dist_embeddings = self.dist_emb(self.dist_ids_matrix)
feature_pre_list.append(dist_embeddings)
output_hiddens = self.aggr_fc(torch.cat(feature_pre_list, dim=-1))
return output_hiddens
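# Usage sketch (illustrative, not part of the original module): with shaking_type="cat"
# the kernel only needs the linear layers defined above, so output shapes can be checked
# in isolation.
def _demo_handshaking_kernel():
    kernel = SingleSourceHandshakingKernel(hidden_size=16, shaking_type="cat")
    hiddens = torch.randn(2, 5, 16)   # (batch_size, seq_len, hidden_size)
    shaking = kernel(hiddens)         # (2, 5 * 6 // 2, 16) = (2, 15, 16)
    return shaking.shape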
class CrossLSTM(nn.Module):
def __init__(self,
in_feature_dim=None,
out_feature_dim=None,
num_layers=1,
hv_comb_type="cat"
):
super().__init__()
self.vertical_lstm = nn.LSTM(in_feature_dim,
out_feature_dim // 2,
num_layers=num_layers,
bidirectional=True,
batch_first=True)
self.horizontal_lstm = nn.LSTM(in_feature_dim,
out_feature_dim // 2,
num_layers=num_layers,
bidirectional=True,
batch_first=True)
self.hv_comb_type = hv_comb_type
if hv_comb_type == "cat":
self.combine_fc = nn.Linear(out_feature_dim * 2, out_feature_dim)
elif hv_comb_type == "add":
pass
elif hv_comb_type == "interpolate":
self.lamtha = Parameter(torch.rand(out_feature_dim)) # [0, 1)
def forward(self, matrix):
# matrix: (batch_size, matrix_ver_len, matrix_hor_len, hidden_size)
batch_size, matrix_ver_len, matrix_hor_len, hidden_size = matrix.size()
hor_context, _ = self.horizontal_lstm(matrix.view(-1, matrix_hor_len, hidden_size))
hor_context = hor_context.view(batch_size, matrix_ver_len, matrix_hor_len, hidden_size)
ver_context, _ = self.vertical_lstm(
matrix.permute(0, 2, 1, 3).contiguous().view(-1, matrix_ver_len, hidden_size))
ver_context = ver_context.view(batch_size, matrix_hor_len, matrix_ver_len, hidden_size)
ver_context = ver_context.permute(0, 2, 1, 3)
comb_context = None
if self.hv_comb_type == "cat":
comb_context = torch.relu(self.combine_fc(torch.cat([hor_context, ver_context], dim=-1)))
elif self.hv_comb_type == "interpolate":
comb_context = self.lamtha * hor_context + (1 - self.lamtha) * ver_context
elif self.hv_comb_type == "add":
comb_context = (hor_context + ver_context) / 2
return comb_context
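# Shape sketch (illustrative): CrossLSTM preserves the matrix shape when
# in_feature_dim == out_feature_dim.
def _demo_cross_lstm():
    layer = CrossLSTM(in_feature_dim=16, out_feature_dim=16)
    out = layer(torch.randn(2, 5, 7, 16))   # (batch, ver_len, hor_len, hidden)
    return out.shape                        # torch.Size([2, 5, 7, 16])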
class CrossConv(nn.Module):
def __init__(self,
channel_dim,
hor_dim,
ver_dim
):
super(CrossConv, self).__init__()
self.alpha = Parameter(torch.randn([channel_dim, hor_dim, 1]))
self.beta = Parameter(torch.randn([channel_dim, 1, ver_dim]))
def forward(self, matrix_tensor):
# matrix_tensor: (batch_size, ver_dim, hor_dim, hidden_size)
# hor_cont: (batch_size, hidden_size (channel dim), ver_dim, 1)
hor_cont = torch.matmul(matrix_tensor.permute(0, 3, 1, 2), self.alpha)
# ver_cont: (batch_size, hidden_size, 1, hor_dim)
ver_cont = torch.matmul(self.beta, matrix_tensor.permute(0, 3, 1, 2))
# cross_context: (batch_size, ver_dim, hor_dim, hidden_size)
cross_context = torch.matmul(hor_cont, ver_cont).permute(0, 2, 3, 1)
return cross_context
class CrossPool(nn.Module):
def __init__(self, hidden_size):
super(CrossPool, self).__init__()
self.lamtha = Parameter(torch.rand(hidden_size))
def mix_pool(self, tensor, dim):
return self.lamtha * torch.mean(tensor, dim=dim) + (1 - self.lamtha) * torch.max(tensor, dim=dim)[0]
def forward(self, matrix_tensor):
# matrix_tensor: (batch_size, ver_dim, hor_dim, hidden_size)
# hor_cont: (batch_size, hidden_size, ver_dim, 1)
hor_cont = self.mix_pool(matrix_tensor, dim=2)[:, :, None, :].permute(0, 3, 1, 2)
# ver_cont: (batch_size, hidden_size, 1, hor_dim)
ver_cont = self.mix_pool(matrix_tensor, dim=1)[:, None, :, :].permute(0, 3, 1, 2)
# cross_context: (batch_size, ver_dim, hor_dim, hidden_size)
cross_context = torch.matmul(hor_cont, ver_cont).permute(0, 2, 3, 1)
return cross_context
class EdgeUpdate(nn.Module):
def __init__(self, hidden_dim, dim_e, dropout_ratio=0.5):
super(EdgeUpdate, self).__init__()
self.hidden_dim = hidden_dim
self.dim_e = dim_e
self.dropout = dropout_ratio
self.W = nn.Linear(self.hidden_dim * 2 + self.dim_e, self.dim_e)
def forward(self, edge, node1, node2):
"""
:param edge: [batch, seq, seq, dim_e]
:param node1: [batch, seq, seq, dim]
:param node2: [batch, seq, seq, dim]
:return:
"""
node = torch.cat([node1, node2], dim=-1) # [batch, seq, seq, dim * 2]
edge = self.W(torch.cat([edge, node], dim=-1))
return edge # [batch, seq, seq, dim_e]
class GraphConvLayer(nn.Module):
""" A GCN module operated on dependency graphs. """
def __init__(self, dep_embed_dim, gcn_dim, pooling='avg'):
super(GraphConvLayer, self).__init__()
self.gcn_dim = gcn_dim
self.dep_embed_dim = dep_embed_dim
self.pooling = pooling
self.W = nn.Linear(self.gcn_dim, self.gcn_dim)
self.highway = EdgeUpdate(gcn_dim, self.dep_embed_dim, dropout_ratio=0.5)
def forward(self, weight_adj, node_hiddens):
"""
:param weight_adj: [batch, seq, seq, dim_e]
:param node_hiddens: [batch, seq, dim]
:return:
"""
batch, seq, dim = node_hiddens.shape
weight_adj = weight_adj.permute(0, 3, 1, 2) # [batch, dim_e, seq, seq]
node_hiddens = node_hiddens.unsqueeze(1).expand(batch, self.dep_embed_dim, seq, dim)
ax = torch.matmul(weight_adj, node_hiddens) # [batch, dim_e, seq, dim]
if self.pooling == 'avg':
ax = ax.mean(dim=1)
elif self.pooling == 'max':
ax, _ = ax.max(dim=1)
elif self.pooling == 'sum':
ax = ax.sum(dim=1)
# Ax: [batch, seq, dim]
gcn_outputs = self.W(ax)
weights_gcn_outputs = func.relu(gcn_outputs)
node_outputs = weights_gcn_outputs
# Edge update weight_adj[batch, dim_e, seq, seq]
weight_adj = weight_adj.permute(0, 2, 3, 1).contiguous() # [batch, seq, seq, dim_e]
node_outputs1 = node_outputs.unsqueeze(1).expand(batch, seq, seq, dim)
node_outputs2 = node_outputs1.permute(0, 2, 1, 3).contiguous()
edge_outputs = self.highway(weight_adj, node_outputs1, node_outputs2)
return edge_outputs, node_outputs
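# Minimal sketch (illustrative): one GCN step over a soft adjacency tensor. The node
# feature dim must equal gcn_dim and the edge feature dim must equal dep_embed_dim for
# the layers above to line up; `func` is assumed to be torch.nn.functional as imported
# at the top of this module.
def _demo_graph_conv_layer():
    layer = GraphConvLayer(dep_embed_dim=8, gcn_dim=8)
    weight_adj = torch.randn(2, 6, 6, 8)    # (batch, seq, seq, dim_e)
    node_hiddens = torch.randn(2, 6, 8)     # (batch, seq, dim)
    edge_out, node_out = layer(weight_adj, node_hiddens)
    return edge_out.shape, node_out.shape   # (2, 6, 6, 8), (2, 6, 8)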
class Indexer:
def __init__(self, tag2id, max_seq_len, spe_tag_dict):
self.tag2id = tag2id
self.max_seq_len = max_seq_len
self.spe_tag_dict = spe_tag_dict
def index_tag_list_w_matrix_pos(self, tags):
"""
:param tags: [[pos_i, pos_j, tag1], [pos_i, pos_j, tag2], ...]
:return:
"""
for t in tags:
if t[2] in self.tag2id:
t[2] = self.tag2id[t[2]]
else:
t[2] = self.spe_tag_dict["[UNK]"]
return tags
@staticmethod
def pad2length(tags, padding_tag, length):
if len(tags) < length:
tags.extend([padding_tag] * (length - len(tags)))
return tags[:length]
def index_tag_list(self, tags):
"""
tags: [t1, t2, t3, ...]
"""
tag_ids = []
for t in tags:
if t not in self.tag2id:
tag_ids.append(self.spe_tag_dict["[UNK]"])
else:
tag_ids.append(self.tag2id[t])
if len(tag_ids) < self.max_seq_len:
tag_ids.extend([self.spe_tag_dict["[PAD]"]] * (self.max_seq_len - len(tag_ids)))
return tag_ids[:self.max_seq_len]
@staticmethod
def get_shaking_idx2matrix_idx(matrix_size):
return MyMatrix.get_shaking_idx2matrix_idx(matrix_size)
@staticmethod
def get_matrix_idx2shaking_idx(matrix_size):
return MyMatrix.get_matrix_idx2shaking_idx(matrix_size)
@staticmethod
def points2multilabel_shaking_seq(points, matrix_size, tag_size):
"""
Convert points to a shaking sequence tensor
points: [(start_ind, end_ind, tag_id), ]
return:
shaking_seq: (shaking_seq_len, tag_size)
"""
matrix_idx2shaking_idx = Indexer.get_matrix_idx2shaking_idx(matrix_size)
shaking_seq_len = matrix_size * (matrix_size + 1) // 2
shaking_seq = torch.zeros(shaking_seq_len, tag_size).long()
for sp in points:
shaking_idx = matrix_idx2shaking_idx[sp[0]][sp[1]]
shaking_seq[shaking_idx][sp[2]] = 1
return shaking_seq
@staticmethod
def points2multilabel_shaking_seq_batch(batch_points, matrix_size, tag_size):
"""
Convert points to a shaking sequence tensor in batch (for training tags)
batch_points: a batch of points, [points1, points2, ...]
points: [(start_ind, end_ind, tag_id), ]
return:
batch_shaking_seq: (batch_size_train, shaking_seq_len, tag_size)
"""
matrix_idx2shaking_idx = Indexer.get_matrix_idx2shaking_idx(matrix_size)
shaking_seq_len = matrix_size * (matrix_size + 1) // 2
batch_shaking_seq = torch.zeros(len(batch_points), shaking_seq_len, tag_size).long()
for batch_id, points in enumerate(batch_points):
for sp in points:
shaking_idx = matrix_idx2shaking_idx[sp[0]][sp[1]]
batch_shaking_seq[batch_id][shaking_idx][sp[2]] = 1
return batch_shaking_seq
@staticmethod
def points2shaking_seq_batch(batch_points, matrix_size):
"""
Convert points to a shaking sequence tensor
batch_points: a batch of points, [points1, points2, ...]
points: [(start_ind, end_ind, tag_id), ]
return:
batch_shaking_seq: (batch_size_train, shaking_seq_len)
"""
matrix_idx2shaking_idx = Indexer.get_matrix_idx2shaking_idx(matrix_size)
shaking_seq_len = matrix_size * (matrix_size + 1) // 2
batch_shaking_seq = torch.zeros(len(batch_points), shaking_seq_len).long()
for batch_id, points in enumerate(batch_points):
for sp in points:
try:
shaking_idx = matrix_idx2shaking_idx[sp[0]][sp[1]]
except Exception as e:
raise e
else:
batch_shaking_seq[batch_id][shaking_idx] = sp[2]
return batch_shaking_seq
@staticmethod
def points2matrix_batch(batch_points, matrix_size):
"""
Convert points to a matrix tensor
batch_points: a batch of points, [points1, points2, ...]
points: [(start_ind, end_ind, tag_id), ]
return:
batch_matrix: (batch_size_train, matrix_size, matrix_size)
"""
batch_matrix = torch.zeros(len(batch_points), matrix_size, matrix_size).long()
for batch_id, points in enumerate(batch_points):
for pt in points:
batch_matrix[batch_id][pt[0]][pt[1]] = pt[2]
return batch_matrix
@staticmethod
def points2multilabel_matrix_batch(batch_points, matrix_size, tag_size):
"""
Convert points to a matrix tensor for multi-label tasks
batch_points: a batch of points, [points1, points2, ...]
points: [(i, j, tag_id), ]
return:
batch_matrix: shape: (batch_size_train, matrix_size, matrix_size, tag_size) # element 0 or 1
"""
batch_matrix = torch.zeros(len(batch_points), matrix_size, matrix_size, tag_size).long()
for batch_id, points in enumerate(batch_points):
for pt in points:
batch_matrix[batch_id][pt[0]][pt[1]][pt[2]] = 1
return batch_matrix
@staticmethod
def shaking_seq2points(shaking_tag):
"""
shaking_tag -> points
shaking_tag: shape: (shaking_seq_len, tag_size)
points: [(start_ind, end_ind, tag_id), ]
"""
points = []
shaking_seq_len = shaking_tag.size()[0]
matrix_size = int((2 * shaking_seq_len + 0.25) ** 0.5 - 0.5)
shaking_idx2matrix_idx = Indexer.get_shaking_idx2matrix_idx(matrix_size)
nonzero_points = torch.nonzero(shaking_tag, as_tuple=False)
for point in nonzero_points:
shaking_idx, tag_idx = point[0].item(), point[1].item()
pos1, pos2 = shaking_idx2matrix_idx[shaking_idx]
point = (pos1, pos2, tag_idx)
points.append(point)
return points
@staticmethod
def matrix2points(matrix_tag):
"""
matrix_tag -> points
matrix_tag: shape: (matrix_size, matrix_size, tag_size)
points: [(i, j, tag_id), ]
"""
points = []
nonzero_points = torch.nonzero(matrix_tag, as_tuple=False)
for point in nonzero_points:
i, j, tag_idx = point[0].item(), point[1].item(), point[2].item()
point = (i, j, tag_idx)
points.append(point)
return points
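# Round-trip sketch (illustrative): tag points survive encoding into a shaking sequence
# and decoding back, assuming the shaking-index maps provided by MyMatrix are mutual
# inverses.
def _demo_points_roundtrip():
    points = [(0, 2, 1), (1, 1, 3)]
    shaking_seq = Indexer.points2multilabel_shaking_seq(points, matrix_size=4, tag_size=5)
    assert set(Indexer.shaking_seq2points(shaking_seq)) == set(points)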
| python |
'''
Author: Mario Liu
Description: Module to detect faces with R200 camera.
Adapted from
https://docs.opencv.org/3.4.3/d7/d8b/tutorial_py_face_detection.html
'''
import logging
logging.basicConfig(level=logging.INFO)
import time
import numpy as np
import cv2
import pyrealsense as pyrs
face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
with pyrs.Service() as serv:
with serv.Device() as dev:
dev.apply_ivcam_preset(0)
cnt = 0
last = time.time()
smoothing = 0.9
fps_smooth = 30
while True:
cnt += 1
if (cnt % 10) == 0:
now = time.time()
dt = now - last
fps = 10/dt
fps_smooth = (fps_smooth * smoothing) + (fps * (1.0-smoothing))
last = now
dev.wait_for_frames()
# color
c = dev.color
c = cv2.cvtColor(c, cv2.COLOR_RGB2BGR)
gray = cv2.cvtColor(c, cv2.COLOR_BGR2GRAY)
# detect face
faces = face_cascade.detectMultiScale(c, 1.3, 5)
for (x,y,w,h) in faces:
cv2.rectangle(c,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = c[y:y+h, x:x+w]
# find distance to center
cx = int(round(x+(w/2)))
cy = int(round(y+(h/2)))
depth = dev.depth[cy][cx]
print("Face found at distance: " + str(depth/10.0) + " cm")
# depth
d = dev.depth * dev.depth_scale * 1000
d = cv2.applyColorMap(d.astype(np.uint8), cv2.COLORMAP_RAINBOW)
# join color and depth
cd = np.concatenate((c, d), axis=1)
cv2.putText(cd, str(fps_smooth)[:4], (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 0))
cv2.imshow('', cd)
if cv2.waitKey(1) & 0xFF == ord('q'):
break | python |
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('sud2.jpeg',0)
img = cv2.medianBlur(img,5)
ret,th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
th2 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_MEAN_C,\
cv2.THRESH_BINARY,11,2)
th3 = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,11,2)
titles = ['Original Image', 'Global Thresholding (v = 127)',
'Adaptive Mean Thresholding', 'Adaptive Gaussian Thresholding']
images = [img, th1, th2, th3]
for i in range(4):
plt.subplot(2,2,i+1),plt.imshow(images[i],'gray')
plt.title(titles[i])
plt.xticks([]),plt.yticks([])
plt.show() | python |
#
# Imported module functions
#
#https://camo.githubusercontent.com/582226b9ba41bcbc13eaa81d2764092abb443bd416578c175bc2c1c5742d0647/68747470733a2f2f692e696d6775722e636f6d2f6b7a6978316a492e706e67
# Use our SimpleRequests module for this experimental version.
from SimpleRequests import SimpleRequest
from SimpleRequests.SimpleRequest import error
# Use the datetime module for generating timestamps and snowflakes.
from datetime import datetime, timedelta,timezone
# Use the time module for generating timestamps that are backwards compatible with Python 2.
from time import mktime
# Use the os module for creating directories and writing files.
from os import makedirs, getcwd, path
# Use the mimetypes module to determine the mimetype of a file.
from mimetypes import MimeTypes
# Use the sqlite3 module to access SQLite databases.
from sqlite3 import connect, Row, IntegrityError
# Use the random module to choose from a list at random.
from random import choice
# Convert JSON to a Python dictionary for ease of traversal.
from json import loads
import dateutil.parser
import textmine as tx
from concurrent.futures import ThreadPoolExecutor as pool
import logging
import asyncio
from contextlib import suppress
#
# Lambda functions
#
logging.basicConfig(filename='./output.log', filemode='w', format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
# Return a random string of a specified length.
random_str = lambda length: ''.join([choice('0123456789ABCDEF') for i in range(length)])
# Get the mimetype string from an input filename.
mimetype = lambda name: MimeTypes().guess_type(name)[0] \
if MimeTypes().guess_type(name)[0] is not None \
else 'application/octet-stream'
# Return a Discord snowflake from a timestamp.
snowflake = lambda timestamp_s: (timestamp_s * 1000 - 1420070400000) << 22
# Return a timestamp from a Discord snowflake.
timestamp = lambda snowflake_t: ((snowflake_t >> 22) + 1420070400000) / 1000.0
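# Sanity-check sketch (illustrative): a whole-second UTC timestamp survives the
# snowflake round trip defined above.
def _demo_snowflake_roundtrip():
    ts = datetime(2021, 1, 1, tzinfo=timezone.utc).timestamp()
    assert timestamp(snowflake(int(ts))) == ts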
time_dff = 4
#
# Global functions
#
class Message(object):
def __init__(self, id, user_id, timestamp, content):
self.id = id
self.user_id = user_id
self.timestamp = timestamp
self.content = content
def snowtodatetime(snowflake_value):
ts = ((snowflake_value / 4194304) + 1420070400000)/1000
timestamp = datetime.utcfromtimestamp(ts)
return timestamp
def utctosnow(timestamp):
return((timestamp*1000) - 1420070400000) * 4194304
def get_day(day, month, year):
"""Get the timestamps from 00:00 to 23:59 of the given day.
:param day: The target day.
:param month: The target month.
:param year: The target year.
"""
min_time = mktime((year, month, day, 0, 0, 0, -1, -1, -1))
max_time = mktime((year, month, day, 23, 59, 59, -1, -1, -1))
return {
'00:00': snowflake(int(min_time)),
'23:59': snowflake(int(max_time))
}
def safe_name(name):
"""Convert name to a *nix/Windows compliant name.
:param name: The filename to convert.
"""
output = ""
for char in name:
if char not in '\\/<>:"|?*':
output += char
return output
def create_query_body(**kwargs):
"""Generate a search query string for Discord."""
query = ""
for key, value in kwargs.items():
if value is True and key != 'nsfw':
query += '&has=%s' % key[:-1]
if key == 'nsfw':
query += '&include_nsfw=%s' % str(value).lower()
return query
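# Example (illustrative): plural flags become singular "has" filters and nsfw becomes
# an include_nsfw parameter.
def _demo_query_body():
    q = create_query_body(images=True, files=False, embeds=False,
                          links=True, videos=False, nsfw=True)
    assert q == "&has=image&has=link&include_nsfw=true"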
class DiscordConfig(object):
"""Just a class used to store configs as objects."""
class Discord:
"""Experimental Discord scraper class."""
def __init__(self, config='config.json', apiver='v6'):
"""Discord constructor.
:param config: The configuration JSON file.
:param apiver: The current Discord API version.
"""
with open(config, 'r') as configfile:
configdata = loads(configfile.read())
cfg = type('DiscordConfig', (object,), configdata)()
if cfg.token == "" or cfg.token is None:
error('You must have an authorization token set in %s' % config)
exit(-1)
self.api = apiver
self.buffer = cfg.buffer
self.headers = {
'user-agent': cfg.agent,
'authorization': cfg.token
}
self.types = cfg.types
self.query = create_query_body(
images=cfg.query['images'],
files=cfg.query['files'],
embeds=cfg.query['embeds'],
links=cfg.query['links'],
videos=cfg.query['videos'],
nsfw=cfg.query['nsfw']
)
self.directs = cfg.directs if len(cfg.directs) > 0 else {}
self.servers = cfg.servers if len(cfg.servers) > 0 else {}
# Save us the time by exiting out when there's nothing to scrape.
if len(cfg.directs) == 0 and len(cfg.servers) == 0:
error('No servers or DMs were set to be grabbed, exiting.')
exit(0)
'''
dbdir = path.join(getcwd(), 'data')
if not path.exists(dbdir):
makedirs(dbdir)
dbfile = path.join(dbdir, 'users.db')
self.db = connect(dbfile)
self.c = self.db.cursor()
self.c.row_factory = Row
'''
self.tx_obj = tx.NLPstock()
self.start_time = None
self.end_time = None
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def get_server_name(self, serverid, isdm=False):
"""Get the server name by its ID.
:param serverid: The server ID.
:param isdm: A flag to check whether we're in a DM or not.
"""
if isdm:
return serverid
request = SimpleRequest(self.headers).request
server = request.grab_page('https://discordapp.com/api/%s/guilds/%s' % (self.api, serverid))
if server is not None and len(server) > 0:
return '%s_%s' % (serverid, safe_name(server['name']))
else:
error('Unable to fetch server name from id, generating one instead.')
return '%s_%s' % (serverid, random_str(12))
def get_channel_name(self, channelid, isdm=False):
"""Get the channel name by its ID.
:param channelid: The channel ID.
:param isdm: A flag to check whether we're in a DM or not.
"""
if isdm:
return channelid
request = SimpleRequest(self.headers).request
channel = request.grab_page('https://discordapp.com/api/%s/channels/%s' % (self.api, channelid))
if channel is not None and len(channel) > 0:
return '%s_%s' % (channelid, safe_name(channel['name']))
else:
error('Unable to fetch channel name from id, generating one instead.')
return '%s_%s' % (channelid, random_str(12))
@staticmethod
def create_folders(server, channel):
"""Create the folder structure.
:param server: The server name.
:param channel: The channel name.
"""
folder = path.join(getcwd(), 'data', server, channel)
if not path.exists(folder):
makedirs(folder)
return folder
def download(self, url, folder):
"""Download the contents of a URL.
:param url: The target URL.
:param folder: The target folder.
"""
request = SimpleRequest(self.headers).request
request.set_header('user-agent', 'Mozilla/5.0 (X11; Linux x86_64) Chrome/78.0.3904.87 Safari/537.36')
filename = safe_name('%s_%s' % (url.split('/')[-2], url.split('/')[-1]))
if not path.exists(filename):
request.stream_file(url, folder, filename, self.buffer)
def check_config_mimetypes(self, source, folder):
"""Check the config settings against the source mimetype.
:param source: Response from Discord search.
:param folder: Folder where the data will be stored.
"""
for attachment in source['attachments']:
if self.types['images'] is True:
if mimetype(attachment['proxy_url']).split('/')[0] == 'image':
self.download(attachment['proxy_url'], folder)
if self.types['videos'] is True:
if mimetype(attachment['proxy_url']).split('/')[0] == 'video':
self.download(attachment['proxy_url'], folder)
if self.types['files'] is True:
if mimetype(attachment['proxy_url']).split('/')[0] not in ['image', 'video']:
self.download(attachment['proxy_url'], folder)
@staticmethod
def insert_text(server, channel, message):
"""Insert the text data into our SQLite database file.
:param server: The server name.
:param channel: The channel name.
:param message: Our message object.
"""
dbdir = path.join(getcwd(), 'data')
if not path.exists(dbdir):
makedirs(dbdir)
dbfile = path.join(dbdir, 'text.db')
db = connect(dbfile)
c = db.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS text_%s_%s (
id TEXT,
name TEXT,
content TEXT,
timestamp TEXT
)''' % (server, channel))
c.execute('INSERT INTO text_%s_%s VALUES (?,?,?,?)' % (server, channel), (
message['author']['id'],
'%s#%s' % (message['author']['username'], message['author']['discriminator']),
message['content'],
message['timestamp']
))
#print(message.keys())
#print(f"{message['author']['id']} {message['author']['username']} {message['author']['discriminator']} {message['timestamp']}")
#dt_time = dateutil.parser.isoparse(message['timestamp'])
#ts_comp = dt_time.replace(tzinfo=timezone.utc).timestamp()
print(f"{message['content']} {message['timestamp']}")
db.commit()
db.close()
def check_AH(self, dt):
start = dt.replace(hour=9, minute=30, second=0, microsecond=0)
end = dt.replace(hour=16, minute=0, second=0, microsecond=0)
if dt > start:
if dt > end:
return True
else:
return False
else:
return True
def insert_text_player(self, server, channel, message, message_hour):
"""Insert the text data into our SQLite database file.
:param server: The server name.
:param channel: The channel name.
:param message: Our message object.
"""
global time_dff
dbdir = path.join(getcwd(), 'data')
if not path.exists(dbdir):
makedirs(dbdir)
dbfile = path.join(dbdir, 'user.db')
db = connect(dbfile)
c = db.cursor()
'''
if self.check_AH(message_hour+timedelta(hours= -time_dff)):
self.tx_obj.AH = True
logging.info(f"staring after hours for the day {message_hour+timedelta(hours= -time_dff)}")
else:
self.tx_obj.AH = False
'''
self.tx_obj.current_time = message_hour
#try:
stock_string = self.tx_obj.get_stocks(message)
#except Exception as e:
#logging.error(f"getting stocks error {e} {message}")
mentions = message["mentions"]
if mentions:
try:
reference = message['message_reference']
try:
c.execute("SELECT * FROM text_%s_%s WHERE id = ?" % (server, mentions[0]['id']) , (reference['message_id'],))
#rows = self.c.fetchall()
#mention_stock_string = rows[-1]
#print("EXECUTING finding message from refered user: ", mentions[0]['id'])
except Exception as e:
#print("cant find token table from user ", mentions[0]['id'])
pass
except KeyError:
#print("not reply simply pin acess last topics org")
try:
c.execute('SELECT * FROM text_%s_%s ORDER BY id DESC LIMIT 1' % (server, mentions[0]['id']))
#print("EXECUTING finding last message from pinned user: ", mentions[0]['id'])
except Exception:
pass
result = c.fetchone()
if result:
#print(f"ORG from {mentions[0]['id']} is {result[-1]} {result[2]}")
stocks_temp = result[-1].split()
stock_string += stocks_temp
stock_string = set(stock_string)
#stock_string += mention_stock_string
stock_string = ' '.join(stock_string)
c.execute('''CREATE TABLE IF NOT EXISTS text_%s_%s (
id TEXT NOT NULL PRIMARY KEY,
name TEXT,
content TEXT,
timestamp TEXT,
stocks TEXT
)''' % (server, message['author']['id']))
c.execute('INSERT INTO text_%s_%s VALUES (?,?,?,?,?)' % (server, message['author']['id']), (
message['id'],
channel,
message['content'],
message['timestamp'],
stock_string
))
#print(message.keys())
#print(f"{message['author']['id']} {message['author']['username']} {message['author']['discriminator']} {message['timestamp']}")
#dt_time = dateutil.parser.isoparse(message['timestamp'])
#ts_comp = dt_time.replace(tzinfo=timezone.utc).timestamp()
print(f"{message['content']} - stocks: {stock_string}")
db.commit()
db.close()
def grab_data_test(self, folder, server, channel, isdm=False, inter=30):
"""Scan and grab the attachments.
:param folder: The folder name.
:param server: The server name.
:param channel: The channel name.
:param isdm: A flag to check whether we're in a DM or not.
:param inter: interval of scrape in seconds
"""
date = datetime.now()
target_day = date + timedelta(days=-200)
        while target_day <= date:
print(f"getting data for {date} target is {target_day}")
#start_snow = int(utctosnow(date.replace(day = date.day-1, hour=0, minute=0, second=0, microsecond=0, tzinfo=timezone.utc).timestamp()))
#end_snow = int(utctosnow(date.replace(hour=23, minute=59, second=59, microsecond=59, tzinfo=timezone.utc).timestamp()))
today = get_day(target_day.day, target_day.month, target_day.year)
start_snow = today["00:00"]
end_snow = today['23:59']
print(f"{start_snow}-{end_snow}")
print()
request = SimpleRequest(self.headers).request
request.set_header('referer', 'https://discordapp.com/channels/@me/%s' % channel)
content = request.grab_page(
'https://discordapp.com/api/%s/channels/%s/messages/search?min_id=%s&max_id=%s&%s' %
(self.api, channel, start_snow, end_snow, self.query)
)
try:
if content['messages'] is not None:
for messages in content['messages'][::-1]:
for message in messages[::-1]:
#self.check_config_mimetypes(message, folder)
if self.types['text']:
if len(message['content']) > 0:
try:
                                        self.insert_text_player(server, channel, message, target_day)
except IntegrityError:
pass
except TypeError as e:
print("type error on getting message ", e)
#break
target_day += timedelta(days=1)
def grab_server_data(self):
"""Scan and grab the attachments within a server."""
for server, channels in self.servers.items():
for channel in channels:
folder = self.create_folders(
self.get_server_name(server),
self.get_channel_name(channel)
)
self.grab_data_current(folder, server, channel)
def grab_dm_data(self):
"""Scan and grab the attachments within a direct message."""
for alias, channel in self.directs.items():
folder = self.create_folders(
path.join('Direct Messages', alias),
channel
)
self.grab_data(folder, alias, channel, True)
async def grab_data_current(self, server, channel, isdm=False, inter=30):
#the end time
"""Scan and grab the attachments.
:param folder: The folder name.
:param server: The server name.
:param channel: The channel name.
:param isdm: A flag to check whether we're in a DM or not.
:param inter: interval of scrape in seconds
"""
global time_dff
inter_before = datetime.now() + timedelta(hours=time_dff)
print("current time is ", inter_before)
inter_after = inter_before + timedelta(seconds=inter)
#ts_value_now = dt_time.replace(tzinfo=timezone.utc).timestamp()
while True:
current_time = datetime.now() + timedelta(hours=time_dff)
#print(f"waiting for {inter_after}, current {current_time}")
if current_time >= inter_after:
#inter_before -= timedelta(seconds=5) #offset to get the overlap message
start_snow_dt = inter_before.replace(tzinfo=timezone.utc) + timedelta(seconds=-2)
start_snow = int(utctosnow(start_snow_dt.timestamp()))
end_snow_dt = inter_after.replace(tzinfo=timezone.utc) + timedelta(seconds=2)
end_snow = int(utctosnow(end_snow_dt.timestamp()))
print(f"Processing time interval {inter_before} to {current_time}")
request = SimpleRequest(self.headers).request
request.set_header('referer', 'https://discordapp.com/channels/%s/%s' % (server, channel))
content = request.grab_page(
'https://discordapp.com/api/%s/guilds/%s/messages/search?channel_id=%s&min_id=%s&max_id=%s&%s' %
(self.api, server, channel, start_snow, end_snow, self.query)
)
if content:
if content['messages'] is not None:
for messages in content['messages'][::-1]:
for message in messages[::-1]:
#self.check_config_mimetypes(message, folder)
#print(message['id'])
if self.types['text'] is True:
if len(message['content']) > 0:
try:
self.insert_text_player(server, channel, message, start_snow_dt)
except IntegrityError:
logging.error(f"{message['id']} exists by {message['author']['id']} {message['content']} {message['author']['username']}")
else:
logging.info(f"{start_snow_dt}-{end_snow_dt} no content {content}")
inter_before = current_time
inter_after = inter_before + timedelta(seconds=inter)
print()
await asyncio.sleep(0.5)
def grab_data(self, folder, server, channel, isdm=False):
"""Scan and grab the attachments.
:param folder: The folder name.
:param server: The server name.
:param channel: The channel name.
:param isdm: A flag to check whether we're in a DM or not.
"""
date = datetime.today()
while date.year >= 2021:
request = SimpleRequest(self.headers).request
today = get_day(date.day, date.month, date.year)
if not isdm:
request.set_header('referer', 'https://discordapp.com/channels/%s/%s' % (server, channel))
content = request.grab_page(
'https://discordapp.com/api/%s/guilds/%s/messages/search?channel_id=%s&min_id=%s&max_id=%s&%s' %
(self.api, server, channel, today['00:00'], today['23:59'], self.query)
)
else:
request.set_header('referer', 'https://discordapp.com/channels/@me/%s' % channel)
content = request.grab_page(
'https://discordapp.com/api/%s/channels/%s/messages/search?min_id=%s&max_id=%s&%s' %
(self.api, channel, today['00:00'], today['23:59'], self.query)
)
try:
if content['messages'] is not None:
for messages in content['messages']:
for message in messages:
#self.check_config_mimetypes(message, folder)
if self.types['text'] is True:
if len(message['content']) > 0:
self.insert_text(server, channel, message)
except TypeError:
continue
break
date += timedelta(days=-1)
def grab_server_data(self):
"""Scan and grab the attachments within a server."""
for server, channels in self.servers.items():
for channel in channels:
print(f'Scraping data from {self.get_server_name(server)} {self.get_channel_name(channel)}')
self.loop.create_task(self.grab_data_current(server, channel))
self.loop.run_forever()
def grab_dm_data(self):
"""Scan and grab the attachments within a direct message."""
for alias, channel in self.directs.items():
folder = self.create_folders(
path.join('Direct Messages', alias),
channel
)
self.grab_data(folder, alias, channel, True)
#
# Initializer
#
if __name__ == '__main__':
ds = Discord()
ds.grab_server_data()
#ds.grab_dm_data()
| python |
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class SpecimenContainerTypeCode(GenericTypeCode):
"""
SpecimenContainerType
From: http://hl7.org/fhir/ValueSet/specimen-container-type in valuesets.xml
Checks on the patient prior specimen collection. All SNOMED CT concepts
descendants of 706041008 |Device for body fluid and tissue
collection/transfer/processing (physical object)|
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://snomed.info/sct
"""
codeset: FhirUri = "http://snomed.info/sct"
| python |
import json
from itertools import groupby
from operator import itemgetter
import django
from django import forms
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator
from django.http import HttpResponse
from django.shortcuts import render
from django.templatetags.static import static
from django.urls import reverse, NoReverseMatch
from django.views.decorators.csrf import csrf_exempt
from linkcheck import update_lock
from linkcheck.linkcheck_settings import RESULTS_PER_PAGE
from linkcheck.models import Link
from linkcheck.utils import get_coverage_data
@staff_member_required
def coverage(request):
coverage_data = get_coverage_data()
if request.GET.get('config', False):
# Just render the suggested linklist code
template = 'linkcheck/suggested_configs.html'
context = {'coverage_data': [x['suggested_config'] for x in coverage_data]}
else:
# Render a nice report
template = 'linkcheck/coverage.html'
context = {'coverage_data': coverage_data}
return render(request, template, context)
@staff_member_required
@csrf_exempt
def report(request):
outerkeyfunc = itemgetter('content_type_id')
content_types_list = []
if request.method == 'POST':
ignore_link_id = request.GET.get('ignore', None)
if ignore_link_id is not None:
link = Link.objects.get(id=ignore_link_id)
link.ignore = True
link.save()
if request.is_ajax():
json_data = json.dumps({'link': ignore_link_id})
return HttpResponse(json_data, content_type='application/javascript')
unignore_link_id = request.GET.get('unignore', None)
if unignore_link_id is not None:
link = Link.objects.get(id=unignore_link_id)
link.ignore = False
link.save()
if request.is_ajax():
json_data = json.dumps({'link': unignore_link_id})
return HttpResponse(json_data, content_type='application/javascript')
recheck_link_id = request.GET.get('recheck', None)
if recheck_link_id is not None:
link = Link.objects.get(id=recheck_link_id)
url = link.url
url.check_url(external_recheck_interval=0)
links = [x[0] for x in url.links.values_list('id')]
if request.is_ajax():
json_data = json.dumps({
'links': links,
'message': url.message,
'colour': url.colour,
})
return HttpResponse(json_data, content_type='application/javascript')
link_filter = request.GET.get('filters', 'show_invalid')
qset = Link.objects.order_by('-url__last_checked')
if link_filter == 'show_valid':
qset = qset.filter(ignore=False, url__status__exact=True)
report_type = 'Good Links'
elif link_filter == 'show_unchecked':
qset = qset.filter(ignore=False, url__last_checked__exact=None)
report_type = 'Untested Links'
elif link_filter == 'ignored':
qset = qset.filter(ignore=True)
report_type = 'Ignored Links'
else:
qset = qset.filter(ignore=False, url__status__exact=False)
report_type = 'Broken Links'
paginated_links = Paginator(qset, RESULTS_PER_PAGE, 0, True)
try:
page = int(request.GET.get('page', '1'))
except:
page = 0
# offset = (page - 1) * RESULTS_PER_PAGE
links = paginated_links.page(page)
# This code groups links into nested lists by content type and object id
# It's a bit nasty but we can't use groupby unless be get values()
# instead of a queryset because of the 'Object is not subscriptable' error
t = sorted(links.object_list.values(), key=outerkeyfunc)
for tk, tg in groupby(t, outerkeyfunc):
innerkeyfunc = itemgetter('object_id')
objects = []
tg = sorted(tg, key=innerkeyfunc)
for ok, og in groupby(tg, innerkeyfunc):
content_type = ContentType.objects.get(pk=tk)
og = list(og)
try:
object = None
if content_type.model_class():
object = content_type.model_class().objects.get(pk=ok)
except ObjectDoesNotExist:
pass
try:
admin_url = object.get_admin_url() # TODO allow method name to be configurable
except AttributeError:
try:
admin_url = reverse('admin:%s_%s_change' % (content_type.app_label, content_type.model), args=[ok])
except NoReverseMatch:
admin_url = None
objects.append({
'object': object,
'link_list': Link.objects.in_bulk([x['id'] for x in og]).values(), # Convert values_list back to queryset. Do we need to get values() or do we just need a list of ids?
'admin_url': admin_url,
})
content_types_list.append({
'content_type': content_type,
'object_list': objects
})
# Pass any querystring data back to the form minus page
rqst = request.GET.copy()
if 'page' in rqst:
del rqst['page']
return render(request, 'linkcheck/report.html', {
'content_types_list': content_types_list,
'pages': links,
'filter': link_filter,
'media': forms.Media(js=[static(get_jquery_min_js())]),
'qry_data': rqst.urlencode(),
'report_type': report_type,
'ignored_count': Link.objects.filter(ignore=True).count(),
},
)
def get_jquery_min_js():
"""
Return the location of jquery.min.js. It's an entry point to adapt the path
when it changes in Django.
"""
return 'admin/js/vendor/jquery/jquery.min.js'
def get_status_message():
if update_lock.locked():
return "Still checking. Please refresh this page in a short while. "
else:
broken_links = Link.objects.filter(ignore=False, url__status=False).count()
if broken_links:
return (
"<span style='color: red;'>We've found {} broken link{}.</span><br>"
"<a href='{}'>View/fix broken links</a>".format(
broken_links,
"s" if broken_links > 1 else "",
reverse('linkcheck_report'),
)
)
else:
return ''
| python |
# Sum Compare
# Get 3 numbers from the user. Find the
# biggest number and add them all together.
# If the sum is bigger than 2 times the
# biggest of the 3 numbers, then print the sum.
# If it's smaller, multiply the sum by 3 and print the product.
# write code here
a = int(input())
b = int(input())
c = int(input())
total = a + b + c
if total > 2 * max(a, b, c):
    print(total)
else:
    print(total * 3)
| python |
import tensorflow as tf
import math
class BatchNormalization(tf.keras.layers.BatchNormalization):
"""Make trainable=False freeze BN for real (the og version is sad).
ref: https://github.com/zzh8829/yolov3-tf2
"""
def call(self, x, training=False):
if training is None:
training = tf.constant(False)
training = tf.logical_and(training, self.trainable)
return super().call(x, training)
def safe_norm(x, epsilon=1e-12, axis=None, keep_dims=False):
return tf.sqrt(tf.reduce_sum(x ** 2, axis=axis, keepdims=keep_dims) + epsilon)
class ArcMarginPenaltyLogists(tf.keras.layers.Layer):
"""ArcMarginPenaltyLogists"""
def __init__(self, num_classes, margin=0.5, logist_scale=64, **kwargs):
super(ArcMarginPenaltyLogists, self).__init__(**kwargs)
self.num_classes = num_classes
self.margin = margin
self.logist_scale = logist_scale
def build(self, input_shape):
        self.w = self.add_weight(
            "weights", shape=[int(input_shape[-1]), self.num_classes])
self.cos_m = tf.identity(math.cos(self.margin), name='cos_m')
self.sin_m = tf.identity(math.sin(self.margin), name='sin_m')
self.th = tf.identity(math.cos(math.pi - self.margin), name='th')
self.mm = tf.multiply(self.sin_m, self.margin, name='mm')
def call(self, embds, labels):
# normed_embds = tf.nn.l2_normalize(embds, axis=1, name='normed_embd')
# normed_w = tf.nn.l2_normalize(self.w, axis=0, name='normed_weights')
embedding_norm = safe_norm(embds, axis=1, keep_dims=True)
normed_embds = tf.divide(embds, embedding_norm, name='normed_embd')
weights_norm = safe_norm(self.w, axis=0, keep_dims=True)
normed_w = tf.divide(self.w, weights_norm, name='normed_weights')
cos_t = tf.matmul(normed_embds, normed_w, name='cos_t')
sin_t = tf.sqrt(1. - cos_t ** 2, name='sin_t')
cos_mt = tf.subtract(
cos_t * self.cos_m, sin_t * self.sin_m, name='cos_mt')
cos_mt = tf.where(cos_t > self.th, cos_mt, cos_t - self.mm)
mask = tf.one_hot(tf.cast(labels, tf.int32), depth=self.num_classes,
name='one_hot_mask')
logists = tf.where(mask == 1., cos_mt, cos_t)
logists = tf.multiply(logists, self.logist_scale, 'arcface_logist')
return logists
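# Shape sketch (illustrative): 4 classes, 8-dim embeddings, batch of 2. Assumes the
# layer above builds cleanly under the targeted TensorFlow 2.x version.
def _demo_arc_margin():
    layer = ArcMarginPenaltyLogists(num_classes=4)
    logits = layer(tf.random.normal([2, 8]), tf.constant([1, 3]))
    return logits.shape   # TensorShape([2, 4])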
| python |
"""
Some simple code to make particle flux spectrograms with matplotlib
@author: Liam M. Kilcommons
(minor modifications R. Redmon, A.G. Burrell)
"""
import numpy as np
import matplotlib.pyplot as pp
import datetime as dt
def dmsp_spectrogram(times, flux, channel_energies=None, lat=None, lt=None,
fluxunits='eV/cm$^2$-s-sr-eV', logy=True, datalabel=None,
cblims=None, title=None, ax=None, ax_cb=None,
label_it=True, color_map="Spectral_r"):
""" Plot the DMSP spectrogram
Parameters
----------
times : numpy.ndarray (dtype=object)(shape=(n,1))
Array of datetimes corresponding to the timestamps of the rows of the
flux array
flux : numpy.ndarray (shape=(n,len(channel_energies)))
Array of fluxes, 1 per channel, per timestamp
    channel_energies : numpy.ndarray, optional
Array of particle detector channel center energies in eV, if
None uses default DMSP energies
channel_energies = [ 30000., 20400., 13900., 9450., 6460.,
4400., 3000., 2040., 1392., 949.,
646., 440., 300., 204., 139.,
95., 65., 44., 30.]
fluxunits : str, optional
Units of flux for labeling the spectrogram (title and colorbar)
Defaults to eV/cm$^2$-s-sr-eV
logy : boolean, optional
Flag to make the y axis log scale
(useful for log-spaced channel_energies)
lat : numpy.ndarray (shape=(n,1)), optional
If lat is not None, then it must be the latitude
(magnetic or otherwise) of the spacecraft at
every timestamp in times. Setting this value
will cause the latitude to be added to the
x axis labels
lt : numpy.ndarray (shape=(n,1)), optional
If lat is not None, then it must be the localtime
(magnetic or otherwise) of the spacecraft at
every timestamp in times. Setting this value
will cause the localtime to be added to the
x axis labels
datalabel : str, optional
Some text to add to the title of the graphic
goes on a line above 'Flux [units of flux]'
cblims : None or 2-element list, optional
The limits for the colorbar. If None,
then the colorbar range is set to [flux.min(),flux.max()]
ax : None or axis reference, optional
Allows caller to specify axis for spectrogram; helpful for stackplot.
If 'ax' is specified then so should 'ax_cb'.
ax_cb : None or colorbar axis reference, optional
Allows caller to specify axis for spectrogram color bar; helpful for
stackplot. If 'ax' is specified then so should 'ax_cb'.
"""
    #Module for logarithmic colorbar spacing
from matplotlib.colors import LogNorm
#Module for locating dates on the x axis
import matplotlib.dates as mpldates
#Module for getting colormaps
import matplotlib.cm as cm
if channel_energies is None:
channel_energies = np.array([ 30000., 20400., 13900., 9450., 6460.,
4400., 3000., 2040., 1392., 949., 646.,
440., 300., 204., 139., 95., 65., 44.,
30.])
# if Axis not specified then create one
if ax is None:
f = pp.figure(figsize=(12,6),dpi=300)
ax = pp.axes()
if datalabel is not None:
ax.set_title(datalabel+'\n Flux [%s]' %(fluxunits))
else:
pass
#ax.set_title('Flux [%s]' % (fluxunits))
if isinstance(times,np.ndarray):
times = times.flatten()
if isinstance(times[0], dt.datetime):
mpl_times = mpldates.date2num(times)
else:
mpl_times = times
#--------------------------------------------------------------------------
# Channel center energies to bin starts
# Since DMSP SSJ channels are log-linearly spaced, the bins widths are taken
# to be log-constant and the bins are placed symmetric about the channel
# center energies. This is probably not exactly correct since the actual
# instrument response/sensitivity functions are likely more linear than
# log linear. Recall that channels are listed as [30,000 eV to 30 eV] in
# reverse order.
#--------------------------------------------------------------------------
# Hard coded start/end bins taken from SSDP; not sure how they are derived,
# though this does result in bins visually centered correctly on their
# central energies
bin_edges = np.logspace(np.log10(36340.), np.log10(24.76),
len(channel_energies) + 1) # add one for endpoint
T,CH_E = np.meshgrid(mpl_times, bin_edges)
# Infinite, and Negative fluxes => NaN
inds = np.nonzero((~np.isfinite(flux)) | (flux < 0.))
flux[inds] = np.nan
# Mask nan fluxes so that pcolor knows to use the cmap bad value
masked_flux = np.ma.masked_where(np.isnan(flux),flux)
if cblims is None:
z_min = np.nanmin(flux)
z_max = np.nanmax(flux)
else:
z_min = cblims[0]
z_max = cblims[1]
#Set the over and under-range colors for the colorbar
cmap = cm.get_cmap(color_map)
cmap.set_bad('white',.1)
cmap.set_over('black')
cmap.set_under('grey')
mappable = ax.pcolormesh(T, CH_E, masked_flux.transpose(), cmap=cmap,
norm=LogNorm(vmin=z_min, vmax=z_max))
#mappable.set_rasterized( True )
if ax_cb is None:
pp.colorbar(mappable,label=fluxunits,ax=ax)
else:
pp.colorbar(mappable,label=fluxunits,cax=ax_cb)
# if Axis not specified then add x-axis tick marks
if label_it and isinstance(times[0], dt.datetime):
plotwidth_h = (times[-1]-times[0]).total_seconds()/3600.
plotwidth_m = (times[-1]-times[0]).total_seconds()/60.
if plotwidth_m <= 10.:
# if the plot width is less than 10 minutes tick mark every minute
majloc = mpldates.MinuteLocator(interval=1)
elif plotwidth_m <= 30.:
# if the plot width is less than 1/2 hour tick mark every 5 minutes
majloc = mpldates.MinuteLocator(interval=5)
elif plotwidth_h <= 1:
# if the plot width is less than 1 hour, but more than 30 minutes,
# tick mark every 10 minutes
majloc = mpldates.MinuteLocator(interval=10)
elif plotwidth_h <= 3:
# if less than 3 hours, but more than 1 use every 15 minutes
majloc = mpldates.MinuteLocator(interval=15)
elif plotwidth_h <= 5:
# if less than 5 hours, but more than 3 use every half hour
majloc = mpldates.MinuteLocator(interval=30)
else:
majloc = mpldates.HourLocator() #tick mark every hour
#Set the date locator
ax.xaxis.set_major_locator(majloc)
#This is slow and throws errors if used with pcolor, used pcolormesh
# instead
#ax.set_yscale('log')
#Manually create the tick labels
#There is probably a better way to do this with FuncFormatter, but I
        # couldn't figure out how to get all of the relevant lat and LT
# information into it
#Get the tick marks
xticks = ax.get_xticks()
xlabels = []
for tick in xticks:
ind = np.nonzero(mpl_times==tick)[0] #Nonzero returns array ARG!
if len(ind)>0:
#Sometimes tick is not found if it wants to tickmark outside of
# data range. Have to put additional index to get datetime
# instead of array of length 1 with datetime in it
tickstr = "%.2d:%.2d" % (times[ind[0]].hour,
times[ind[0]].minute)
if lat is not None:
tickstr+="\n%.2f" % (lat[ind])
if lt is not None:
tickstr+="\n%.2f" % (lt[ind])
xlabels.append(tickstr)
else:
# Convert the tick position to a time
dtime = mpldates.num2date(tick)
xlabels.append('%.2d:%.2d' % (dtime.hour, dtime.minute))
ax.set_xticklabels(xlabels)
ax.set_yscale('log')
ax.set_ylim([channel_energies.min(),channel_energies.max()])
ax.set_ylabel('Channel E \n(log)[eV]')
# In the case that caller didn't specify the axis to use return new figure
if 'f' in locals():
# f.savefig('/home/liamk/test.png',dpi=300,figsize=(12,6))
return f
| python |
from django.apps import apps
from django.forms.models import ModelChoiceField, ModelMultipleChoiceField
from django.forms import ChoiceField
from smart_selects.widgets import ChainedSelect, ChainedSelectMultiple
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_str as force_text
get_model = apps.get_model
class ChainedModelChoiceField(ModelChoiceField):
def __init__(
self,
to_app_name,
to_model_name,
chained_field,
chained_model_field,
foreign_key_app_name,
foreign_key_model_name,
foreign_key_field_name,
show_all,
auto_choose,
sort=True,
manager=None,
initial=None,
view_name=None,
*args,
**kwargs
):
defaults = {
"widget": ChainedSelect(
to_app_name,
to_model_name,
chained_field,
chained_model_field,
foreign_key_app_name,
foreign_key_model_name,
foreign_key_field_name,
show_all,
auto_choose,
sort,
manager,
view_name,
),
}
defaults.update(kwargs)
if "queryset" not in kwargs:
queryset = get_model(to_app_name, to_model_name).objects.all()
super(ChainedModelChoiceField, self).__init__(
queryset=queryset, initial=initial, *args, **defaults
)
else:
super(ChainedModelChoiceField, self).__init__(
initial=initial, *args, **defaults
)
def _get_choices(self):
self.widget.queryset = self.queryset
choices = super(ChainedModelChoiceField, self)._get_choices()
return choices
choices = property(_get_choices, ChoiceField._set_choices)
class ChainedManyToManyField(ModelMultipleChoiceField):
def __init__(
self,
to_app_name,
to_model_name,
chain_field,
chained_model_field,
foreign_key_app_name,
foreign_key_model_name,
foreign_key_field_name,
auto_choose,
horizontal,
verbose_name="",
manager=None,
initial=None,
*args,
**kwargs
):
defaults = {
"widget": ChainedSelectMultiple(
to_app_name,
to_model_name,
chain_field,
chained_model_field,
foreign_key_app_name,
foreign_key_model_name,
foreign_key_field_name,
auto_choose,
horizontal,
verbose_name,
manager,
),
}
defaults.update(kwargs)
if "queryset" not in kwargs:
queryset = get_model(to_app_name, to_model_name).objects.all()
super(ChainedManyToManyField, self).__init__(
queryset=queryset, initial=initial, *args, **defaults
)
else:
super(ChainedManyToManyField, self).__init__(
initial=initial, *args, **defaults
)
class GroupedModelSelect(ModelChoiceField):
def __init__(self, queryset, order_field, *args, **kwargs):
self.order_field = order_field
super(GroupedModelSelect, self).__init__(queryset, *args, **kwargs)
def _get_choices(self):
# If self._choices is set, then somebody must have manually set
# the property self.choices. In this case, just return self._choices.
if hasattr(self, "_choices"):
return self._choices
# Otherwise, execute the QuerySet in self.queryset to determine the
# choices dynamically. Return a fresh QuerySetIterator that has not been
# consumed. Note that we're instantiating a new QuerySetIterator *each*
# time _get_choices() is called (and, thus, each time self.choices is
# accessed) so that we can ensure the QuerySet has not been consumed. This
# construct might look complicated but it allows for lazy evaluation of
# the queryset.
group_indexes = {}
choices = [("", self.empty_label or "---------")]
i = len(choices)
for item in self.queryset:
order_field = getattr(item, self.order_field)
group_index = order_field.pk
if group_index not in group_indexes:
group_indexes[group_index] = i
                choices.append([force_text(order_field), []])
i += 1
choice_index = group_indexes[group_index]
choices[choice_index][1].append(self.make_choice(item))
return choices
def make_choice(self, obj):
return (obj.pk, " " + self.label_from_instance(obj))
choices = property(_get_choices, ChoiceField._set_choices)
| python |
class Person(object):
def demo(self):
print('888') | python |
import datetime
import json
import argparse
from typing import Any, Dict
import pytz
from astral import LocationInfo, Observer, sun
options = argparse.ArgumentParser()
options.add_argument(
"-n",
"--name",
dest="name",
default="Somewhere",
help="Location name (free-form text)",
)
options.add_argument(
"-r", "--region", dest="region", default="On Earth", help="Region (free-form text)"
)
options.add_argument(
"-d", "--date", dest="date", help="Date to compute times for (yyyy-mm-dd)"
)
options.add_argument("-t", "--tzname", help="Timezone name")
options.add_argument("latitude", type=float, help="Location latitude (float)")
options.add_argument("longitude", type=float, help="Location longitude (float)")
options.add_argument(
"elevation", nargs="?", type=float, default=0.0, help="Elevation in metres (float)"
)
args = options.parse_args()
loc = LocationInfo(
args.name, args.region, args.tzname, args.latitude, args.longitude
)
obs = Observer(args.latitude, args.longitude, args.elevation)
kwargs: Dict[str, Any] = {}
kwargs["observer"] = obs
if args.date is not None:
try:
kwargs["date"] = datetime.datetime.strptime(args.date, "%Y-%m-%d").date()
except: # noqa: E722
kwargs["date"] = datetime.date.today()
sun_as_str = {}
format_str = "%Y-%m-%dT%H:%M:%S"
if args.tzname is None:
tzinfo = pytz.utc
format_str += "Z"
else:
tzinfo = pytz.timezone(loc.timezone) # type: ignore
format_str += "%z"
kwargs["tzinfo"] = tzinfo
s = sun.sun(**kwargs)
for key, value in s.items():
sun_as_str[key] = s[key].strftime(format_str)
sun_as_str["timezone"] = tzinfo.zone
sun_as_str["location"] = f"{loc.name}, {loc.region}"
print(json.dumps(sun_as_str))
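# Example invocation (illustrative; the script filename is an assumption):
#   python sun_times.py -n Greenwich -r England -t Europe/London 51.4769 0.0005 46
# prints a JSON object with dawn/sunrise/noon/sunset/dusk rendered in Europe/London time.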
| python |
n, m = map(int, input().split())
if n == 1:
if m == 0:
print(1, 2)
else:
print(-1)
exit()
if m < 0 or m + 2 > n:
print(-1)
else:
print(1, 2 * (m + 2))
for i in range(1, m + 2):
print(2 * i, 2 * i + 1)
for j in range(m + 2, n):
print(2 * j + 1, 2 * j + 2) | python |
from sklearn import preprocessing
from tqdm import tqdm
import time
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score, recall_score
from sklearn.metrics import precision_score, f1_score
from sklearn.metrics import classification_report
from core.utils import AverageMeter
from itertools import cycle
from core.utils import save_checkpoint
from sklearn.metrics import roc_auc_score, roc_curve, auc
# from .grad_cam_log import record_output_gradcam
import matplotlib.pyplot as plt
target_names_dict = {"Non": 0, "Venous": 1, "Aterial": 2, "Others": 3}
map_id_name = {0: "Non Contrast", 1: "Venous", 2: "Aterial", 3: "Others"}
def valid_model(
cfg,
mode,
epoch,
model,
dataloader,
criterion,
writer=None,
save_prediction=True,
best_metric=None,
visual=False
):
"""Evaluate model performance on Validating dataset
Args:
cfg (CfgNode): Config object containing running configuration
mode (str): Model running mode (valid/test)
model (nn.Module): Model that need to have performance evaluated
dataloader (data.DataLoader): Dataloader object to load data batch-wise
criterion: Loss function
        writer (Summarywriter): Logger that logs validation loss and plots it on Tensorboard
        save_prediction (Boolean): Whether to save prediction output or not (for bootstrapping)
best_metric (float, optional): Best performance result of loaded model. Defaults to None.
"""
# Declare variables
gpu = cfg.SYSTEM.GPU
output_log_dir = cfg.DIRS.OUTPUTS
model.eval()
losses = AverageMeter()
tbar = tqdm(dataloader)
targets, preds, filenames, study_IDs, seriesNumbers = (
list(),
list(),
list(),
list(),
list(),
)
data = dict()
total_time = 0
all_probs = []
for i, (filename, study_ID, seriesNumber, image, target) in enumerate(tbar):
with torch.no_grad():
image = image.float()
if gpu:
image, target = image.cuda(), target.cuda()
start = time.time()
output = model(image)
end = time.time()
# Output prediction
sigmoid = nn.Sigmoid()
probs = sigmoid(output)
pred = torch.argmax(probs, 1)
probs = probs.cpu().numpy()
all_probs.append(probs)
# print(probs.shape)
# print(pred.shape)
# print("_--------------_")
total_time += end - start
# Compute loss
loss = criterion(output, target)
# Record loss
losses.update(loss.item() * cfg.SOLVER.GD_STEPS, target.size(0))
tbar.set_description("Valid loss: %.9f" % (losses.avg))
# Convert target, prediction to numpy
target = list(target.detach().cpu().numpy())
pred = list(pred.detach().cpu().numpy())
# print(pred)
filename = list(filename)
targets += target
preds += pred
filenames += filename
study_IDs += study_ID
seriesNumbers += list(np.array(seriesNumber))
# print(f"Inference time =", (total_time / len(tbar)) / 100)
all_targets = []
for idx in range(len(targets)):
cur = [0] * 4
cur[targets[idx]] = 1
all_targets.append([cur])
all_probs = np.concatenate(all_probs, axis=0)
all_target = np.concatenate(all_targets, axis=0)
# print(all_target.shape)
# print(all_probs.shape)
np.save("target.npy", all_target)
np.save("probs.npy", all_probs)
# print(type(targets), len(targets))
# print(all_probs.shape)
    if visual:
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(4):
fpr[i], tpr[i], _ = roc_curve(all_target[:, i], all_probs[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
colors = cycle(["aqua", "darkorange", "cornflowerblue", "red"])
lw = 2
plt.figure()
for i, color in zip(range(4), colors):
plt.plot(
fpr[i],
tpr[i],
color=color,
lw=lw,
label=f"ROC curve of class {map_id_name[i]} (area = {roc_auc[i]})"
)
plt.plot([0, 1], [0, 1], "k--", lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("Some extension of Receiver operating characteristic to multiclass")
plt.legend(loc="lower right")
plt.show()
# Record wrongly predicted sample and save confusion matrix
# record_output(cfg, mode, output_log_dir, study_IDs, seriesNumbers,
# targets, preds, filenames)
# record_output_gradcam(cfg, mode, output_log_dir, targets, preds, filenames, model)
# Calculate Metrics
accuracy = accuracy_score(targets, preds)
recall = recall_score(targets, preds, average="macro")
precision = precision_score(targets, preds, average="macro")
f1 = f1_score(targets, preds, average="macro")
print(
"ACCURACY: %.9f, RECALL: %.9f, PRECISION: %.9f, F1: %.9f"
% (accuracy, recall, precision, f1)
)
if len(np.unique(preds)) == cfg.MODEL.NUM_CLASSES:
report = classification_report(
targets,
preds,
target_names=["Non", "Venous", "Aterial", "Others"],
digits=4,
)
print(report)
# else:
# from core.utils import print_report, classification_report_
# report = classification_report_(targets, preds, target_names_dict)
# print_report(report)
data["Study_ID"] = study_IDs
data["Filename"] = filenames
data["SeriesNumber"] = seriesNumbers
data["Prediction"] = preds
data["Label"] = targets
data = pd.DataFrame(data)
all_series = []
for (studyuid, seriesuid), tmp_df in data.groupby(['Study_ID', 'SeriesNumber']):
preds = tmp_df['Prediction'].tolist()
labels = tmp_df['Label'].tolist()
f1_series = f1_score(labels, preds, average='macro')
all_series.append(f1_series)
all_series = np.array(all_series)
f1_series = np.mean(all_series)
print("series", f1_series)
save_dict = {
"epoch": epoch + 1,
"arch": cfg.NAME,
"state_dict": model.state_dict(),
"best_metric": best_metric,
}
save_filename = f"{cfg.NAME}_{str(f1)}_{str(f1_series)}.pth"
save_checkpoint(save_dict, root=cfg.DIRS.WEIGHTS, filename=save_filename)
# print(studyuid, seriesuid, f1)
if mode == "train":
# writer.add_scalars(
# f"Metrics",
# {
# "F1_SCORE": f1,
# "ACCURACY": accuracy,
# "RECALL": recall,
# "PRECISION": precision,
# },
# epoch,
# )
# CHECKPOINT
is_best = f1 > best_metric
best_metric = max(f1, best_metric)
# Save All slices prediction for scan prediction and bootstraping
if save_prediction:
data.to_csv(f"eval_{mode}.csv", index=False)
return best_metric
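# Hedged call sketch (not in the original file): cfg, model, val_loader, criterion and
# writer are assumed to come from the surrounding training pipeline.
#   best_f1 = valid_model(cfg, "valid", epoch, model, val_loader, criterion,
#                         writer=writer, save_prediction=True, best_metric=best_f1)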
| python |
#Updated Monday, May 28, 2018
#Author: Rosnel Alejandro Leyva-Cortes#
import os
import re
import sys
import struct
import socket
import urllib
import time
from subprocess import Popen, PIPE
import json as m_json
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
import urllib.request #Python3.x
except ImportError:
import urllib2 #Python2.x
#End of import#
def q():
print(''' You are a horrible ''')
exit()
#initial ping
#for the hostname
def ping ():
    welcome = raw_input('''\nIn order to perform a test, we must determine if the host is up.''')
    hostname = raw_input("\nInput Hostname: ")
response = os.system("ping -c 10 " + hostname)
#and then check the response...
if response == 0:
print (hostname + ' is up! ') #End result is self explanatory
mainMenu()
def Cloudflare():
print('Not ready yet')
mainMenu()
def traceroute(url=None, *arg):
print('''This function uses ICMP to trace a host and give an IP.
Please run as root and don't include HTTPS in url. ''')
url = raw_input("\nPlease type in url to traceroute a website: ");
while True:
if 'http' not in url:
url = "http://" + url
elif "www" not in url:
url = "www."[:7] + url[7:]
else:
url = url
break
url = urlparse(url)
url = url.netloc
print(url)
p = Popen(['tracert', url], stdout=PIPE)
while True:
line = p.stdout.readline()
line2 = str(line).replace('\\r','').replace('\\n','')
if len(arg)>0:
file = open(arg[0], "a")
file.write(line2)
file.close()
print(line2)
if not line:
break
def mainMenu():
print ('''
_______ ______ _______
/ \ / \ / \
$$$$$$$ |/$$$$$$ |$$$$$$$ |
$$ |__$$ |$$ | $$/ $$ |__$$ |
$$ $$< $$ | $$ $$/
$$$$$$$ |$$ | __ $$$$$$$/
$$ | $$ |$$ \__/ |$$ |
$$ | $$ |$$ $$/ $$ |
$$/ $$/ $$$$$$/ $$/ net
https://sourceforge.net/projects/rcpnet/
https://twitter.com/PotatoSkins16
Choose one
''')
print('1. Ping host')
print('2. Cloudflare check')
print('3. tracert')
print('4 Quit')
    sel = int(raw_input("\nEnter choice: "))
if sel==1:
ping()
elif sel==2:
Cloudflare()
elif sel==3:
traceroute()
elif sel==4:
q()
else:
print('That is not a valid choice!!!')
mainMenu()
mainMenu()
| python |
import pymysql
from sshtunnel import SSHTunnelForwarder
class Database:
def initialize(self, server_name):
self.server = SSHTunnelForwarder(
'51.75.163.1',
ssh_username='karthik',
ssh_password='btm56Vy.3',
remote_bind_address=('127.0.0.1', 3306)
)
self.server.start()
self.cnx = pymysql.connect(
host='localhost',
port=self.server.local_bind_port,
user='discordb0t',
password='d1sCORDb()t!',
db='discordbot'
)
print("Connection Successful!")
self.cur = self.cnx.cursor()
self.server_name = server_name
self.cur.execute("SHOW TABLES")
self.tables = [table_name for (table_name,) in self.cur]
if self.server_name not in self.tables:
self.create_table()
def create_table(self):
SQL = "CREATE TABLE `{0}` LIKE `{1}`".format(self.server_name, "Default_Table")
self.cur.execute(SQL)
self.cnx.commit()
def add_member(self, *member):
if not self.check_mem('Main', member[0]):
SQL = "INSERT INTO `Main`(`UID`, `Name`, `Avatar`, `Bot`, `Banned`, `Credits`, `Level`, `XP`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)"
self.cur.execute(SQL, member)
self.cnx.commit()
SQL = "INSERT INTO `{0}`(`UID`, `Name`, `Avatar`, `Bot`, `Banned`, `Credits`, `Level`, `XP`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)".format(
self.server_name)
self.cur.execute(SQL, member)
self.cnx.commit()
elif self.check_mem('Main', member[0]):
SQL = "INSERT INTO `{0}`(`UID`, `Name`, `Avatar`, `Bot`, `Banned`, `Credits`, `Level`, `XP`) " \
"SELECT `UID`, `Name`, `Avatar`, `Bot`, `Banned`, `Credits`, `Level`, `XP` " \
"FROM `Main` WHERE `Main`.`UID` = {1}".format(
self.server_name, member[0])
self.cur.execute(SQL)
self.cnx.commit()
def remove_member(self, member_id):
SQL = "DELETE FROM `{0}` WHERE `{0}`.`UID` = {1}".format(self.server_name, member_id)
self.cur.execute(SQL)
self.cnx.commit()
def check_mem(self, server_name, member_id):
SQL = "SELECT 1 FROM `{0}` WHERE `{0}`.`UID` = {1}".format(server_name, member_id)
self.cur.execute(SQL)
x = self.cur.fetchone()
if isinstance(x, type(None)):
return False
return True
def reset_credits(self, member_id, amount):
for table in self.tables:
if self.check_mem(table, member_id):
SQL = "UPDATE `{0}` SET `Credits` = '{1}' WHERE `{0}`.`UID` = {2}".format(table, amount, member_id)
self.cur.execute(SQL)
self.cnx.commit()
def reset_xp(self, member_id):
for table in self.tables:
if self.check_mem(table, member_id):
SQL = "UPDATE `{0}` SET `XP` = '{1}' WHERE `{0}`.`UID` = {2}".format(table, 0, member_id)
self.cur.execute(SQL)
self.cnx.commit()
def update_pfp(self, member_id, avatar_url):
for table in self.tables:
if self.check_mem(table, member_id):
SQL = "UPDATE `{0}` SET `Avatar` = '{1}' WHERE `{0}`.`UID` = {2}".format(table, avatar_url, member_id)
self.cur.execute(SQL)
self.cnx.commit()
def update_name(self, new_name, member_id):
for table in self.tables:
if self.check_mem(table, member_id):
SQL = "UPDATE `{0}` SET `Name` = '{1}' WHERE `{0}`.`UID` = {2}".format(table, new_name, member_id)
self.cur.execute(SQL)
self.cnx.commit()
def update_table(self, current_name):
SQL = "ALTER TABLE `{0}` RENAME TO `{1}`".format(self.server_name, current_name)
self.cur.execute(SQL)
self.cnx.commit()
def update_xp(self, member_id, xp_gain):
SQL = ""
def fetch_profile(self, member_id):
SQL = "SELECT * FROM `Main` WHERE `Main`.`UID` = %s"
self.cur.execute(SQL, member_id)
elements = [element for element in self.cur.fetchone()]
return elements
def terminate(self):
print("terminated")
self.cur.close()
self.cnx.close()
self.server.close()
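# Hedged usage sketch (not part of the original file): "Example_Server" and the member
# values are illustrative assumptions, and running it requires the SSH/MySQL endpoint
# hard-coded in initialize() to be reachable. add_member expects the eight columns
# (UID, Name, Avatar, Bot, Banned, Credits, Level, XP) in order.
if __name__ == "__main__":
    db = Database()
    db.initialize("Example_Server")   # clones Default_Table if the server table is missing
    db.add_member(1234, "SomeUser", "http://example.com/avatar.png", 0, 0, 100, 1, 0)
    print(db.fetch_profile(1234))     # list of column values for that member
    db.terminate()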
| python |
class Station:
def __init__(self, station_id, direction, stop_name, station_name, accessible, red, blue, green, brown, purple, purple_exp, yellow, pink, orange, latitude, longitude):
self.station_id = station_id
self.direction = direction
self.stop_name = stop_name
self.station_name = station_name
self.accessible = accessible
self.red = red
self.blue = blue
self.green = green
self.brown = brown
self.purple = purple
self.purple_exp = purple_exp
self.yellow = yellow
self.pink = pink
self.orange = orange
self.latitude = latitude
self.longitude = longitude
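# Minimal usage sketch (not from the original source; the values are illustrative and the
# field names appear to mirror a CTA "L" stops dataset, which is an assumption):
if __name__ == "__main__":
    example_station = Station(
        station_id=40900, direction="N", stop_name="Howard (Terminal arrival)",
        station_name="Howard", accessible=True,
        red=True, blue=False, green=False, brown=False, purple=True,
        purple_exp=True, yellow=True, pink=False, orange=False,
        latitude=42.019063, longitude=-87.672892,
    )
    print(example_station.station_name, example_station.latitude, example_station.longitude)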
| python |
meses = {'January': 1, 'February': 2, 'March': 3, 'April': 4, 'May': 5, 'June': 6,
'July': 7, 'August': 8, 'September': 9, 'October': 10, 'November': 11, 'December': 12}
#input
mes = int(input())
#processing & output
for k, v in meses.items():
if v == mes:
print(k)
break
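# Alternative sketch (same input contract, not in the original): inverting the dict once
# makes the lookup direct instead of scanning every item.
#   month_name_by_number = {v: k for k, v in meses.items()}
#   print(month_name_by_number[mes])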
| python |
from gruffy import AccumulatorBar
g = AccumulatorBar()
g.title = "Gruffy's Graph"
g.data("add", [10, 50, 150, 20])
g.hide_legend = True
g.labels = {0: '2003', 1: '2004', 2: '2005', 3: '2006'}
g.transparent = 0.7
g.y_axis_increment = 50
g.maximum_value = 300
g.write('gruffy-accumulatorbar.png')
| python |
from sym_lis3 import GlobalEnv
import pytest
def test_dyn():
g = GlobalEnv()
g.eval_str('(define "foo" (lambda (x y) (if (in? dyn_env x) y 0)))')
assert not g.eval_str('(in? root_env "x")')
assert g.eval_str('(foo "x" 1)') == 1
assert g.eval_str('(foo "+" 1)') == 0
assert g.eval_str('(foo "y" 55)') == 55
| python |
class BaseEngine:
def __init__(self, world):
self.world = world
self._cull_method = self.default_cull_method
def check_collision(self, entity, collider):
raise NotImplementedError('Nope.')
def resolve_collision(self, entity, collider):
raise NotImplementedError('Nope.')
def handle_collision(self, entity):
raise NotImplementedError('Nope.')
def set_cull_method(self, cull_method):
self._cull_method = cull_method
def cull_chunks(self, chunks):
return self._cull_method(chunks)
def default_cull_method(self, chunks):
return [shape for chunk in chunks for shape in chunk.shapes] | python |
from ..typecheck import *
from . layout import Layout
from . image import Image
from . css import css, div_inline_css, icon_css, none_css
class element:
def __init__(self, is_inline: bool, width: Optional[float], height: Optional[float], css: Optional[css]) -> None:
super().__init__()
self.layout = None #type: Optional[Layout]
self.children = [] #type: Sequence[element]
self.requires_render = True
self._height = height
self._width = width
self.is_inline = is_inline
if css:
self.css = css
self.className = css.class_name
self.padding_height = css.padding_height
self.padding_width = css.padding_width
else:
self.css = none_css
self.className = none_css.class_name
self.padding_height = 0
self.padding_width = 0
def height(self, layout: Layout) -> float:
if self._height is not None:
return self._height + self.padding_height
height = 0.0
height_max = 0.0
for item in self.children:
height += item.height(layout)
if item.is_inline and height > height_max:
height_max = max(height_max, height)
height = 0.0
return max(height_max, height) + self.padding_height
def width(self, layout: Layout) -> float:
if self._width is not None:
return self._width + self.padding_width
width = 0.0
width_max = 0.0
for item in self.children:
width += item.width(layout)
if not item.is_inline and width > width_max:
width_max = max(width_max, width)
width = 0.0
return max(width_max, width) + self.padding_width
def add_class(self, name: str) -> None:
self.className += ' '
self.className += name
def dirty(self):
if self.layout:
self.layout.dirty()
self.requires_render = True
def html_inner(self, layout: Layout) -> str:
html = []
for child in self.children:
html.append(child.html(layout))
return ''.join(html)
def html(self, layout: Layout) -> str:
...
def added(self, layout: Layout) -> None:
...
def removed(self) -> None:
...
def render(self) -> Optional[Union[Sequence['element'], 'element']]:
...
class span (element):
Children = Optional[Union[Sequence['span'], 'span']]
def __init__(self, width: Optional[float] = None, height: Optional[float] = None, css: Optional[css] = None) -> None:
super().__init__(True, width, height, css)
self._items = None #type: span.Children
def render(self) -> 'span.Children':
return self._items
def __getitem__(self, values: 'span.Children'):
self._items = values
return self
def html(self, layout: Layout) -> str:
inner = self.html_inner(layout)
h = self.height(layout)
w = self.width(layout)
html = '<span class="{}" style="line-height:{}rem;">{}</span>'.format(self.className, h, inner)
return html
class div (element):
Children = Optional[Union[Sequence['div'], Sequence['span'], 'div', 'span']]
def __init__(self, width: Optional[float] = None, height: Optional[float] = None, css: Optional[css] = None) -> None:
super().__init__(False, width, height, css)
self._items = None #type: div.Children
def render(self) -> 'div.Children':
return self._items
def __getitem__(self, values: 'div.Children'):
self._items = values
return self
def html(self, layout: Layout) -> str:
inner = self.html_inner(layout)
h = self.height(layout) - self.padding_height
w = self.width(layout) - self.padding_width
if self.children and self.children[0].is_inline:
html = '<div class= "{} {}" style="height:{}rem;width:{}rem;line-height:{}rem"><img style="height:2.5rem;">{}</div>'.format(div_inline_css.class_name, self.className, h, w, h, inner)
else:
html = '<div class="{}" style="height:{}rem;width:{}rem;">{}</div>'.format(self.className, h, w, inner)
return html
# uses an img tag to force the width of the phantom to be the width of the item being rendered
class phantom_sizer (div):
def __init__(self, item: Union[div, span]) -> None:
super().__init__()
self.item = item
def render(self) -> div.Children:
return self.item
def html(self, layout: Layout) -> str:
inner = self.html_inner(layout)
h = self.height(layout)
w = self.width(layout)
html = '<div class="{}" style="height:{}rem;"><img style="width:{}rem;">{}</div>'.format(self.className, h, w, inner)
return html
html_escape_table = {
"&": "&",
">": ">",
"<": "<",
" ": "\u00A0" # HACK spaces inside <a> tags are not clickable. We replaces spaces with no break spaces
}
def html_escape(text: str) -> str:
return "".join(html_escape_table.get(c, c) for c in text)
class text (span):
def __init__(self, text: str, width: Optional[float] = None, height: Optional[float] = None, css: Optional[css] = None) -> None:
super().__init__(width, height, css)
self.text = text.replace("\u0000", "\\u0000")
@property
def text(self) -> str:
return self._text
@text.setter
def text(self, text: str):
self._text = text.replace("\u0000", "\\u0000")
self.text_html = html_escape(self._text)
def width(self, layout: Layout) -> float:
return len(self.text) + self.padding_width
def html(self, layout: Layout) -> str:
h = self.height(layout)
html = '<span class="{}" style="line-height:{}rem;">{}</span>'.format(self.className, h, self.text_html)
return html
class click (span):
def __init__(self, on_click: Callable[[], None]) -> None:
super().__init__()
self.on_click = on_click
def html(self, layout: Layout) -> str:
href = layout.register_on_click_handler(self.on_click)
html = '<a href={}>{}</a>'.format(href, self.html_inner(layout))
return html
class icon (span):
def __init__(self, image: Image) -> None:
super().__init__(width=2.5, height=2.5, css=icon_css)
self.image = image
def html(self, layout: Layout) -> str:
return '''<span class="{}"><img style="width:2.5rem;height:2.5rem;" src="{}"></span>'''.format(self.className, self.image.data(layout))
class code(span):
def __init__(self, text: str, language: str = 'c++') -> None:
super().__init__()
self.text = text.replace("\n", "")
self.text_html = html_escape(self.text)
self.language = language
def added(self, layout: Layout) -> None:
self.highlight = layout.syntax_highlight(self.text, self.language)
def width(self, layout: Layout) -> float:
return len(self.text) + self.padding_width
def html(self, layout: Layout) -> str:
h = self.height(layout)
text_html = self.highlight.html or self.text_html
html = '<span class="{}" style="line-height:{}rem;">{}</span>'.format(self.className, h, text_html)
return html
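# Hedged composition sketch (not part of the original module): elements nest through
# __getitem__, so a small tree can be declared like this; turning it into HTML still
# requires a concrete Layout instance, which only the surrounding package provides.
#   example = div()[
#       span()[text("label: "), code("x + 1", language="python")],
#   ]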
| python |
from django.core.mail import send_mail, EmailMessage
from django.forms import modelformset_factory
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from .models import Question, Choice, FilePathFieldForm
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset(self):
"""Return the last five published questions."""
return Question.objects.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
def send(request):
    # subject = '主题' # subject
    # message = 'sssssss' # body
    # sender = '[email protected]' # sender address, already configured in settings.py and imported directly
    # receiver = ['[email protected]'] # recipient addresses
    # html_message = '<h1>%s</h1>' % 'testtesttest' # send in HTML format
# send_mail(subject, message, sender, receiver, html_message=html_message)
email = EmailMessage(
'Hello',
'Body goes here',
'[email protected]',
['[email protected]', '[email protected]'],
['[email protected]'],
reply_to=['[email protected]'],
headers={'Message-ID': 'foo'},
)
email.send()
# Registration implemented using the form component
def manage_FilePathForm(request):
    form_obj = FilePathFieldForm() # instantiate a form object
    if request.method == "POST":
        # when instantiating the form object, pass the POSTed data straight in
        form_obj = FilePathFieldForm(request.POST)
        # call the form's data-validation method
if form_obj.is_valid():
form_obj.save()
return render(request, 'polls/manage_authors.html', {'form_obj': form_obj}) | python |
from ...utils.IndexedRect import IndexedRect
class IndexedRectBuilder(object):
def __init__(self):
self.last_rect = None
self.initial_point = None
self.reset()
def set_initial_point(self, x, y):
self.initial_point = (x,y)
def get_initial_point(self):
return self.initial_point
def reset(self):
self.last_rect = None
self.initial_point = None
def has_initial_point(self):
return self.initial_point is not None
def to_rect(self, i, x, y):
self.last_rect = IndexedRect(i, self.initial_point[0], self.initial_point[1], x, y)
self.initial_point = None
return self.last_rect
class BoundingBoxInputManager(object):
MAX_KEPT = 20
def __init__(self):
self.curr_inputs = []
self.reset()
def add(self, ir: IndexedRect):
self.curr_inputs.append(ir)
self.curr_inputs = self.curr_inputs[-self.MAX_KEPT:]
def get_n(self):
return min(len(self.curr_inputs), 2)
def has_n(self, n):
return len(self.curr_inputs) >= n
def reset(self):
self.curr_inputs = []
def __getitem__(self, key):
return self.curr_inputs[-2:][key]
def get_2_sorted(self):
return sorted(self.curr_inputs[-2:], key=lambda r: r.i)
def get_last(self):
if len(self.curr_inputs) == 0:
return None
return self.curr_inputs[-1]
def remove_last(self):
if self.has_n(1):
last = self.curr_inputs[-1]
else:
last = None
self.curr_inputs = self.curr_inputs[:-1]
return last | python |
# -*- coding: utf-8 -*-
import binarybrain as bb
import binarybrain.core as core
import numpy as np
from typing import List
class Optimizer(bb.Object):
"""Optimizer の基本クラス
"""
def __init__(self, core_optimizer=None):
super(Optimizer, self).__init__(core_object=core_optimizer)
def set_variables(self, params, grads):
"""変数設定
Args:
params (Variables): 学習対象のパラメータ変数
grads (Variables): paramsに対応する勾配変数
"""
self.get_core().set_variables(params.get_core(), grads.get_core())
def update(self):
"""パラメータ更新&勾配ゼロクリア
set_variablesで設定された勾配変数に基づいた学習をset_variablesで
設定されたパラメータ変数に適用して、勾配をゼロクリアする
"""
return self.get_core().update()
def zero_grad(self):
"""勾配のゼロクリア
set_variablesで設定された勾配変数をゼロクリアする
"""
return self.get_core().zero_grad()
def step(self):
"""パラメータ更新
set_variablesで設定された勾配変数に基づいた学習をset_variablesで
設定されたパラメータ変数に適用する
"""
return self.get_core().step()
def set_learning_rate(self, learning_rate):
"""学習率設定
"""
self.get_core().set_learning_rate(learning_rate)
class OptimizerSgd(Optimizer):
"""SGD 最適化クラス
Args:
learning_rate (float): 学習率
"""
def __init__(self, learning_rate=0.001, dtype=bb.DType.FP32):
core_optimizer = bb.search_core_object('OptimizerSgd', [dtype]).create(learning_rate=learning_rate)
super(OptimizerSgd, self).__init__(core_optimizer=core_optimizer)
class OptimizerAdaGrad(Optimizer):
"""AdaGrad 最適化クラス
Args:
learning_rate (float): 学習率
"""
def __init__(self, learning_rate=0.01, dtype=bb.DType.FP32):
core_optimizer = bb.search_core_object('OptimizerAdaGrad', [dtype]).create(learning_rate=learning_rate)
super(OptimizerAdaGrad, self).__init__(core_optimizer=core_optimizer)
class OptimizerAdam(Optimizer):
"""Adam 最適化クラス
Args:
learning_rate (float): 学習率
beta1 (float): beta1
beta2 (float): beta2
"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, dtype=bb.DType.FP32):
core_optimizer = bb.search_core_object('OptimizerAdam', [dtype]).create(learning_rate=learning_rate, beta1=beta1, beta2=beta2)
super(OptimizerAdam, self).__init__(core_optimizer=core_optimizer)
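# Hedged usage sketch (not from the original file): the `model` object and its
# get_parameters()/get_gradients() accessors are assumptions about the wider
# binarybrain API; only the Optimizer interface above is taken from this module.
#   optimizer = OptimizerAdam(learning_rate=0.001)
#   optimizer.set_variables(model.get_parameters(), model.get_gradients())
#   ...compute gradients for a mini-batch...
#   optimizer.update()   # apply the step, then zero the gradients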
| python |
#coding:utf8
# Author : tuxpy
# Email : [email protected]
# Last modified : 2015-03-26 13:14:11
# Filename : gale/utils.py
# Description :
from __future__ import unicode_literals
try: # py2
from urlparse import urlsplit
from urllib import unquote_plus
from urllib import quote_plus
except ImportError: # py3
from urllib.parse import urlsplit # py3
from urllib.parse import unquote_plus
from urllib.parse import quote_plus
import email.utils
import time
import urllib
from gale import escape
from gale.config import CRLF
import mimetypes
import uuid
import fcntl
import gevent
from gevent import (Greenlet, socket)
from functools import wraps
import sys
def set_close_exec(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, flags|fcntl.FD_CLOEXEC)
# Set the close-on-exec flag so the socket is closed on reload
def get_gale_socket(raw_socket = None):
_socket = raw_socket or socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
_socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
_socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, 1)
set_close_exec(_socket.fileno())
return _socket
def parse_request_range(_range):
if not _range:
return 0, 0
if 'bytes=' not in _range:
return 0, 0
bytes_range = _range.split('bytes=')[-1].strip()
start, stop = bytes_range.split('-')
start = start and int(start) or 0
stop = stop and int(stop) or 0
return start, stop
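# Hedged examples (not in the original source):
#   parse_request_range('bytes=0-499')  -> (0, 499)
#   parse_request_range('bytes=500-')   -> (500, 0)   # an open-ended range reports stop as 0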
def urldecode(params_url):
    if not params_url: # return {} if there is nothing to parse
return {}
params_url = escape.param_decode(params_url)
    _d = {} # dict form of the request parameters; each value is a list of parameter values
for _name, _value in map(lambda x: x.split('=', 1),
            filter(lambda k_v: '=' in k_v, params_url.split('&'))): # the filter drops parameters that have no '=' sign
        # decode the user-submitted URL parameters and body into unicode
_d.setdefault(_name, []).append(urlunquote(_value))
return _d
def urlunquote(param):
if param == None:
return param
param = unquote_plus(escape.native_str(param))
return escape.param_decode(param)
def urlquote(param):
return quote_plus(escape.utf8(param))
code_mess_map = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported'
}
def format_timestamp(ts = None):
if not ts:
ts = time.time()
return email.utils.formatdate(ts, usegmt = True)
def get_mime_type(file_path):
return mimetypes.guess_type(file_path)[0] or 'application/octet-stream'
def made_uuid():
return uuid.uuid4().hex
from multiprocessing import Manager
__mgr = Manager()
def ShareDict(*args, **kwargs):
return __mgr.dict(*args, **kwargs)
def stop_share_dict():
__mgr.shutdown()
from gale.py_ver import is_py3
unicode_type = is_py3 and str or unicode
def is_string(s):
if is_py3:
return isinstance(s, str)
else:
return isinstance(s, (str, unicode))
if is_py3:
exec("""
def raise_exc_info(exc_info):
raise exc_info[1].with_traceback(exc_info[2])
def exec_in(code, glob, loc=None):
if isinstance(code, str):
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec(code, glob, loc)
""")
else:
exec("""
def raise_exc_info(exc_info):
raise exc_info[0], exc_info[1], exc_info[2]
def exec_in(code, glob, loc=None):
if isinstance(code, basestring):
# exec(string) inherits the caller's future imports; compile
# the string first to prevent that.
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec code in glob, loc
""")
class ObjectDict(dict):
def __setattr__(self, key, value):
self[key] = value
def __getattr__(self, key):
return self[key]
def single_pattern(obj):
@wraps(obj)
def wrap(*args, **kwargs):
if hasattr(obj, '_instance'):
return obj._instance
_instance = obj.__new__(obj, *args, **kwargs)
obj.__init__(_instance, *args, **kwargs)
obj._instance = _instance
return _instance
return wrap
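# Hedged round-trip sketch (not part of the original module; it assumes the gale package
# is importable so the escape helpers used above resolve):
if __name__ == '__main__':
    q = urlquote('a b&c')                      # -> 'a+b%26c'
    print(urldecode('x=1&x=2&y=' + q))         # -> {'x': ['1', '2'], 'y': ['a b&c']}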
| python |
import cv2
import numpy as np
from matplotlib import pyplot as plt
# compute the difference between two adjacent optical flows
def diffimage(lastframe, nextframe, size):
diff_frame = nextframe - lastframe
ABS = abs(diff_frame)
diff_value = (ABS.sum(axis = 0)).sum(axis = 0)/size
return diff_frame, diff_value
if __name__ == '__main__':
cap = cv2.VideoCapture("../TP2_Videos/Extrait1-Cosmos_Laundromat1(340p).m4v")
ret, lastframe = cap.read()
lastgray = cv2.cvtColor(lastframe, cv2.COLOR_BGR2GRAY)
ret, nextframe = cap.read()
nextgray = cv2.cvtColor(nextframe, cv2.COLOR_BGR2GRAY)
index = 1
last_diff_value = 0
lasthist = np.zeros([100, 100])
    a = np.array([0]) # keeps the last two differences between adjacent optical flows
while(ret):
index += 1
size = nextframe.size
flow = cv2.calcOpticalFlowFarneback(lastgray,nextgray,None,
                                            pyr_scale = 0.5,# Pyramid scale (reduction factor between levels)
                                            levels = 3, # Number of pyramid levels
                                            winsize = 15, # Averaging window size for smoothing the polynomial coefficients
                                            iterations = 3, # Number of iterations per level
                                            poly_n = 7, # Neighbourhood size for the polynomial approximation
                                            poly_sigma = 1.5, # Gaussian standard deviation used for the derivatives
                                            flags = 0) # flow is an optical-flow field with two channels
nexthist = cv2.calcHist([flow], [0,1], None, [100,100], [-100,100,-100,100])
nexthist[nexthist > 255] = 255
diff_frame, next_diff_value = diffimage(lasthist, nexthist, size)
a = np.append(a, next_diff_value)
cv2.imshow('fame', nextframe)
if (next_diff_value > 0.05 and abs(a[1]-a[0]) < 0.002 ) or next_diff_value > 0.1:
            cv2.imwrite('Frame_%04d.png'%index,nextframe) # save the key frame
a = np.delete(a,[0])
k = cv2.waitKey(15)
if k == 27:
break
lastgray = nextgray
lasthist = nexthist
ret, nextframe = cap.read()
if (ret):
nextgray = cv2.cvtColor(nextframe, cv2.COLOR_BGR2GRAY)
    cap.release()
cv2.destroyAllWindows() | python |
import pytest
from mcanitexgen.animation.parser import Duration, ParserError, Time, Timeframe, Weight
class Test_Timeframe_init:
@pytest.mark.parametrize(
"start, end, duration, expected_timeframe",
[
# Deduce end and duration
(0, None, None, Timeframe(0, 1, 1)),
(10, None, None, Timeframe(10, 11, 1)),
# Deduce duration
(0, 20, None, Timeframe(0, 20, 20)),
(11, 22, None, Timeframe(11, 22, 11)),
# Deduce end
(0, None, 5, Timeframe(0, 5, 5)),
(15, None, 5, Timeframe(15, 20, 5)),
# All set
(0, 10, 10, Timeframe(0, 10, 10)),
],
)
def test_args(self, start, end, duration, expected_timeframe):
assert Timeframe(start, end, duration) == expected_timeframe
@pytest.mark.parametrize(
"start, end, duration, match",
[
(None, None, None, "Timeframe must have at least one of start, end, duration set"),
(None, 2, 20, "Timeframes without start can't have end and duration"),
(0, 5, 20, "Start, end and duration of timeframe don't match: 0, 5, 20"),
],
)
def test_illegal_args(self, start, end, duration, match):
with pytest.raises(ParserError, match=match):
Timeframe(start, end, duration)
class Test_Time_from_args:
@pytest.mark.parametrize(
"start, end, duration, weight, expected_time",
[
(None, None, None, None, None),
# Weight
(None, None, None, 12, Weight(12)),
# Duration
(None, None, 10, None, Duration(10)),
# Timeframe
(0, None, None, None, Timeframe(0, 1, 1)),
(1, 20, None, None, Timeframe(1, 20, 19)),
(1, 20, 19, None, Timeframe(1, 20, 19)),
(1, None, 19, None, Timeframe(1, 20, 19)),
(None, 10, None, None, Timeframe(None, 10, None)),
],
)
def test_args(self, start, end, duration, weight, expected_time):
assert Time.from_args(start, end, duration, weight) == expected_time
@pytest.mark.parametrize(
"start, end, duration, weight, match",
[
# Weight
(None, None, None, 0, "Weight of time must be at least 1"),
(None, None, 1, 1, "Weighted time can't have start, end or duration"),
(None, 1, None, 1, "Weighted time can't have start, end or duration"),
(1, None, None, 1, "Weighted time can't have start, end or duration"),
# Duration
(None, None, 0, None, "Duration must be at least 1"),
(None, None, -10, None, "Duration must be at least 1"),
],
)
def test_illegal_args(self, start, end, duration, weight, match):
with pytest.raises(ParserError, match=match):
Time.from_args(start, end, duration, weight)
| python |
#!/usr/bin/env python3
from yaml import load
class ComposePlantuml:
def __init__(self):
pass
def parse(self, data):
return load(data)
def link_graph(self, compose, notes=False):
result = 'skinparam componentStyle uml2\n'
for component in sorted(self.components(compose)):
result += '[{0}]\n'.format(component)
for source, destination in sorted(self.links(compose)):
result += '[{0}] --> [{1}]\n'.format(source, destination)
for source, destination in sorted(self.dependencies(compose)):
result += '[{0}] ..> [{1}] : depends on\n'.format(source, destination)
if notes:
for component_name in sorted(self.components(compose)):
component = self.component(compose, component_name)
if 'labels' in component:
labels = [
'{0}={1}'.format(key, value)
for key, value in component['labels'].items()
]
result += 'note top of [{0}]\n {1}\nend note\n'.format(component_name, '\n '.join(labels))
return result.strip()
def boundaries(self, compose, group=False, notes=False):
result = 'skinparam componentStyle uml2\n'
result += 'cloud system {\n'
for component in sorted(self.components(compose)):
if self.has_service_external_ports(compose, component) or self.has_service_volumes(compose, component):
result += ' [{0}]\n'.format(component)
result += '}\n'
volume_registry = {}
volume_uml = ''
for volume in sorted(self.volumes(compose)):
if not self.is_volume_used(compose, volume):
continue
volume_uml += 'database {0}'.format(volume) + ' {\n'
for path in sorted(self.volume_usage(compose, volume)):
id = self.volume_identifier(volume, path)
if id in volume_registry:
continue
volume_registry[id] = 'volume_{0}'.format(len(volume_registry.keys()) + 1)
volume_uml += ' [{0}] as {1}\n'.format(path, volume_registry[id])
volume_uml += '}\n'
result += self.group('volumes', volume_uml) if group else volume_uml
port_uml = ''
port_links = ''
for service, host, container in sorted(self.ports(compose)):
port = host if container is None else '{0} : {1}'.format(host, container)
port_links += '[{0}] --> {1}\n'.format(service, port)
port_uml += 'interface {0}\n'.format(host)
result += self.group('ports', port_uml) if group else ''
result += port_links
for volume in sorted(self.volumes(compose)):
for service, volume_path in sorted(self.service_using_path(compose, volume)):
name = volume_path
if '{0}.{1}'.format(volume, volume_path) in volume_registry:
name = volume_registry['{0}.{1}'.format(volume, volume_path)]
result += '[{0}] --> {1}\n'.format(service, name)
if notes:
for component_name in sorted(self.components(compose)):
if not (self.has_service_external_ports(compose, component_name) or self.has_service_volumes(compose, component_name)):
continue
if not self.labels(compose, component_name):
continue
labels = [
'{0}={1}'.format(key, value)
for key, value in self.labels(compose, component_name).items()
]
result += 'note top of [{0}]\n {1}\nend note\n'.format(component_name, '\n '.join(labels))
return result.strip()
@staticmethod
def labels(compose, service):
service = ComposePlantuml.component(compose, service)
if 'labels' not in service:
return None
if type(service['labels']) is str:
key, value = service['labels'].split(':')
return {key: value}
return service['labels']
@staticmethod
def group(name, content):
if len(content) == 0:
return ''
return 'package {0} '.format(name) + '{\n ' + '\n '.join(content.split('\n')).strip() + '\n}\n'
@staticmethod
def is_volume_used(compose, volume):
components = compose if 'version' not in compose else compose.get('services', {})
for _, component in components.items():
for volume_name in component.get('volumes', {}):
if volume_name.startswith('{0}:'.format(volume)):
return True
return False
@staticmethod
def is_service_used(compose, service):
components = compose if 'version' not in compose else compose.get('services', {})
for _, component in components.items():
for link in component.get('links', []):
link = link if ':' not in link else link.split(':')[0]
if link == service:
return True
for dependency in component.get('depends_on', []):
if dependency == service:
return True
return False
@staticmethod
def has_service_external_ports(compose, service):
components = compose if 'version' not in compose else compose.get('services', {})
for name, component in components.items():
if service != name:
continue
return 'ports' in component
return False
@staticmethod
def has_service_volumes(compose, service):
components = compose if 'version' not in compose else compose.get('services', {})
for name, component in components.items():
if service != name:
continue
if 'volumes' not in component:
return False
for volume in component['volumes']:
if volume.startswith('/'):
continue
if ':' in volume:
return True
return False
@staticmethod
def volume_identifier(volume, path):
return '{0}.{1}'.format(volume, path)
@staticmethod
def components(compose):
if 'version' not in compose:
return [component for component in compose]
return [component for component in compose.get('services', {})]
@staticmethod
def component(compose, name):
root = compose if 'version' not in compose else compose['services']
assert name in root
return root[name]
@staticmethod
def links(compose):
result = []
components = compose if 'version' not in compose else compose.get('services', {})
for component_name, component in components.items():
for link in component.get('links', []):
link = link if ':' not in link else link.split(':')[0]
result.append((component_name, link))
return result
@staticmethod
def dependencies(compose):
result = []
components = compose if 'version' not in compose else compose.get('services', {})
for component_name, component in components.items():
for dependency in component.get('depends_on', []):
result.append((component_name, dependency))
return result
@staticmethod
def ports(compose):
result = []
components = compose if 'version' not in compose else compose.get('services', {})
for component_name, component in components.items():
for port in component.get('ports', []):
port = str(port)
host, container = (port, None)
if ':' in port:
host, container = port.split(':')
result.append((component_name, host, container))
return result
@staticmethod
def volumes(compose):
if 'version' not in compose:
return [] # TODO: support for version 1
volumes = compose.get('volumes', {})
return list(volumes.keys())
@staticmethod
def volume_usage(compose, volume):
result = []
components = compose if 'version' not in compose else compose.get('services', {})
for component_name, component in components.items():
for volume_name in component.get('volumes', {}):
if not volume_name.startswith('{0}:'.format(volume)):
continue
result.append(volume_name.split(':')[1])
return result
@staticmethod
def service_using_path(compose, volume):
result = []
components = compose if 'version' not in compose else compose.get('services', {})
for component_name, component in components.items():
for volume_name in component.get('volumes', {}):
if not volume_name.startswith('{0}:'.format(volume)):
continue
result.append((component_name, volume_name.split(':')[1]))
return result
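# Hedged usage sketch (not part of the original module): the compose snippet is an
# illustrative assumption, and parse() simply delegates to yaml.load, so this assumes a
# PyYAML version that still accepts load() without an explicit Loader.
if __name__ == '__main__':
    example = '''
    version: "2"
    services:
      web:
        links: [db]
        ports: ["8080:80"]
      db:
        volumes: ["data:/var/lib/db"]
    volumes:
      data: {}
    '''
    uml = ComposePlantuml()
    compose = uml.parse(example)
    print(uml.link_graph(compose))   # component boxes plus a [web] --> [db] link
    print(uml.boundaries(compose))   # exposed ports and volume usage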
| python |