content (string, 0–894k chars) | type (2 classes)
---|---
class Config:
    redis = {
        'host': '127.0.0.1',
        'port': '6379',
        'password': None,
        'db': 0
    }
    app = {
        'name': 'laravel_database_gym',
        'tag': 'swap'
    }

conf = Config()
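# conf is a module-level singleton; settings are read as plain dicts,
# e.g. conf.redis['host'] or conf.app['name'].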
|
python
|
# Copyright 2018 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework import serializers
from .checksum import ChecksumSerializer
class VnfPackageArtifactInfoSerializer(serializers.Serializer):
    artifactPath = serializers.CharField(
        help_text="Path in the VNF package.",
        required=True,
        allow_null=False,
        allow_blank=False
    )
    checksum = ChecksumSerializer(
        help_text="Checksum of the artifact file.",
        required=True,
        allow_null=False
    )
    metadata = serializers.DictField(
        help_text="The metadata of the artifact that are available in the VNF package",
        child=serializers.CharField(
            help_text="KeyValue Pairs",
            allow_blank=True
        ),
        required=False,
        allow_null=True
    )
|
python
|
#!/usr/bin/python
"""
Broker for MQTT communication of the agent.
Copyright (C) 2017-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
import logging
import json
from typing import Optional
from diagnostic.ibroker import IBroker
from diagnostic.diagnostic_checker import DiagnosticChecker
from diagnostic.constants import AGENT, CONFIGURATION_UPDATE_CHANNEL, ALL_AGENTS_UPDATE_CHANNEL, CMD_CHANNEL, \
RESPONSE_CHANNEL, STATE_CHANNEL, CLIENT_CERTS, CLIENT_KEYS
from inbm_lib.mqttclient.config import DEFAULT_MQTT_HOST, DEFAULT_MQTT_PORT, MQTT_KEEPALIVE_INTERVAL
from inbm_lib.mqttclient.mqtt import MQTT
logger = logging.getLogger(__name__)
class Broker(IBroker):  # pragma: no cover
    """Starts the agent and listens for incoming commands on the command channel"""

    def __init__(self, tls: bool = True) -> None:
        self.diagnostic_checker: Optional[DiagnosticChecker] = None
        self._mqttc = MQTT(AGENT + "-agent", DEFAULT_MQTT_HOST, DEFAULT_MQTT_PORT,
                           MQTT_KEEPALIVE_INTERVAL, env_config=True,
                           tls=tls, client_certs=CLIENT_CERTS, client_keys=CLIENT_KEYS)
        self._mqttc.start()
        self._initialize_broker()

    def publish(self, channel: str, message: str):
        """Publish message on MQTT channel

        @param channel: channel to publish upon
        @param message: message to publish
        """
        self._mqttc.publish(channel, message)

    def _initialize_broker(self) -> None:
        self.diagnostic_checker = DiagnosticChecker(self)
        try:
            logger.debug('Subscribing to: %s', STATE_CHANNEL)
            self._mqttc.subscribe(STATE_CHANNEL, self._on_message)
            logger.debug('Subscribing to: %s', CMD_CHANNEL)
            self._mqttc.subscribe(CMD_CHANNEL, self._on_command)
            logger.debug('Subscribing to: %s', CONFIGURATION_UPDATE_CHANNEL)
            self._mqttc.subscribe(CONFIGURATION_UPDATE_CHANNEL, self._on_update)
            logger.debug('Subscribing to: %s', ALL_AGENTS_UPDATE_CHANNEL)
            self._mqttc.subscribe(ALL_AGENTS_UPDATE_CHANNEL, self._on_update)
            self._mqttc.publish(f'{AGENT}/state', 'running', retain=True)
        except Exception as exception:
            logger.exception('Subscribe failed: %s', exception)

    def _on_update(self, topic: str, payload: str, qos: int) -> None:
        """Callback for messages received on Configuration Update Channel

        @param topic: channel message received
        @param payload: message received
        @param qos: quality of service level
        """
        logger.info(f'Message received:{payload} on topic: {topic}')
        if self.diagnostic_checker:
            self.diagnostic_checker.set_configuration_value(json.loads(
                payload), topic.split('/')[-2] + '/' + topic.split('/')[-1])

    def _on_command(self, topic: str, payload: str, qos: int) -> None:
        """Callback for messages received on Command Channel

        @param topic: channel message received
        @param payload: message received
        @param qos: quality of service level
        """
        # Parse payload
        try:
            if payload is not None:
                request = json.loads(payload)
                logger.info(f'Received message: {request} on topic: {topic}')
                if self.diagnostic_checker:
                    self.diagnostic_checker.execute(request)
        except ValueError as error:
            logger.error(
                f'Unable to parse command/request ID. Verify request is in the correct format. {error}')

    def _on_message(self, topic: str, payload: str, qos: int) -> None:
        """Callback for messages received on State Channel

        @param topic: channel message received
        @param payload: message received
        @param qos: quality of service level
        """
        logger.info(f'Message received: {payload} on topic: {topic}')

    def stop(self) -> None:
        """Shutdown broker, publishing 'dead' event first."""
        if self.diagnostic_checker:
            self.diagnostic_checker.stop_timer()
        self._mqttc.publish(f'{AGENT}/state', 'dead', retain=True)
        self._mqttc.stop()
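# Minimal (assumed) lifecycle sketch: b = Broker(tls=True) connects to the MQTT
# broker and subscribes to the agent channels; b.publish(channel, message)
# sends a message; b.stop() publishes the 'dead' state and disconnects.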
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: https://github.com/monero-project/mininero
# Author: Dusan Klinec, ph4r05, 2018
# see https://eprint.iacr.org/2015/1098.pdf
import logging
from monero_glue.xmr import common, crypto
from monero_serialize import xmrtypes
logger = logging.getLogger(__name__)
def copy_ct_key(ct):
"""
Ct key copy
:param ct:
:return:
"""
return xmrtypes.CtKey(mask=ct.mask, dest=ct.dest)
def copy_ct_keys(vct):
"""
Copy of the CtKey vector
:param vct:
:return:
"""
return [copy_ct_key(x) for x in vct]
def decode_ct_keys_points(vct, copy=False):
"""
Decodes CtKeys vector as points
:param vct:
:param copy:
:return:
"""
rvct = copy_ct_keys(vct) if copy else vct
for i in range(len(rvct)):
rvct[i].mask = crypto.decodepoint(rvct[i].mask)
rvct[i].dest = crypto.decodepoint(rvct[i].dest)
return rvct
def decode_ct_keys_matrix_points(mxt, copy=False):
"""
Decodes CtKeys matrix as points
:param vct:
:param copy:
:return:
"""
rmxt = key_matrix(len(mxt), len(mxt[0])) if copy else mxt
for i in range(len(mxt)):
cur = decode_ct_keys_points(mxt[i], copy)
if copy:
rmxt[i] = cur
return rmxt
def gen_mlsag(pk, xx, index):
"""
Multilayered Spontaneous Anonymous Group Signatures (MLSAG signatures)
These are aka MG signatures in earlier drafts of the ring ct paper
c.f. http://eprint.iacr.org/2015/1098 section 2.
keyImageV just does I[i] = xx[i] * Hash(xx[i] * G) for each i
Gen creates a signature which proves that for some column in the keymatrix "pk"
the signer knows a secret key for each row in that column
Ver verifies that the MG sig was created correctly
:param pk:
:param xx:
:param index:
:return:
"""
rows = len(xx)
cols = len(pk)
logger.debug("Generating MG sig of size %s x %s" % (rows, cols))
logger.debug("index is: %s, %s" % (index, pk[index]))
c = [None] * cols
alpha = scalar_gen_vector(rows)
I = key_image_vector(xx)
L = key_matrix(rows, cols)
R = key_matrix(rows, cols)
s = key_matrix(rows, cols)
m = "".join(pk[0])
for i in range(1, cols):
m = m + "".join(pk[i])
L[index] = [crypto.scalarmult_base(aa) for aa in alpha] # L = aG
Hi = hash_key_vector(pk[index])
R[index] = [crypto.scalarmult(Hi[ii], alpha[ii]) for ii in range(0, rows)] # R = aI
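# Ring construction: starting one slot past the signer's index, each
# challenge c[i+1] commits to (L[i], R[i]); s[index] is back-solved at the
# end (alpha - c * x) so the ring of challenges closes consistently.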
oldi = index
i = (index + 1) % cols
c[i] = crypto.cn_fast_hash(m + "".join(L[oldi]) + "".join(R[oldi]))
while i != index:
s[i] = scalar_gen_vector(rows)
L[i] = [crypto.add_keys2(s[i][j], c[i], pk[i][j]) for j in range(0, rows)]
Hi = hash_key_vector(pk[i])
R[i] = [crypto.add_keys3(s[i][j], Hi[j], c[i], I[j]) for j in range(0, rows)]
oldi = i
i = (i + 1) % cols
c[i] = crypto.cn_fast_hash(m + "".join(L[oldi]) + "".join(R[oldi]))
s[index] = [
crypto.sc_mulsub(c[index], xx[j], alpha[j]) for j in range(0, rows)
] # alpha - c * x
return I, c[0], s
def ver_mlsag(pk, I, c0, s):
"""
Verify MLSAG
:param pk:
:param I:
:param c0:
:param s:
:return:
"""
rows = len(pk[0])
cols = len(pk)
logger.debug("verifying MG sig of dimensions %s x %s" % (rows, cols))
c = [None] * (cols + 1)
c[0] = c0
L = key_matrix(rows, cols)
R = key_matrix(rows, cols)
m = "".join(pk[0])
for i in range(1, cols):
m = m + "".join(pk[i])
i = 0
while i < cols:
L[i] = [crypto.add_keys2(s[i][j], c[i], pk[i][j]) for j in range(0, rows)]
Hi = hash_key_vector(pk[i])
R[i] = [crypto.add_keys3(s[i][j], Hi[j], c[i], I[j]) for j in range(0, rows)]
oldi = i
i = i + 1
c[i] = crypto.cn_fast_hash(m + "".join(L[oldi]) + "".join(R[oldi]))
return c0 == c[cols]
|
python
|
import os
from dataclasses import dataclass, field
from di import Container, Dependant
@dataclass
class Config:
    host: str = field(default_factory=lambda: os.getenv("HOST", "localhost"))

class DBConn:
    def __init__(self, config: Config) -> None:
        self.host = config.host

async def controller(conn: DBConn) -> None:
    assert isinstance(conn, DBConn)

async def framework():
    container = Container()
    await container.execute_async(container.solve(Dependant(controller)))
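# The wiring above can be exercised with asyncio (assumed entry point,
# not part of the original snippet):
#     import asyncio
#     asyncio.run(framework())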
|
python
|
from beaker._compat import pickle
import logging
from datetime import datetime
from beaker.container import OpenResourceNamespaceManager, Container
from beaker.exceptions import InvalidCacheBackendError
from beaker.synchronization import null_synchronizer
log = logging.getLogger(__name__)
db = None
class GoogleNamespaceManager(OpenResourceNamespaceManager):
tables = {}
@classmethod
def _init_dependencies(cls):
global db
if db is not None:
return
try:
db = __import__("google.appengine.ext.db").appengine.ext.db
except ImportError:
raise InvalidCacheBackendError(
"Datastore cache backend requires the " "'google.appengine.ext' library"
)
def __init__(self, namespace, table_name="beaker_cache", **params):
"""Creates a datastore namespace manager"""
OpenResourceNamespaceManager.__init__(self, namespace)
def make_cache():
table_dict = dict(
created=db.DateTimeProperty(),
accessed=db.DateTimeProperty(),
data=db.BlobProperty(),
)
table = type(table_name, (db.Model,), table_dict)
return table
self.table_name = table_name
self.cache = GoogleNamespaceManager.tables.setdefault(table_name, make_cache())
self.hash = {}
self._is_new = False
self.loaded = False
self.log_debug = logging.DEBUG >= log.getEffectiveLevel()
# Google wants namespaces to start with letters, change the namespace
# to start with a letter
self.namespace = "p%s" % self.namespace
def get_access_lock(self):
return null_synchronizer()
def get_creation_lock(self, key):
# this is weird, should probably be present
return null_synchronizer()
def do_open(self, flags, replace):
# If we already loaded the data, don't bother loading it again
if self.loaded:
self.flags = flags
return
item = self.cache.get_by_key_name(self.namespace)
if not item:
self._is_new = True
self.hash = {}
else:
self._is_new = False
try:
self.hash = pickle.loads(str(item.data))
except (IOError, OSError, EOFError, pickle.PickleError):
if self.log_debug:
log.debug("Couln't load pickle data, creating new storage")
self.hash = {}
self._is_new = True
self.flags = flags
self.loaded = True
def do_close(self):
if self.flags is not None and (self.flags == "c" or self.flags == "w"):
if self._is_new:
item = self.cache(key_name=self.namespace)
item.data = pickle.dumps(self.hash)
item.created = datetime.now()
item.accessed = datetime.now()
item.put()
self._is_new = False
else:
item = self.cache.get_by_key_name(self.namespace)
item.data = pickle.dumps(self.hash)
item.accessed = datetime.now()
item.put()
self.flags = None
def do_remove(self):
item = self.cache.get_by_key_name(self.namespace)
item.delete()
self.hash = {}
# We can retain the fact that we did a load attempt, but since the
# file is gone this will be a new namespace should it be saved.
self._is_new = True
def __getitem__(self, key):
return self.hash[key]
def __contains__(self, key):
return key in self.hash
def __setitem__(self, key, value):
self.hash[key] = value
def __delitem__(self, key):
del self.hash[key]
def keys(self):
return self.hash.keys()
class GoogleContainer(Container):
namespace_class = GoogleNamespaceManager
|
python
|
# Generated by Django 3.1.5 on 2021-05-01 09:50
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='EducationalDetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('university', models.CharField(max_length=200)),
('board', models.CharField(max_length=200)),
('year_of_passing', models.IntegerField()),
('division', models.CharField(max_length=6)),
],
),
migrations.CreateModel(
name='Experience',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('duration', models.IntegerField(null=True)),
('organization', models.CharField(max_length=100, null=True)),
('area', models.CharField(max_length=200, null=True)),
],
),
migrations.CreateModel(
name='Vacancy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('advertisement_number', models.IntegerField()),
('job_description', models.TextField()),
('job_notification', models.FileField(upload_to='')),
('number_of_vacancy', models.IntegerField(default=1)),
('job_type', models.CharField(choices=[('T', 'Teaching'), ('NT', 'Non-Teaching')], max_length=15)),
('last_date', models.DateField()),
],
),
migrations.CreateModel(
name='ThesisSupervision',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_of_student', models.CharField(max_length=200)),
('masters_or_phd', models.CharField(choices=[('Masters', 'Masters'), ('PhD', 'PhD')], max_length=20)),
('year_of_completion', models.IntegerField()),
('title_of_thesis', models.CharField(max_length=100)),
('co_guides', models.CharField(max_length=200, null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='TeachingExperience',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('teaching_experience', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='recruitment.experience')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='SponsoredProjects',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('period', models.CharField(max_length=10)),
('sponsoring_organisation', models.CharField(max_length=200)),
('title_of_project', models.CharField(max_length=200)),
('grant_amount', models.IntegerField(null=True)),
('co_investigators', models.CharField(max_length=200, null=True)),
('status', models.CharField(choices=[('Ongoing', 'Ongoing'), ('Completed', 'Completed')], max_length=20)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ResearchExperience',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('research_experience', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='recruitment.experience')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='References',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('address', models.TextField(null=True)),
('email', models.EmailField(max_length=254)),
('mobile_number', models.BigIntegerField()),
('department', models.CharField(max_length=50)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='QualifiedExams',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('net', models.BooleanField()),
('gate', models.BooleanField()),
('jrf', models.BooleanField()),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Publications',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('referred_journal', models.CharField(max_length=100)),
('sci_index_journal', models.CharField(max_length=100)),
('international_conferences', models.CharField(max_length=100, null=True)),
('national_conferences', models.CharField(max_length=100, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PersonalDetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='Dr.', max_length=20)),
('sex', models.CharField(choices=[('M', 'Male'), ('F', 'Female'), ('O', 'Other')], default='M', max_length=2)),
('profile_picture', models.ImageField(blank=True, null=True, upload_to='')),
('marital_status', models.CharField(choices=[('M', 'Married'), ('U', 'Unmarried')], max_length=10)),
('discipline', models.CharField(max_length=50)),
('specialization', models.CharField(choices=[('MA', 'Major'), ('MI', 'Minor')], max_length=10)),
('category', models.CharField(choices=[('PH', 'Physically Handicapped'), ('UR', 'Unreserved'), ('OBC', 'Other Backward Classes'), ('SC', 'Scheduled Castes'), ('ST', 'Scheduled Tribes'), ('EWS', 'Economic Weaker Section')], max_length=20)),
('father_name', models.CharField(default='', max_length=40)),
('address_correspondence', models.TextField(max_length=1000)),
('address_permanent', models.TextField(default='', max_length=1000)),
('email_alternate', models.EmailField(default='', max_length=50, null=True)),
('phone_no', models.BigIntegerField(default=9999999999, null=True)),
('mobile_no_first', models.BigIntegerField(default=9999999999)),
('mobile_no_second', models.BigIntegerField(default=9999999999, null=True)),
('date_of_birth', models.DateField(default=datetime.date(1970, 1, 1))),
('nationality', models.CharField(max_length=30)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Patent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('filed_national', models.CharField(max_length=200, null=True)),
('filed_international', models.CharField(max_length=200, null=True)),
('award_national', models.CharField(max_length=200, null=True)),
('award_international', models.CharField(max_length=200, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='PapersInReferredJournal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=100)),
('year', models.IntegerField()),
('published', models.BooleanField()),
('accepted', models.BooleanField()),
('title', models.CharField(max_length=100)),
('reference_of_journal', models.CharField(max_length=100)),
('impact_factor', models.CharField(max_length=100)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='NationalConference',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=100)),
('year', models.IntegerField()),
('title', models.CharField(max_length=100)),
('name_and_place_of_conference', models.CharField(max_length=200)),
('presented', models.BooleanField()),
('published', models.BooleanField()),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='InternationalConference',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('author', models.CharField(max_length=100)),
('year', models.IntegerField()),
('title', models.CharField(max_length=100)),
('name_and_place_of_conference', models.CharField(max_length=200)),
('presented', models.BooleanField()),
('published', models.BooleanField()),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='IndustrialExperience',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('period', models.IntegerField(null=True)),
('organization', models.CharField(max_length=200, null=True)),
('title_of_post', models.CharField(max_length=200, null=True)),
('nature_of_work', models.TextField(null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ExperienceDetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('total_experience_months', models.IntegerField(null=True)),
('member_of_professional_body', models.CharField(max_length=200, null=True)),
('employer', models.CharField(max_length=100, null=True)),
('position_held', models.CharField(max_length=100, null=True)),
('date_of_joining', models.DateField(null=True)),
('date_of_leaving', models.DateField(null=True)),
('pay_in_payband', models.CharField(max_length=20, null=True)),
('payband', models.CharField(max_length=20, null=True)),
('AGP', models.CharField(max_length=20, null=True)),
('reasons_for_leaving', models.TextField(null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='CoursesTaught',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, null=True)),
('level', models.CharField(choices=[('UG', 'UnderGraduate'), ('PG', 'PostGraduate')], max_length=20, null=True)),
('number_of_times', models.IntegerField(null=True)),
('developed_by_you', models.BooleanField(null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Consultancy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('period', models.CharField(max_length=10)),
('sponsoring_organisation', models.CharField(max_length=200)),
('title_of_project', models.CharField(max_length=200)),
('grant_amount', models.IntegerField(null=True)),
('co_investigators', models.CharField(max_length=200, null=True)),
('status', models.CharField(choices=[('Ongoing', 'Ongoing'), ('Completed', 'Completed')], max_length=20)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Books',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_of_book', models.CharField(max_length=100)),
('year', models.IntegerField()),
('published', models.BooleanField()),
('title', models.CharField(max_length=100)),
('publisher', models.CharField(max_length=200)),
('co_author', models.CharField(max_length=100, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='BankDetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('payment_reference_number', models.CharField(max_length=20)),
('payment_date', models.DateField()),
('bank_name', models.CharField(max_length=100)),
('bank_branch', models.CharField(max_length=200)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='applied',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(default=django.utils.timezone.now)),
('advertisement_number', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='recruitment.vacancy')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='AdministrativeExperience',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('period', models.IntegerField(null=True)),
('organization', models.CharField(max_length=200, null=True)),
('title_of_post', models.CharField(max_length=200, null=True)),
('nature_of_work', models.TextField(null=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='AcademicDetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('area_of_specialization', models.TextField()),
('current_area_of_research', models.TextField()),
('date_of_enrollment_in_phd', models.DateField()),
('date_of_phd_defence', models.DateField()),
('date_of_award_of_phd', models.DateField()),
('XIIth', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='XIIth_details', to='recruitment.educationaldetails')),
('Xth', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='Xth_details', to='recruitment.educationaldetails')),
('graduation', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='graduation_details', to='recruitment.educationaldetails')),
('phd', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='phd_details', to='recruitment.educationaldetails')),
('post_graduation', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='post_graduations_details', to='recruitment.educationaldetails')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
python
|
parrot = "Norwegian Blue"
letter = input('Enter a char: ')
if letter in parrot:
    print('it is in there')
else:
    print('not there')

activity = input("What would you like to do today? ")
# I want to go to Cinema
if "cinema" not in activity.casefold():  # casefold handles some languages better
    print("But I want to go to the cinema")
|
python
|
from __future__ import absolute_import
from .node import Node
from .edge import Edge
from .domain import URI, Domain
from .file import File, FileOf
from .ip_address import IPAddress
from .process import Launched, Process
from .registry import RegistryKey
from .alert import Alert
__all__ = [
"Node",
"Edge",
"URI",
"Domain",
"File",
"FileOf",
"IPAddress",
"Launched",
"Process",
"RegistryKey",
"Alert",
]
|
python
|
#!/usr/bin/env python3
# Advent of Code 2016 - Day 13, Part One & Two
import sys
from itertools import chain, combinations, product
# test set
# start = (1, 1)
# goal = (7, 4)
# magic = 10
# part one & two
start = (1, 1)
goal = (31, 39)
magic = 1352
def is_valid(coord):
    x, y = coord
    if x < 0 or y < 0:
        return False
    s = x*x + 3*x + 2*x*y + y + y*y + magic
    popcount = bin(s).count('1')
    return popcount % 2 == 0

def gen_moves(coord):
    x, y = coord
    moves = [(x+dx, y+dy) for dx, dy in [(0, -1), (1, 0), (0, 1), (-1, 0)]]
    return filter(is_valid, moves)

def bfs_paths(start, goal, limit=None):
    iterations = 0
    queue = [(start, [start])]
    visited = set(start)
    while queue:
        (vertex, path) = queue.pop(0)
        for current in set(gen_moves(vertex)) - set(path):
            if limit and len(path) > limit:
                yield visited
                return
            iterations += 1
            if current == goal:
                yield path
            elif current in visited:
                continue
            else:
                visited.add(current)
                queue.append((current, path + [current]))

def shortest_path(start, goal, limit=None):
    try:
        return next(bfs_paths(start, goal, limit))
    except StopIteration:
        return None

def main(argv):
    path = shortest_path(start, goal)
    print(len(path))
    visited = shortest_path(start, goal, 50)
    print(len(visited))
    return 0

if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
python
|
def extractWwwScribblehubCom(item):
    '''
    Parser for 'www.scribblehub.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
        return None
    if len(item['tags']) == 2:
        item_title, item_id = item['tags']
        if item_id.isdigit():
            return buildReleaseMessageWithType(item, item_title, vol, chp, frag=frag, postfix=postfix, tl_type="oel")
        item_id, item_title = item['tags']
        if item_id.isdigit():
            return buildReleaseMessageWithType(item, item_title, vol, chp, frag=frag, postfix=postfix, tl_type="oel")
    return False
|
python
|
from taichi.profiler.kernelprofiler import \
KernelProfiler # import for docstring-gen
from taichi.profiler.kernelprofiler import get_default_kernel_profiler
|
python
|
from setuptools import setup
# Convert README.md to README.rst because PyPI does not support Markdown.
try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst', format='md')
except:
    # If unable to convert, try inserting the raw README.md file.
    try:
        with open('README.md', encoding="utf-8") as f:
            long_description = f.read()
    except:
        # If all else fails, use some reasonable string.
        long_description = 'BigQuery schema generator.'
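# Note: later pypandoc releases dropped convert() in favour of convert_file() /
# convert_text(); the call above targets the older API.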
setup(name='bigquery-schema-generator',
version='0.5.1',
description='BigQuery schema generator from JSON or CSV data',
long_description=long_description,
url='https://github.com/bxparks/bigquery-schema-generator',
author='Brian T. Park',
author_email='[email protected]',
license='Apache 2.0',
packages=['bigquery_schema_generator'],
python_requires='~=3.5',
entry_points={
'console_scripts': [
'generate-schema = bigquery_schema_generator.generate_schema:main'
]
}
)
|
python
|
from sanic import Sanic
from sanic.response import text
from sanic_plugin_toolkit import SanicPluginRealm
#from examples.my_plugin import my_plugin
from examples import my_plugin
from examples.my_plugin import MyPlugin
from logging import DEBUG
app = Sanic(__name__)
# mp = MyPlugin(app) //Legacy registration example
realm = SanicPluginRealm(app)
my_plugin = realm.register_plugin(my_plugin)
@app.route('/')
def index(request):
    return text("hello world")

if __name__ == "__main__":
    app.run("127.0.0.1", port=8098, debug=True, auto_reload=False)
|
python
|
from time import sleep
from procs.makeTree import rel2abs
# ์ํ๋ถ
def loadSet(): # ์ ์ ๊ฐ ๊ตฌ์ฑํ ๋งคํฌ๋ก ๋ถ๋ฌ์ค๊ธฐ ์ฝ๋
try:
f = open("macro.brn", 'r')
except FileNotFoundError:
return
try:
lines = f.readlines()
for line in lines:
spite_line = line.split()
list_make = []
for i in range(len(spite_line)):
if i == 0:
keyv = spite_line[0]
elif spite_line[i] == 'ํค':
if spite_line[i+1] == '์
๋ ฅ':
list_make.append(['ํค ์
๋ ฅ',spite_line[i+2]])
elif spite_line[i] == '์๊ฐ':
if spite_line[i+1] == '์ง์ฐ':
list_make.append(['์๊ฐ ์ง์ฐ',spite_line[i+2]])
elif spite_line[i] == 'ํ๋ ํธ':
list_make.append(['ํ๋ ํธ',spite_line[i+1]])
elif spite_line[i] == '๋ช
๋ น':
list_make.append(['๋ช
๋ น',spite_line[i+1]])
customCommands[keyv] = list_make
except:
pass
f.close()
def saveSet(): # ์ ์ ๊ฐ ๊ตฌ์ฑํ ๋งคํฌ๋ก ์ ์ฅ ์ฝ๋
f = open("macro.brn", 'w')
for i in customCommands.keys():
f.write(i+" ")
for j in customCommands.get(i):
f.write(j[0]+" "+j[1]+" ")
f.write("\n")
f.close()
def stall(time): # ์๊ฐ ์ง์ฐ ์ํ
sleep(time)
def palette(COM_name): # ํ๋ ํธ ๋ช
๋ น ์ํ
temp=pyperclip.paste()
pyperclip.copy(COM_name)
if IDE == 0:
pag.hotkey('ctrl', 'p')
sleep(0.05)
pag.write('>')
pag.hotkey('ctrl','v')
pag.press('enter')
elif IDE == 1:
pag.hotkey('ctrl','alt','a')
sleep(0.05)
pag.hotkey('ctrl','v')
pag.press('enter')
elif IDE == 2:
pag.hotkey('ctrl','3')
sleep(0.05)
pag.hotkey('ctrl','v')
pag.press('enter')
elif IDE == 3:
pag.hotkey('ctrl','shift','a')
sleep(0.05)
pag.hotkey('ctrl','v')
pag.press('enter')
pyperclip.copy(temp)
def opn(sel4): # ํด๋์ค/ํจ์/ํ์ผ ์ด๊ธฐ, ์ธ์: main์ sel4
if len(sel4)==1: # ์ธ๋ฑ์ค 0์ ํ์ผ ์ด๋ฆ
openRoutine(sel4[0])
return
else: # ์ธ๋ฑ์ค 1์ ํ์ผ ์ด๋ฆ, ์ธ๋ฑ์ค 2์ ์์ ์์น
openRoutine(sel4[1])
lineRoutine(sel4[2][0])
def openRoutine(name):
if IDE==0:
pag.hotkey('ctrl','p')
pyperclip.copy(name)
sleep(0.05)
pag.hotkey('ctrl','v')
pag.press('enter')
elif IDE==1:
pag.hotkey('ctrl','o')
pyperclip.copy(rel2abs[name])
sleep(0.05)
pag.hotkey('ctrl','v')
pag.press('enter')
elif IDE==2:
palette('open file')
sleep(0.05)
pyperclip.copy(rel2abs[name])
pag.hotkey('ctrl','v')
pag.press('enter')
elif IDE==3:
pag.hotkey('ctrl','shift','n')
pyperclip.copy(name)
sleep(0.05)
pag.hotkey('ctrl','v')
pag.press('enter')
def lineRoutine(no):
if IDE==2:
pag.hotkey('ctrl','l')
else:
pag.hotkey('ctrl','g')
sleep(0.05)
pag.write(str(no))
pag.press('enter')
press_key = []
callStack=[]
IDE = -1
# keyboard ๋ชจ๋: ํค ๋๋ฅด๋ ๋งคํฌ๋ก
# pag ๋ชจ๋: ํ
์คํธ ์
๋ ฅ์ฉ
def keyIn(Inputkey): # ํค ์
๋ ฅ
pag.keyDown(Inputkey)
press_key.append(Inputkey)
def keyRel(): # ํค ๋ผ๊ธฐ
for k in press_key:
pag.keyUp(k)
press_key.clear()
def execute(name):
if name in builtInCommands:
com=builtInCommands[name][IDE]
elif name in customCommands:
com=customCommands[name]
else: # '๋ช
๋ น' ์ดํ ์๋ ๋ช
๋ น ๋ฑ์ฅ
return
if name not in callStack:
callStack.append(name)
else:
return
for comm in com:
if comm[0] == 'ํค ์
๋ ฅ':
keyIn(comm[1])
else:
keyRel()
if comm[0]=='์๊ฐ ์ง์ฐ':
stall(float(comm[1]))
elif comm[0]=='ํ๋ ํธ':
palette(comm[1])
elif comm[0]=='๋ช
๋ น':
execute(comm[1])
keyRel()
callStack.pop()
string_macro = ''
if name in builtInCommands:
string_macro="+".join([x[1] for x in builtInCommands[name][IDE]])
string_macro=' "'+string_macro+'"'
return string_macro
# ์ ๋ณ๋ถ
import os, sys
import pyautogui as pag
sys.path.append(os.path.abspath('..'))
import procs.phonetic as ph
import pyperclip
builtInCommands={
'์ฝ๋์๋ก์ด๋':((('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','up')),(('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','up')),(('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','up')),(('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','up'))),
'์ฝ๋์๋๋ก์ด๋':((('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','down')),(('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','down')),(('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','down')),(('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','down'))),
'์ค๋จ์ ':((('ํค ์
๋ ฅ','f9'),),(('ํค ์
๋ ฅ','f9'),),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','b')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','f8'),)),
'ํ์๊ธฐ':((('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','b')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','l')),(('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','p'),('ํค ์
๋ ฅ','q')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','l'))),
'์ ๊ธฐ':((('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','[')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','M'),('ํค ์
๋ ฅ','M')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','+')),()),
'ํด๊ธฐ':((('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ',']')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','M'),('ํค ์
๋ ฅ','M')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','+')),()),
'์ ํ์ฃผ์':((('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','a')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','k'),('ํค ์
๋ ฅ','c')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','/')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','/'))),
'์ฃผ์':((('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','/')),(()),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','/')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','/'))),
'์๋์์ฑ':((('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','space')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','space')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','space')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','space'))),
'์ด์ ์์น':((('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','left')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','-')),(('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','left')),(())),
'์ค๋ฅ์์น':((('ํค ์
๋ ฅ','f8')),(()),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','.')),(())),
'๋ค์์ด๊ธฐ':((('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','t')),(()),(('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','left')),(())),
'๋ชจ๋ ์ฐธ์กฐ':((('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','f12')),(('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','f12')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','h')),(('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','f7'))),
'๋ฆฌํฉํฐ๋ง':((('ํค ์
๋ ฅ', 'ctrl'), ('ํค ์
๋ ฅ', 'shift'),('ํค ์
๋ ฅ','r')),(('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','enter')),(('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','t')),(('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','f6'))),
'๋ชจ๋์ ๊ธฐ':((('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','k'),('ํค ์
๋ ฅ','0')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','m'),('ํค ์
๋ ฅ','o')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','/')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','-'))),
'๋ชจ๋ํด๊ธฐ':((('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','k'),('ํค ์
๋ ฅ','j')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','m'),('ํค ์
๋ ฅ','l')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','*')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','+'))),
'์๋์ ๋ ฌ':((()),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','k'),('ํค ์
๋ ฅ','f')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','f')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','i'))),
'ํ์ผ์ด๊ธฐ':((('ํค ์
๋ ฅ', 'ctrl'), ('ํค ์
๋ ฅ', 'p')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','t')),(()),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','n'))),
'์ํ์ผ':((('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','n')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','n')),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','n')),(('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','insert'))),
'์ด๋ฆ๋ณ๊ฒฝ':((('ํค ์
๋ ฅ','f2'),),(('ํค ์
๋ ฅ','ctrl'),('ํค ์
๋ ฅ','r')),(('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','r')),(('ํค ์
๋ ฅ','shift'),('ํค ์
๋ ฅ','f6'))),
'์ ์๋ก์ด๋':((('ํค ์
๋ ฅ','f12'),),(('ํค ์
๋ ฅ','f12'),),(('ํค ์
๋ ฅ','f3'),),(())),
'์ ์๋ณด๊ธฐ':((('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','f12')),(('ํค ์
๋ ฅ','alt'),('ํค ์
๋ ฅ','f12')),(('ํค ์
๋ ฅ','f3')),(())),
}
customCommands=dict()
def matchK(inp):
inp=normalize(inp)
key=list(builtInCommands)
key2=list(customCommands)
key3=['๋ช
๋ น', '๋ณด๊ธฐ', 'ํ์']
key.extend(key2)
key.extend(key3)
key=[normalize(x) for x in key]
key.sort(key=lambda x: len(x))
key.sort(key=lambda x: len(x)!=len(inp))
ret=ph.arrange_k(inp, key)
return ret
def normalize(inp): # ๊ณต๋ฐฑ๋ง ์ ๊ฑฐ
return ''.join(inp.split())
def ideUP(name):
global IDE
try:
IDE=('๋น์ฃผ์ผ ์คํ๋์ค ์ฝ๋','๋น์ฃผ์ผ ์คํ๋์ค','์ดํด๋ฆฝ์ค','PyCharm').index(name)
except ValueError:
pass
|
python
|
from __future__ import print_function
import math, json, os, pickle, sys
import keras
from keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint, LambdaCallback
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Model, Sequential
from keras.optimizers import Adam, SGD, RMSprop
from keras.preprocessing.image import ImageDataGenerator
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# DATADIR = "/storage/plzen1/home/radekj/vmmr"
DATADIR = "/Users/radekj/devroot/vmmr"
name = "dir8r"
log_path = os.path.join(DATADIR, 'results')
log_file = "{}/{}/{}_log.csv".format(log_path, name, name)
csv_logger = CSVLogger(log_file, append=True)
my_log_path = os.path.join(DATADIR, 'results')
my_log_file = "{}/{}/{}_log.txt".format(my_log_path, name, name)
SIZE = (224, 224)
BATCH_SIZE = 32
EPOCH = 30
input_shape = (224, 224, 3)
def get_model():
model = keras.applications.resnet50.ResNet50(include_top=False,
weights='imagenet',
pooling='max',
input_shape=input_shape)
return model
def save_history(history):
hist_path = os.path.join(DATADIR, 'results')
hist_file = "{}/{}/{}_log.json".format(hist_path, name, name)
with open(hist_file, 'wb') as file_pi:
pickle.dump(history.history, file_pi)
def write_to_log(epoch, logs):
if not os.path.isfile(my_log_file):
with open(my_log_file, mode='a+') as f:
f.write("epoch, loss, acc\n")
with open(my_log_file, mode='a') as f:
# epoch, loss, acc
f.write("{}, {}, {},\n".format(epoch, logs['loss'], logs['acc']))
def train_vgg(folder):
DATA_DIR = folder
TRAIN_DIR = os.path.join(DATA_DIR, 'train')
VALID_DIR = os.path.join(DATA_DIR, 'valid')
TEST_DIR = os.path.join(DATA_DIR, 'test')
save_aug = os.path.join(DATA_DIR, 'tmp')
num_train_samples = sum([len(files) for r, d, files in os.walk(TRAIN_DIR)])
num_valid_samples = sum([len(files) for r, d, files in os.walk(VALID_DIR)])
num_train_steps = math.floor(num_train_samples / BATCH_SIZE)
num_valid_steps = math.floor(num_valid_samples / BATCH_SIZE)
shift = 0.05
train_gen = ImageDataGenerator(
width_shift_range=shift,
height_shift_range=shift,
horizontal_flip=False,
vertical_flip=False,
rotation_range=4,
zoom_range=0.1)
batches = train_gen.flow_from_directory(
directory=TRAIN_DIR,
target_size=SIZE,
color_mode="rgb",
batch_size=BATCH_SIZE,
class_mode="categorical",
shuffle=True)
val_gen = ImageDataGenerator()
val_batches = val_gen.flow_from_directory(
directory=VALID_DIR,
target_size=SIZE,
color_mode="rgb",
batch_size=BATCH_SIZE,
class_mode="categorical",
shuffle=True)
model = get_model()
model.layers.pop()
classes = list(iter(batches.class_indices))
for layer in model.layers:
layer.trainable = False
#model.layers.
#model.add(Flatten())
# add last layer
x = model.layers[-1].output # (None, 7, 7, 2048)
x = Flatten()(x)
x = Dense(len(classes), activation='softmax', name='fc_last')(x)
finetuned_model = Model(model.input, x)
finetuned_model.summary()
# opt = RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
# opt = Adam(lr=0.001)
# opt = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
finetuned_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
for c in batches.class_indices:
classes[batches.class_indices[c]] = c
finetuned_model.classes = classes
# finetuned_model.summary()
early_stopping = EarlyStopping(patience=10)
my_log_callback = LambdaCallback(on_epoch_end=lambda epoch, logs: write_to_log(epoch, logs), )
check_pointer = ModelCheckpoint("{}_best.h5".format(name), verbose=1, save_best_only=True)
history = finetuned_model.fit_generator(
batches,
steps_per_epoch=num_train_steps,
epochs=EPOCH,
callbacks=[early_stopping, check_pointer, my_log_callback],
validation_data=val_batches,
validation_steps=num_valid_steps)
save_history(history)
model.save("{}_final.h5".format(name))
if __name__ == '__main__':
"""
dataset_path: /Users/radekj/devroot/vmmr/datasets/sample5
/storage/plzen1/home/radekj/vmmr"
"""
print(len(sys.argv))
if len(sys.argv) < 2:
print("Need param: python train_vgg16.py dataset_path")
exit(1)
folder = str(sys.argv[1])
exists = os.path.isdir(folder)
if not exists:
print("Folder '{}' not found.".format(folder))
exit(1)
print("===== folder: {}".format(folder))
train_vgg(folder)
print("===== end.")
|
python
|
# This is an exercise program showing how a while loop can be used.
password = ''
while password != 'python123':
    password = input("Enter password:")
    if password == 'python123':
        print("You are logged in!")
    else:
        print("Try again.")
|
python
|
import argparse
import random
from typing import Any, Dict, List, Sequence, Tuple
import segmentation_models_pytorch as smp
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.transforms.functional import (InterpolationMode, hflip,
resized_crop, rotate)
from torchvision.transforms.transforms import RandomResizedCrop
from torchvision.utils import save_image
import numpy as np  # needed by np.delete() in _shared_step below
from solo.losses.byol import byol_loss_func  # assumed import path; used in _shared_step
from solo.losses.simclr import simclr_loss_func
from solo.methods.base import BaseMomentumMethod
from solo.utils.momentum import initialize_momentum_params
class RandomResizedCropWithMask(RandomResizedCrop):
"""
Perform the same random resized crop, horizontal flip
and 90 degree rotation on an image and mask
"""
def __init__(
self,
size,
scale=(0.2, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
interpolation=InterpolationMode.BILINEAR,
interpolation_mask=InterpolationMode.NEAREST,
):
super().__init__(size, scale, ratio, interpolation)
self.interpolation_mask = interpolation_mask
def forward(self, img, mask):
i, j, h, w = self.get_params(img, self.scale, self.ratio) # type: ignore
# Resize
img = resized_crop(img, i, j, h, w, self.size, self.interpolation) # type: ignore
mask = resized_crop(mask, i, j, h, w, self.size, self.interpolation_mask) # type: ignore
# Horizontal flip
if random.random() > 0.5:
img = hflip(img)
mask = hflip(mask)
# 90 degree rotation
if random.random() > 0.5:
angle = random.choice([90, 180, 270])
img = rotate(img, angle)
mask = rotate(mask, angle)
return img, mask
class SimCLRPointReg(BaseMomentumMethod):
def __init__(
self,
proj_output_dim: int,
proj_hidden_dim: int,
pred_hidden_dim: int,
temperature: float,
segmentation_arch: str,
segmentation_encoder: str,
segmentation_weights: str,
n_classes: int,
n_points: int,
feats_res: int,
alpha: float,
**kwargs,
):
"""Implements BYOL (https://arxiv.org/abs/2006.07733).
Args:
proj_output_dim (int): number of dimensions of projected features.
proj_hidden_dim (int): number of neurons of the hidden layers of the projector.
pred_hidden_dim (int): number of neurons of the hidden layers of the predictor.
"""
kwargs["num_classes"] = 1
super().__init__(**kwargs)
self.n_points = n_points
self.feats_res = feats_res
self.n_classes = n_classes
self.temperature = temperature
self.alpha = alpha
# projector
self.projector = nn.Sequential(
nn.Conv2d(self.features_dim, proj_hidden_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(proj_hidden_dim),
nn.ReLU(),
nn.Conv2d(proj_hidden_dim, proj_hidden_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(proj_hidden_dim),
nn.ReLU(),
nn.Conv2d(proj_hidden_dim, proj_output_dim, kernel_size=1),
nn.BatchNorm2d(proj_output_dim, affine=False),
)
# momentum projector
self.momentum_projector = nn.Sequential(
nn.Conv2d(self.features_dim, proj_hidden_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(proj_hidden_dim),
nn.ReLU(),
nn.Conv2d(proj_hidden_dim, proj_hidden_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(proj_hidden_dim),
nn.ReLU(),
nn.Conv2d(proj_hidden_dim, proj_output_dim, kernel_size=1),
nn.BatchNorm2d(proj_output_dim, affine=False),
)
initialize_momentum_params(self.projector, self.momentum_projector)
# predictor
self.predictor = nn.Sequential(
nn.Conv2d(proj_output_dim, pred_hidden_dim, kernel_size=1, bias=False),
nn.BatchNorm2d(pred_hidden_dim),
nn.ReLU(),
nn.Conv2d(pred_hidden_dim, proj_output_dim, kernel_size=1),
nn.BatchNorm2d(proj_output_dim, affine=False),
)
# Segmentation network
self.segmentation_net = smp.create_model(
segmentation_arch,
encoder_name=segmentation_encoder,
in_channels=3,
classes=n_classes - 1 if n_classes == 2 else n_classes,
encoder_weights=None,
)
state_dict = torch.load(segmentation_weights)
self.segmentation_net.load_state_dict(state_dict, strict=True)
print(f"Loaded segmentation network weights from {segmentation_weights}")
self.crop = RandomResizedCropWithMask(size=kwargs["crop_size"])
@staticmethod
def add_model_specific_args(
parent_parser: argparse.ArgumentParser,
) -> argparse.ArgumentParser:
parent_parser = super(SimCLRPointReg, SimCLRPointReg).add_model_specific_args(
parent_parser
)
parser = parent_parser.add_argument_group("byol")
# Projector
parser.add_argument("--proj_output_dim", type=int, default=256)
parser.add_argument("--proj_hidden_dim", type=int, default=2048)
# Predictor
parser.add_argument("--pred_hidden_dim", type=int, default=512)
# Parameter
parser.add_argument("--temperature", type=float, default=0.1)
# Segmentation network
parser.add_argument("--segmentation_arch", type=str, default="unet")
parser.add_argument("--segmentation_encoder", type=str, default="resnet18")
parser.add_argument("--n_classes", type=int, default=2)
parser.add_argument("--segmentation_weights", type=str, required=True)
# Method params
parser.add_argument("--n_points", type=int, default=8)
parser.add_argument("--feats_res", type=int, default=7)
parser.add_argument("--alpha", type=float, default=1.0)
return parent_parser
@property
def learnable_params(self) -> List[dict]:
"""Adds projector and predictor parameters to the parent's learnable parameters.
Returns:
List[dict]: list of learnable parameters.
"""
extra_learnable_params = [
{"params": self.projector.parameters()},
{"params": self.predictor.parameters()},
]
return super().learnable_params + extra_learnable_params
@property
def momentum_pairs(self) -> List[Tuple[Any, Any]]:
"""Adds (projector, momentum_projector) to the parent's momentum pairs.
Returns:
List[Tuple[Any, Any]]: list of momentum pairs.
"""
extra_momentum_pairs = [(self.projector, self.momentum_projector)]
return super().momentum_pairs + extra_momentum_pairs
def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:
"""Performs forward pass of the online backbone, projector and predictor.
Args:
X (torch.Tensor): batch of images in tensor format.
Returns:
Dict[str, Any]: a dict containing the outputs of the parent and the projected features.
"""
out = super().forward(X, *args, **kwargs)
z = self.projector(out["feats"])
p = self.predictor(z)
return {**out, "z": z, "p": p}
@staticmethod
def sample_index_pairs(
mask1, mask2, n_points
) -> Tuple[torch.Tensor, torch.Tensor, List[int]]:
"""
Sample n_points indices from each mask
"""
# Only sample a class mask if its non-empty across the entire batch
bs = mask1.shape[1]
classes = [[] for _ in range(bs)]
for n in range(bs):
for i, (m1, m2) in enumerate(zip(mask1[:, n], mask2[:, n])):
if not (torch.all(m1 == 0) or torch.all(m2 == 0)):
classes[n].append(i)
# Sample points
indices1, indices2, selected = [], [], []
bs_range = torch.arange(bs)
for _ in range(n_points):
i = [random.choice(c) for c in classes] # Sample a class for each element
indices1.append(torch.multinomial(mask1[i, bs_range], 1).permute(1, 0))
indices2.append(torch.multinomial(mask2[i, bs_range], 1).permute(1, 0))
selected.extend(i)
indices1 = torch.cat(indices1)
indices2 = torch.cat(indices2)
return indices1, indices2, selected
def _shared_step(
self, feats: List[torch.Tensor], momentum_feats: List[torch.Tensor]
) -> torch.Tensor:
Z = [self.projector(f) for f in feats]
P = [self.predictor(z) for z in Z]
# forward momentum backbone
with torch.no_grad():
Z_momentum = [self.momentum_projector(f) for f in momentum_feats]
# ------- negative cosine similarity loss -------
neg_cos_sim = 0
for v1 in range(self.num_large_crops):
for v2 in np.delete(range(self.num_crops), v1):
neg_cos_sim += byol_loss_func(P[v2], Z_momentum[v1])
# calculate std of features
with torch.no_grad():
z_std = (
F.normalize(torch.stack(Z[: self.num_large_crops]), dim=-1)
.std(dim=1)
.mean()
)
return neg_cos_sim, z_std
def training_step(self, batch: List[Any], batch_idx: int) -> torch.Tensor:
"""Training step for BYOL reusing BaseMethod training step.
Args:
batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where
[X] is a list of size num_crops containing batches of images.
batch_idx (int): index of the batch.
Returns:
torch.Tensor: total loss composed of BYOL and classification loss.
"""
# Generate mask
with torch.no_grad():
mask = self.segmentation_net(batch[1][2])
if self.n_classes == 2:
mask = F.logsigmoid(mask).exp() # Convert to [0,1]
mask = torch.where(mask > 0.5, 1, 0) # Threshold
else:
idxs = mask.argmax(dim=1)
mask = F.one_hot(idxs, num_classes=self.n_classes).permute(0, 3, 1, 2)
# Crop views and masks
batch[1][0], mask1 = self.crop(batch[1][0], mask)
batch[1][1], mask2 = self.crop(batch[1][1], mask)
# Pass through encoder
out = super().training_step(batch, batch_idx, dense=True)
[_, _, feats1], [_, _, feats2], _ = out["dense_feats"]
[_, _, momentum_feats1], [_, _, momentum_feats2], _ = out[
"momentum_dense_feats"
]
p1 = self.predictor(self.projector(feats1))
p2 = self.predictor(self.projector(feats2))
with torch.no_grad():
z1 = self.momentum_projector(momentum_feats1)
z2 = self.momentum_projector(momentum_feats2)
# Resize features
z1 = F.interpolate(
z1, self.feats_res, mode="bilinear", align_corners=True
).flatten(2)
z2 = F.interpolate(
z2, self.feats_res, mode="bilinear", align_corners=True
).flatten(2)
p1 = F.interpolate(
p1, self.feats_res, mode="bilinear", align_corners=True
).flatten(2)
p2 = F.interpolate(
p2, self.feats_res, mode="bilinear", align_corners=True
).flatten(2)
# Resize masks
mask1 = (
F.adaptive_avg_pool2d(mask1.float(), self.feats_res)
.flatten(2)
.permute(1, 0, 2)
)
mask2 = (
F.adaptive_avg_pool2d(mask2.float(), self.feats_res)
.flatten(2)
.permute(1, 0, 2)
)
# Create background mask for binary segmentation
if self.n_classes == 2:
mask1 = torch.cat([mask1, 1 - mask1], dim=0)
mask2 = torch.cat([mask2, 1 - mask2], dim=0)
# --- Loss --- #
# Sample point pairs
idxs1, idxs2, classes = self.sample_index_pairs(mask1, mask2, self.n_points)
# Get corresponding features for the points
b = z1.shape[0]
range = torch.arange(b)
p1_pts, z1_pts, p2_pts, z2_pts, all_classes = [], [], [], [], []
for idx1, idx2 in zip(idxs1, idxs2):
p1_pts.append(p1[range, :, idx1[:]])
z1_pts.append(z1[range, :, idx1[:]])
p2_pts.append(p2[range, :, idx2[:]])
z2_pts.append(z2[range, :, idx2[:]])
g1 = torch.concat(p1_pts + z2_pts)
g2 = torch.concat(p2_pts + z1_pts)
classes = torch.tensor(classes, device=z1.device, dtype=torch.int64).repeat(2)
nce_loss_pt = (
simclr_loss_func(g1, indexes=classes, temperature=self.temperature)
+ simclr_loss_func(g2, indexes=classes, temperature=self.temperature)
) / 2
self.log("train_nce_loss", nce_loss_pt, on_epoch=True, sync_dist=True)
if self.alpha == 1.0:
return nce_loss_pt
# --- Image Loss --- #
z1_pooled = F.adaptive_avg_pool1d(z1, 1).squeeze(-1)
z2_pooled = F.adaptive_avg_pool1d(z2, 1).squeeze(-1)
p1_pooled = F.adaptive_avg_pool1d(p1, 1).squeeze(-1)
p2_pooled = F.adaptive_avg_pool1d(p2, 1).squeeze(-1)
g1_img = torch.cat([p1_pooled, z2_pooled])
g2_img = torch.cat([p2_pooled, z1_pooled])
indexes = batch[0].repeat(2)
nce_loss_img = (
simclr_loss_func(g1_img, indexes=indexes, temperature=self.temperature)
+ simclr_loss_func(g2_img, indexes=indexes, temperature=self.temperature)
) / 2
self.log("train_nce_loss_img", nce_loss_img, on_epoch=True, sync_dist=True)
return nce_loss_pt * self.alpha + nce_loss_img * (1 - self.alpha)
|
python
|
import sys
from cx_Freeze import setup, Executable
build_exe_options = {"packages": ["os"]}
base="Win32GUI"
setup( name="NPSerialOscilloscopeGUI",
version="1.0",
description="potato",
options = {"build_exe": build_exe_options},
executables = [Executable("oscilloscope_gui.py", base=base)])
|
python
|
"""
Problem Statement:
Implement a function, `find_first_unique(lst)`, that returns
the first unique integer in the list.
Input:
- A list of integers
Output:
- The first unique element in the list
Sample Input:
[9,2,3,2,6,6]
Sample Output:
9
"""
def find_first_unique(lst):
    is_unique = {}
    for i, v in enumerate(lst):
        if v in is_unique:
            is_unique[v] = False
        else:
            is_unique[v] = True
    for i, v in enumerate(lst):
        if is_unique[v]:
            return v

if __name__ == '__main__':
    print(find_first_unique([9, 2, 3, 2, 6, 6]))
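# Two passes over the list: O(n) time, O(n) extra space for the seen-map.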
|
python
|
#!/usr/bin/env python
import itertools
with open('input') as f:
serial_number = int(f.read())
# Puzzle 1
def get_power_level(x, y, serial_number):
    rack_id = x + 10
    power_level = ((rack_id * y) + serial_number) * rack_id
    power_level = (power_level // 100 % 10) - 5
    return power_level

def get_total_power(grid, x, y, x_max, y_max):
    if not (0 < x <= x_max - 2 and 0 < y <= y_max - 2):
        return float('-inf')
    coords_3x3_grid = itertools.product(range(x, x+3), range(y, y+3))
    return sum(grid[y-1][x-1] for x, y in coords_3x3_grid)
assert get_power_level(3, 5, 8) == 4
assert get_power_level(122, 79, 57) == -5
assert get_power_level(217, 196, 39) == 0
assert get_power_level(101, 153, 71) == 4
# 300x300 grid starting at (1,1)
x_max = 300
y_max = 300
grid = [[get_power_level(x, y, serial_number) for x in range(1, x_max+1)] for y in range(1, y_max+1)]
all_coords = itertools.product(range(1, y_max+1), range(1, x_max+1))
_, x, y = max((get_total_power(grid, x, y, x_max, y_max), x, y) for x, y in all_coords)
print(x, y)
# Puzzle 2
|
python
|
import os
import nose
import ckanext.dcatapit.interfaces as interfaces
from ckanext.dcatapit.commands.dcatapit import DCATAPITCommands
eq_ = nose.tools.eq_
ok_ = nose.tools.ok_
class BaseOptions(object):
    def __init__(self, options):
        self.url = options.get("url", None)
        self.name = options.get("name", None)
        self.filename = options.get("filename", None)

class BaseCommandTest(object):
    def _get_file_contents(self, file_name):
        path = os.path.join(os.path.dirname(__file__),
                            '..', '..', '..', 'vocabularies',
                            file_name)
        return path

class TestDCATAPITCommand(BaseCommandTest):
    def test_vocabulary_command(self):
        dcatapit_commands = DCATAPITCommands('eu_themes')
        vocab_file_path = self._get_file_contents('data-theme-skos.rdf')
        options = BaseOptions({
            'url': vocab_file_path,
            'name': 'eu_themes'
        })
        setattr(dcatapit_commands, 'options', options)
        dcatapit_commands.initdb()
        dcatapit_commands.load()
        tag_localized = interfaces.get_localized_tag_name('ECON')
        ok_(tag_localized)
|
python
|
from .Solver import Solver
|
python
|
#!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Downsampling helpers for drivers that do not implement it server-side."""
from __future__ import absolute_import
from __future__ import print_function
import collections
import logging
import time
log = logging.getLogger(__name__)
class DelayedWriter(object):
"""Delay writes."""
DEFAULT_PERIOD_MS = 600000
def __init__(self, accessor, period_ms=DEFAULT_PERIOD_MS):
"""Create a DelayedWriter.
The delayed writer will separate high resolution points and low
resolution points and will write the low resolution ones every
`period_ms` milliseconds.
For these points the value for a given timestamp is frequently
updated and we can safely delay the writes. In case of an unclean
        shutdown we might lose up to `period_ms` points of data.
Args:
accessor: a connected accessor.
period_ms: delay before writing low resolution points.
"""
self.accessor = accessor
self.period_ms = period_ms
self._queue = []
self._metrics_per_ms = 0
self._last_write_ms = 0
self._points = collections.defaultdict(dict)
def clear(self):
"""Reset internal structures."""
self._queue = []
self._points.clear()
def feed(self, metric, datapoints):
"""Feed the delayed writer.
        This function will separate datapoints based on their
resolutions and keep the low resolution points for later.
Args:
metric: the metric associated with these points.
datapoints: downsampled datapoints.
Returns:
list(datapoints) list of high resolution points that
should get written now.
"""
high_res, low_res = [], []
for datapoint in datapoints:
_, _, _, stage = datapoint
            # In case of an unclean shutdown we could lose up to
            # 25% of the data. We also allow a lag of up to 1/4th of
            # a period. stage0 points are never delayed.
if stage.stage0 or stage.precision_ms < (self.period_ms * 4):
high_res.append(datapoint)
else:
low_res.append(datapoint)
self.write_later(metric, low_res)
# We piggy back on feed() to write delayed points, this works
# as long as we receive points regularly. We might want to add
# a timer at some point.
self.write_some()
return high_res
def flush(self):
"""Flush all buffered points."""
self._build_queue()
while self._queue:
self.write_some(flush=True)
def size(self):
"""Number of queued metrics."""
return len(self._points)
def write_later(self, metric, datapoints):
"""Queue points for later."""
for datapoint in datapoints:
timestamp, value, count, stage = datapoint
self._points[metric][(stage, timestamp)] = (value, count)
self._build_queue()
def _build_queue(self):
"""Build the queue of metrics to write."""
if len(self._queue) > 0:
return
# Order by number of points.
self._queue = sorted(self._points.keys(), key=lambda k: len(self._points[k]))
# We know that we have up to `period_ms` to write everything
# so let's write only a few metrics per iteration.
self._metrics_per_ms = float(len(self._queue)) / self.period_ms
        log.debug(
            "rebuilt the queue: %d metrics, %f metrics per ms",
            len(self._queue),
            self._metrics_per_ms,
        )
def write_some(self, flush=False, now=time.time):
"""Write some points from the queue."""
now = now() * 1000 # convert to ms.
if self._last_write_ms == 0:
self._last_write_ms = now
delta_ms = (now - self._last_write_ms) + 1
if flush:
metrics_to_write = len(self._queue)
else:
metrics_to_write = round(delta_ms * self._metrics_per_ms)
if metrics_to_write == 0:
return
i = 0
log.debug("writing low res points for %d metrics" % metrics_to_write)
while self._queue and i < metrics_to_write:
metric = self._queue.pop()
datapoints = []
# collect the points to write them.
for k, v in self._points[metric].items():
stage, timestamp = k
value, count = v
i += 1
datapoints.append((timestamp, value, count, stage))
self.accessor.insert_downsampled_points_async(metric, datapoints)
# remove the points that have been written
del self._points[metric]
self._last_write_ms = now
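# Illustrative usage sketch (editorial addition, not from the original module).
# `accessor` stands for any connected accessor exposing
# insert_downsampled_points_async(metric, datapoints):
#
#   writer = DelayedWriter(accessor, period_ms=600000)
#   write_now = writer.feed(metric, downsampled_points)
#   accessor.insert_downsampled_points_async(metric, write_now)
#   ...
#   writer.flush()  # on shutdown, push any delayed low-resolution points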
|
python
|
"""Location cards."""
import logging
from onirim.card._base import ColorCard
from onirim import exception
from onirim import util
LOGGER = logging.getLogger(__name__)
class LocationKind(util.AutoNumberEnum):
"""
Enumerated kinds of locations.
Attributes:
sun
moon
key
"""
sun = ()
moon = ()
key = ()
def _can_obtain_door(content):
"""
Check if the explored cards can obtain a door.
"""
last_card = content.explored[-1]
same_count = 0
for card in reversed(content.explored):
if last_card.color == card.color:
same_count += 1
else:
break
return same_count % 3 == 0
class _Location(ColorCard):
"""Location card without special effect."""
def __init__(self, color, kind=None):
super().__init__(color)
if kind is not None:
self._kind = kind
def _class_name(self):
return "{} location".format(self._kind.name)
def _do_drawn(self, core):
core.content.hand.append(self)
def _do_play(self, core):
observer = core.observer
content = core.content
if content.explored and content.explored[-1].kind == self.kind:
raise exception.ConsecutiveSameKind
content.explored.append(self)
content.hand.remove(self)
if _can_obtain_door(content):
observer.on_door_obtained_by_explore(core.content)
color = content.explored[-1].color
card = content.piles.pull_door(color)
if card is not None:
content.opened.append(card)
if len(content.opened) == 8:
raise exception.Win
def _on_discard(self, core):
"""
        Do additional operations after discarding a card from hand to the
        discarded pile.
"""
pass
def _do_discard(self, core):
content = core.content
content.hand.remove(self)
content.piles.put_discard(self)
self._on_discard(core)
def sun(color):
"""
Make a sun location card with specific color.
Args:
color (Color): The specific color.
Returns:
Card: A sun location card.
"""
return _Location(color, LocationKind.sun)
def moon(color):
"""
Make a moon location card with specific color.
Args:
color (Color): The specific color.
Returns:
Card: A moon location card.
"""
return _Location(color, LocationKind.moon)
class _KeyLocation(_Location):
"""
Key location card implementation.
"""
_kind = LocationKind.key
def _on_discard(self, core):
actor = core.actor
content = core.content
drawn = content.piles.draw(5)
discarded_idx, back_idxes = actor.key_discard_react(core.content, drawn)
LOGGER.info(
"Agent choose key discard react %s, %s",
discarded_idx,
back_idxes)
# TODO check returned value
content.piles.put_discard(drawn[discarded_idx])
content.piles.put_undrawn_iter(drawn[idx] for idx in back_idxes)
def key(color):
"""
Make a key location card with specific color.
Args:
color (Color): The specific color.
Returns:
Card: A key location card.
"""
return _KeyLocation(color)
|
python
|
import logging
import argparse
import transaction
from pyramid.paster import bootstrap
from .actions import proceed_contest
from .db import Config
def process_queue():
parser = argparse.ArgumentParser()
parser.add_argument("config", help="config file")
args = parser.parse_args()
logging.basicConfig()
env = bootstrap(args.config)
request = env["request"]
configs = Config.get_ended_configs(request.db)
for config in configs:
proceed_contest(request, config.channel)
transaction.commit()
|
python
|
#!/usr/bin/env false
"""Generate script to activate project."""
# Internal packages (absolute references, distributed with Python)
# External packages (absolute references, NOT distributed with Python)
# Library modules (absolute references, NOT packaged, in project)
# from src_gen.script.bash.briteonyx.source import generate as gen
# from src_gen.script.bash.briteonyx.structure import *
from src_gen.script.bash.source import generate as gen
from src_gen.script.bash.structure import *
# Project modules (relative references, NOT packaged, in project)
def _abort_if_activated():
return [
if_(
string_is_not_null(dq(vr("BO_Project"))),
indent(),
"1>&2 ",
echo(
dq(
"Aborting, this project is already activated as ",
sq(vr("BO_Project")),
)
),
eol(),
indent(),
abort_script(),
),
fi(),
]
def _abort_if_missing_pwd():
return [
if_(
string_is_null(dq(vr("PWD"))),
indent(),
"1>&2 ",
echo(
dq(
"Aborting, missing environment variable ",
sq(vn("PWD")),
)
),
eol(),
indent(),
abort_script(),
),
fi(),
]
def _activate_python_virtual_environment(pve_activate_script, script, status):
return [
comment("Activate Python virtual environment (PVE)"),
_capture_environment("PVE-prior"),
source_or_abort(pve_activate_script, script, status),
_capture_environment("PVE-after"),
]
def _capture_environment(file_name):
return [
"(",
set_("-o", "posix"),
seq(),
set_(),
")",
pipe(),
command("sort", ">", x(vr("PWD"), "/BO-", file_name, ".env")),
eol(),
]
def _comments():
return [
# TODO: Redesign to automatically wrap comment paragraphs at a set line length
comment(
"Activate the BriteOnyx framework to manage this project directory tree"
),
comment(),
note("We MUST NOT EVER ", cc(exit()), " during BriteOnyx activation!"),
comment(),
comment("We cannot use a `trap` here"),
comment("because it will remain active"),
comment("within the shell"),
comment("that will `source` this script."),
comment(),
comment("Please see HowTo-use_this_project.md for details."),
rule(),
]
def _create_random_tmpdir():
local = "_result"
# TODO: Consider capturing this special variable
tmpdir = "TMPDIR"
user = "USER"
return [
comment("Create random temporary directory"),
# TODO: Consider creating method for 'mktemp'
if_(
string_equals(dq(vr("BO_OS")), "macOS"),
indent(),
assign(
vn(local),
substitute("mktemp", "-d", "-t", dq("BO-", vr(user))),
),
eol(),
),
else_(
indent(),
assign(
vn(local),
substitute(
"mktemp", "-d", "-t", dq("BO-", vr(user), "-XXXXXXX")
),
),
eol(),
),
fi(),
if_(
directory_exists(dq(vr(local))),
indent(),
assign(vn(tmpdir), vr(local)),
eol(),
indent(),
log_info("Created temporary directory ", sq(vr(tmpdir))),
eol(),
),
fi(),
if_(
directory_exists(dq(vr(tmpdir))),
indent(),
remembering(tmpdir),
eol(),
indent(),
export(vn(tmpdir)),
eol(),
),
else_(
indent(),
log_error(
"Aborting, failed to establish temporary directory ",
sq(vr(tmpdir)),
),
eol(),
indent(),
abort_script(),
),
fi(),
]
def _declare_remembering():
return [
exported_function(
"remembering",
indent(),
comment("Log that we are remembering variable $1"),
indent(),
integer_equal("$#", 0),
and_(),
eol(),
indent(2),
log_error("Variable name is required"),
and_(),
eol(),
indent(2),
abort_script(),
indent(),
command("local", "-r", "Name=$1"),
eol(),
indent(),
log_debug("Remembering ", vr("Name"), " = '${!Name}'"),
eol(),
),
]
def _detect_operating_system():
# TODO: Make appropriate constants
local = "_result"
return [
comment("Detect operating system"),
todo("Write as function"),
todo("Add detection of various Linux, when we care"),
assign(vn(local), substitute("uname")),
eol(),
if_(
string_equals(dq(vr(local)), "Darwin"),
indent(),
export(vn("BO_OS"), "macOS"),
eol(),
),
else_(
indent(),
export(vn("BO_OS"), "UNKNOWN"),
eol(),
),
fi(),
remembering("BO_OS"),
eol(),
]
def _remember_paths():
project_path = x(
vr("BO_Project"), "/BriteOnyx/bin", ":", vr("BO_Project"), "/bin"
)
return [
note("We can now use BriteOnyx Bash functionality."),
line(),
comment("BriteOnyx scripts"),
comment("must precede"),
comment("project-specific scripts"),
comment("on the PATH"),
comment("so that collisions fail fast."),
comment("Any collision should be resolved"),
comment("by renaming"),
comment("the project-specific script"),
comment("to avoid that collision."),
line(),
export(vn("BO_PathProject"), project_path),
eol(),
line(),
export_if_null("BO_PathSystem", vr("PATH")),
eol(),
export_if_null("BO_PathUser", x(vr("HOME"), "/bin")),
eol(),
line(),
remembering("BO_PathProject"),
eol(),
remembering("BO_PathSystem"),
eol(),
remembering("BO_PathUser"),
eol(),
]
def _remember_project_root():
return [
export(vn("BO_Project"), vr("PWD")),
eol(),
remembering("BO_Project"),
eol(),
]
def build():
script = "_Script"
status = "_Status"
alias_sample = x(vr("BO_Project"), "/cfg/sample/alias.bash")
briteonyx_alias_script = x(
vr("BO_Project"), "/BriteOnyx/bin/lib/alias.bash"
)
briteonyx_declare_script = x(
vr("BO_Project"), "/BriteOnyx/bin/lib/declare.bash"
)
configure_python_script = x(
vr("BO_Project"), "/BriteOnyx/bin/lib/configure-Python.bash"
)
context_sample = x(vr("BO_Project"), "/cfg/sample/context.bash")
log4bash_script = x(vr("PWD"), "/BriteOnyx/bin/lib/declare-log4bash.bash")
log_directory = x(vr("BO_Project"), "/log")
project_alias_script = x(vr("BO_Project"), "/alias.bash")
project_context_script = x(vr("BO_Project"), "/context.bash")
project_declare_script = x(vr("BO_Project"), "/bin/lib/declare.bash")
pve_activate_script = x(
vr("BO_Project"), "/BriteOnyx/bin/lib/pve-activate.bash"
)
set_path_script = x(vr("BO_Project"), "/BriteOnyx/bin/lib/set_path.bash")
return [
header_activation(),
_comments(),
_abort_if_activated(),
line(),
_abort_if_missing_pwd(),
line(),
_capture_environment("incoming"),
line(),
source_or_abort(log4bash_script, script, status),
line(),
log_info("Activating ", sq(vr("PWD")), " as the current project"),
eol(),
line(),
_declare_remembering(),
line(),
_remember_project_root(),
remembering("INTERACTIVE_MODE"),
eol(),
line(),
source_or_abort(briteonyx_declare_script, script, status),
line(),
_remember_paths(),
line(),
source_or_abort(set_path_script, script, status),
line(),
_detect_operating_system(),
line(),
_create_random_tmpdir(),
line(),
command("maybe_create_directory_tree", log_directory),
eol(),
line(),
_activate_python_virtual_environment(
pve_activate_script, script, status
),
line(),
maybe_copy_file(alias_sample, project_alias_script),
eol(),
maybe_copy_file(context_sample, project_context_script),
eol(),
line(),
maybe_source_or_abort(project_declare_script, script, status),
line(),
source_or_abort(project_context_script, script, status),
line(),
source_or_abort(briteonyx_alias_script, script, status),
line(),
source_or_abort(project_alias_script, script, status),
line(),
_capture_environment("outgoing"),
log_good("BriteOnyx has successfully activated this project"),
eol(),
log_info("To get started, try executing the 'cycle' alias..."),
eol(),
line(),
disabled_content_footer(),
]
def generate(directory):
gen(build(), directory, "activate.bash")
"""DisabledContent
source(configure_python_script), eol(),
source(briteonyx_alias_script), eol(),
line(),
"""
|
python
|
from HTML import Curve_Write_HTML
from Data import Curve_Write_Data
from SVG import Curve_Write_SVG
class Curve_Write(
Curve_Write_HTML,
Curve_Write_Data,
Curve_Write_SVG
):
    """Aggregate curve writer combining the HTML, Data and SVG mixins."""
|
python
|
"""
Python script containing methods for building a machine learning model.
"""
import utils.text_processing as tp
def classifier(X, y, tokenizer, config):
word_ind_dict = tokenizer.word_index
glove_path = config.get("glove_path")
vocab_size = config.get("vocab_size")
seq_len = config.get("seq_len")
embed_dim = config.get("embed_dim")
num_words = min(vocab_size, len(word_ind_dict) + 1)
embed_matrix = tp.get_embedding_matrix(
glove_path, word_ind_dict, num_words, embed_dim, vocab_size
)
embed_layer = tp.get_embedding_layer(num_words, embed_dim, embed_matrix, seq_len)
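    # NOTE (editorial assumption): the function currently ends after building
    # `embed_layer`; presumably this embedding layer is meant to be stacked
    # into a downstream classifier model that would then be returned and fit
    # on X and y.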
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/diff-img.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='proto/diff-img.proto',
package='Diff',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x14proto/diff-img.proto\x12\x04\x44iff\",\n\x0b\x44iffRequest\x12\r\n\x05\x66irst\x18\x01 \x01(\t\x12\x0e\n\x06second\x18\x02 \x01(\t\")\n\x0c\x44iffResponse\x12\x0b\n\x03res\x18\x01 \x01(\t\x12\x0c\n\x04ssim\x18\x02 \x01(\x02\x32=\n\x07\x44iffImg\x12\x32\n\x07getDiff\x12\x11.Diff.DiffRequest\x1a\x12.Diff.DiffResponse\"\x00\x62\x06proto3'
)
_DIFFREQUEST = _descriptor.Descriptor(
name='DiffRequest',
full_name='Diff.DiffRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='first', full_name='Diff.DiffRequest.first', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='second', full_name='Diff.DiffRequest.second', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=30,
serialized_end=74,
)
_DIFFRESPONSE = _descriptor.Descriptor(
name='DiffResponse',
full_name='Diff.DiffResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='res', full_name='Diff.DiffResponse.res', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ssim', full_name='Diff.DiffResponse.ssim', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=76,
serialized_end=117,
)
DESCRIPTOR.message_types_by_name['DiffRequest'] = _DIFFREQUEST
DESCRIPTOR.message_types_by_name['DiffResponse'] = _DIFFRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DiffRequest = _reflection.GeneratedProtocolMessageType('DiffRequest', (_message.Message,), {
'DESCRIPTOR' : _DIFFREQUEST,
'__module__' : 'proto.diff_img_pb2'
# @@protoc_insertion_point(class_scope:Diff.DiffRequest)
})
_sym_db.RegisterMessage(DiffRequest)
DiffResponse = _reflection.GeneratedProtocolMessageType('DiffResponse', (_message.Message,), {
'DESCRIPTOR' : _DIFFRESPONSE,
'__module__' : 'proto.diff_img_pb2'
# @@protoc_insertion_point(class_scope:Diff.DiffResponse)
})
_sym_db.RegisterMessage(DiffResponse)
_DIFFIMG = _descriptor.ServiceDescriptor(
name='DiffImg',
full_name='Diff.DiffImg',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=119,
serialized_end=180,
methods=[
_descriptor.MethodDescriptor(
name='getDiff',
full_name='Diff.DiffImg.getDiff',
index=0,
containing_service=None,
input_type=_DIFFREQUEST,
output_type=_DIFFRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_DIFFIMG)
DESCRIPTOR.services_by_name['DiffImg'] = _DIFFIMG
# @@protoc_insertion_point(module_scope)
|
python
|
"""Example training a nobrainer model for brain extraction."""
import nobrainer
# Instantiate object to perform real-time data augmentation on training data.
# This object is similar to `keras.preprocessing.image.ImageDataGenerator` but
# works with volumetric data.
volume_data_generator = nobrainer.VolumeDataGenerator(
samplewise_minmax=True,
rot90_x=True,
rot90_y=True,
rot90_z=True,
flip_x=True,
flip_y=True,
flip_z=True,
salt_and_pepper=True,
gaussian=True,
reduce_contrast=True,
binarize_y=True)
# Instantiate TensorFlow model.
model = nobrainer.HighRes3DNet(
n_classes=2, # Two classes for brain extraction (i.e., brain vs not brain)
optimizer='Adam',
learning_rate=0.01,
# Model-specific options.
one_batchnorm_per_resblock=True,
dropout_rate=0.25)
# Read in filepaths to features and labels.
filepaths = nobrainer.read_csv("features_labels.csv")
# Most GPUs do not have enough memory to represent a 256**3 volume during
# training, so we train on blocks of data. Here, we set the shape of the
# blocks.
block_shape = (128, 128, 128)
# Train model.
nobrainer.train(
model=model,
volume_data_generator=volume_data_generator,
filepaths=filepaths,
volume_shape=(256, 256, 256),
block_shape=block_shape,
strides=block_shape,
batch_size=1, # number of blocks per training step
n_epochs=1, # number of passes through the training set
prefetch=4) # prefetch this many full volumes.
|
python
|
from bflib import dice, movement, units
from bflib.attacks import AttackSet, Bite, Gaze
from bflib.attacks import specialproperties
from bflib.characters import specialabilities
from bflib.characters.classes.fighter import Fighter
from bflib.monsters import listing
from bflib.monsters.appearingset import AppearingSet
from bflib.monsters.reptilians.base import Reptilian
from bflib.sizes import Size
from bflib.tables.attackbonus import AttackBonusTable
from bflib.treasuretypes import TreasureType
@listing.register_type
@listing.register_monster
class Basilisk(Reptilian):
name = "Basilisk"
hit_dice = dice.D8(6)
attack_bonus = AttackBonusTable.get_by_hit_dice(hit_dice.amount)
attack_sets = [AttackSet(Bite(dice.D10(1))), AttackSet(Gaze(None), special_properties=specialproperties.Petrify)]
base_armor_class = 16
morale = 9
movement = movement.MovementSet(walk=units.FeetPerGameTurn(20), turning_distance=units.Feet(10))
no_appearing = AppearingSet(dice_dungeon=dice.D6(1), dice_wild=dice.D6(1), dice_lair=dice.D6(1))
save_as = Fighter.level_table.levels[hit_dice.amount].saving_throws_set
size = Size.Large
special_abilities = specialabilities.CombatFrenzy,
treasure_type = TreasureType.F
weight = units.Pound(300)
xp = 610
|
python
|
import requests
from bs4 import BeautifulSoup
address = []
def get_html(url):
r = requests.get(url, headers={
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/97.0.4692.99 Safari/537.36',
'accept': '*/*'})
return r
def get_address(html):
soup = BeautifulSoup(html, 'html.parser')
items = soup.find('tbody').find_all('tr')
for item in items:
address.append(item.find('a').get_text(strip=True))
def repeat():
for i in range(1, 5):
url = "https://etherscan.io/accounts/" + str(i)
html = get_html(url)
get_address(html.text)
repeat()
|
python
|
# Copyright 2018 Braxton Mckee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typed_python import *
import typed_python._types as _types
from nativepython.runtime import Runtime
import unittest
def Compiled(f):
f = Function(f)
return Runtime.singleton().compile(f)
class TestPointerToCompilation(unittest.TestCase):
def test_pointer_operations(self):
T = ListOf(int)
def testfun(x: T):
pointer = x.pointerUnsafe(0)
pointer.set(20)
(pointer+1).set(20)
(pointer+2).set((pointer+1).get()+1)
(pointer+3).initialize((pointer+2).get())
(pointer+4).cast(float).set(1.0)
return pointer[3]
compiledFun = Compiled(testfun)
l1 = T(list(range(10)))
l2 = T(list(range(10)))
self.assertEqual(testfun(l1), l1[3])
self.assertEqual(compiledFun(l2), l2[3])
self.assertEqual(l1, l2)
self.assertEqual(l1[0], 20)
self.assertEqual(l1[1], 20)
self.assertEqual(l1[2], 21)
self.assertEqual(l1[3], 21)
self.assertEqual(l1[4], 0x3ff0000000000000) # hex representation of 64 bit float 1.0
def test_bytecount(self):
def testfun(x):
return _types.bytecount(type(x))
self.assertEqual(testfun(0), 8)
def check(x):
self.assertEqual(
testfun(x),
Runtime.singleton().compile(testfun, {'x': type(x)})(x)
)
check(0)
check(0.0)
check(ListOf(int)([10]))
check(Tuple(int, int, int)((10, 10, 10)))
def test_pointer_subtraction(self):
T = ListOf(int)
def testfun(x: T):
pointer = x.pointerUnsafe(0)
return (pointer + 1) - pointer
compiledFun = Compiled(testfun)
self.assertEqual(testfun(T()), 1)
self.assertEqual(compiledFun(T()), 1)
|
python
|
from blinker import NamedSignal, signal
from sagas.nlu.events import ResultDataset, RequestMeta
from sagas.conf.conf import cf
import sagas.tracker_fn as tc
from pprint import pprint
watch = signal('watch')
# evts=[watch]
@watch.connect
def console_watch(sender, **kw):
import datetime
from sagas.nlu.nlu_tools import NluTools
ds:ResultDataset=kw['dataset']
meta:RequestMeta=kw['meta']
print(f"****** watch {sender}")
tc.emp('magenta', meta)
tools = NluTools()
if cf.is_enabled('print_tree'):
tools.main_domains(meta.sents,
lang=meta.lang,
engine=meta.engine,
print_domains=False)
return datetime.datetime.now()
|
python
|
import numpy as np
class SpatioTemporalSignal(object):
"""
    This class is used to create a group of N signals that interact in space
    and time. The way this interaction is carried out can be fully
    determined by the user through a spatio-temporal matrix of vectors that
    specifies how the signals mix.
"""
def __init__(self, dt=0.1, delay=10, Tmax=100, Nseries=2):
        # Initialize time parameters
self.dt = dt
self.delay = delay
self.Tmax = Tmax
self.Nseries = Nseries
# Put time in proper units
self.NTmax = int(self.Tmax * 1.0 / self.dt)
self.Ndelay = int(self.delay * 1.0 / self.dt)
self.time = np.arange(self.NTmax) * self.dt
# Initialize series
self.series = np.zeros((self.Nseries, self.NTmax))
        # Initialize interaction
self.interaction = np.zeros((self.Nseries, self.Nseries, self.NTmax))
def set_initial_conditions(self, initial):
"""
Set the initial conditions
"""
self.series[..., 0] = initial
def construct_series(self):
"""
        This is the function that constructs the series with a given interaction.
        It does not work for one-dimensional series.
"""
for t in range(self.NTmax - 1):
print '------------'
print 'Time t', t
# First let's set the correct delay
if t + 1 > self.Ndelay:
delay_aux = self.Ndelay
else:
delay_aux = t + 1
# Update signal_index
for series_idx in xrange(self.Nseries):
                # Initialize vector to accumulate time contributions
vec_aux = np.zeros(self.Nseries)
                # Accumulate time contributions
for delay_index in range(delay_aux):
aux1 = self.series[:, t - delay_index]
aux2 = self.interaction[series_idx, :, delay_index]
vec_aux += aux1 * aux2
# print 'vec_aux', vec_aux
# Combine time contributions and normalize
self.series[series_idx, t + 1] = np.sum(vec_aux) / (delay_aux)
def construct_series_verbose(self):
"""
        This is the verbose variant that constructs the series with a given interaction.
"""
for t in range(self.NTmax - 1):
print '------------'
print 'Time t', t
# First let's set the correct delay
if t + 1 > self.Ndelay:
delay_aux = self.Ndelay
else:
delay_aux = t + 1
# Update signal_index
for series_idx in xrange(self.Nseries):
print 'series_idx', series_idx
print 'delay_aux of delay', delay_aux, self.Ndelay
                # Initialize vector to accumulate time contributions
vec_aux = np.zeros(self.Nseries)
                # Accumulate time contributions
for delay_index in range(delay_aux):
aux1 = self.series[:, t - delay_index]
aux2 = self.interaction[series_idx, :, delay_index]
print 'series', aux1
print 'interactions', aux2
vec_aux += aux1 * aux2
# print 'vec_aux', vec_aux
# Combine time contributions and normalize
print 'Contribution ', vec_aux
print 'Total contribution (BN) ', np.sum(vec_aux)
self.series[series_idx, t + 1] = np.sum(vec_aux) / (delay_aux)
print 'next value series', self.series[series_idx, t + 1]
def set_interaction(self, interaction_matrix):
"""
This function is used whenever the user wants
to pass a particular interaction matrix
"""
self.interaction = interaction_matrix
class TrigonometricMix(SpatioTemporalSignal):
"""
    This class makes it easier to initialize mixed signals.
"""
def __init__(self, dt=0.1, delay=10, Tmax=100, Nseries=2,
phase_m=None, frequency_m=None):
"""
        Overrides the initialization but also takes the phase and
        frequency matrices that are sufficient to determine a
        trigonometric mix.
"""
super(TrigonometricMix, self).__init__(dt, delay,
Tmax, Nseries)
self.phase_matrix = phase_m
self.frequency_matrix = frequency_m
# Create trigonometric matrix
aux = []
for phase, frequency in zip(self.phase_matrix.flatten(),
self.frequency_matrix.flatten()):
            aux.append(np.cos(frequency * self.time + phase))
# Transform to array and reshape
aux = np.array(aux)
self.interaction = aux.reshape((self.Nseries,
self.Nseries, self.NTmax))
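def example_trigonometric_mix():
    """
    Minimal usage sketch (editorial addition, not in the original code):
    build a two-series trigonometric mix from hand-picked phase and
    frequency matrices, then construct the series.
    """
    phases = np.zeros((2, 2))
    frequencies = 0.5 * np.ones((2, 2))
    mix = TrigonometricMix(dt=1.0, delay=2, Tmax=10, Nseries=2,
                           phase_m=phases, frequency_m=frequencies)
    mix.set_initial_conditions(np.array([1.0, 0.0]))
    mix.construct_series()
    return mix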
def main():
print 'This is all right'
return SpatioTemporalSignal()
if __name__ == '__main__':
x = main()
|
python
|
from . import Token
class TreeNode(object):
# dictionary mapping {str: TreeNode}
id_treeNodes = {}
    @staticmethod
    def getTreeNode(idx):
        return TreeNode.id_treeNodes[idx]
def __init__(self, idx, tkn):
self._id = idx
self._tkn = tkn
self._children = {}
TreeNode.id_treeNodes[idx] = self
    def addChild(self, dep, child):
        try:
            tns = self._children[dep]
        except KeyError:
            # First child for this dependency label: start a new set.
            self._children[dep] = {child}
        else:
            tns.add(child)
        return None
def getId(self):
return self._id
def getToken(self):
return self._tkn
def getChildren(self):
return self._children
def compareTo(self, z):
if not isinstance(z, TreeNode):
raise ValueError
        return self._tkn.compareTo(z._tkn)
def equals(self, o):
return self.compareTo(o) == 0
def toString(self):
return self._tkn.toString()
def getTreeStr(self):
id_str = {}
if (len(self._children) > 0):
for dep in self._children.keys():
nodes = self._children[dep]
s = ''
for node in nodes:
if dep.startswith('prep_') or dep.startswith('conj_'):
s = dep[5:] + ' '
s = s + node.getTreeStr()
id_str[node.getId()] = s
id_str[self._id] = self._tkn.getLemma()
result = ' '.join([id_str[x] for x in id_str.keys()])
return result
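# Illustrative usage (editorial addition; the token objects and the 'nsubj'
# label are placeholders):
#   root = TreeNode(0, root_token)
#   child = TreeNode(1, child_token)
#   root.addChild('nsubj', child)            # children are grouped per dependency label
#   assert TreeNode.getTreeNode(1) is child  # nodes are retrievable by id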
|
python
|
from flask import Flask
from chatpy.api import API
from chatpy.auth import TokenAuthHandler
app = Flask(__name__)
app.config.from_object('config')
chatwork = API(auth_handler=TokenAuthHandler(app.config['CHATWORK_TOKEN']))
from app.endpoint import *
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: aci_fabric_scheduler
short_description: This module creates ACI schedulers.
version_added: "2.8"
description:
- With this module you can create scheduler policies that can be an empty shell, a one-time execution, or recurring.
options:
name:
description:
- The name of the Scheduler.
required: yes
aliases: [ name, scheduler_name ]
description:
description:
- Description for the Scheduler.
aliases: [ descr ]
recurring:
description:
    - Set this to C(true) for a recurring scheduler and to C(false) for a one-time
      execution. For an empty shell scheduler, simply omit this option from the task.
type: bool
default: 'no'
windowname:
description:
    - The name of the recurring or one-time execution window.
concurCap:
description:
    - The number of devices that can be executed on at a time.
type: int
maxTime:
description:
    - The maximum amount of time a process is allowed to run.
date:
description:
- This is the date and time that the scheduler will execute
hour:
description:
    - This sets the hour of execution.
minute:
description:
- This sets the minute of execution, used in conjunction with hour
day:
description:
- This sets the day when execution will take place
default: "every-day"
choices: ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday','Sunday', 'even-day', 'odd-day', 'every-day']
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
default: present
choices: [ absent, present, query ]
extends_documentation_fragment: aci
author:
- Steven Gerhart (@sgerhart)
'''
EXAMPLES = '''
- name: Simple Scheduler (Empty)
aci_fabric_scheduler:
host: "{{ inventory_hostname }}"
username: "{{ user }}"
password: "{{ pass }}"
validate_certs: no
name: simpleScheduler
state: present
- name: Remove Simple Scheduler
aci_fabric_scheduler:
host: "{{ inventory_hostname }}"
username: "{{ user }}"
password: "{{ pass }}"
validate_certs: no
name: simpleScheduler
state: absent
- name: One Time Scheduler
aci_fabric_scheduler:
host: "{{ inventory_hostname }}"
username: "{{ user }}"
password: "{{ pass }}"
validate_certs: no
name: OneTime
windowname: OneTime
recurring: False
concurCap: 20
date: "2018-11-20T24:00:00"
state: present
- name: Recurring Scheduler
aci_fabric_scheduler:
host: "{{ inventory_hostname }}"
username: "{{ user }}"
password: "{{ pass }}"
validate_certs: no
name: Recurring
windowname: Recurring
recurring: True
concurCap: 20
hour: 13
minute: 30
day: Tuesday
state: present
'''
RETURN = '''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
import json
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
name=dict(type='str', aliases=['name', 'scheduler_name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
windowname=dict(type='str', aliases=['windowname']),
recurring=dict(type='bool'),
concurCap=dict(type='int'), # Number of devices it will run against concurrently
        maxTime=dict(type='str'),  # Maximum time a process may run ('unlimited' or dd:hh:mm:ss)
date=dict(type='str', aliases=['date']), # The date the process will run YYYY-MM-DDTHH:MM:SS
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
hour=dict(type='int'),
minute=dict(type='int'),
day=dict(type='str', default='every-day', choices=['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
'Saturday', 'Sunday', 'every-day', 'even-day', 'odd-day']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['name']],
['state', 'present', ['name']],
],
)
state = module.params['state']
name = module.params['name']
windowname = module.params['windowname']
recurring = module.params['recurring']
date = module.params['date']
hour = module.params['hour']
minute = module.params['minute']
maxTime = module.params['maxTime']
concurCap = module.params['concurCap']
day = module.params['day']
description = module.params['description']
if recurring:
child_configs = [dict(trigRecurrWindowP=dict(attributes=dict(name=windowname, hour=hour, minute=minute,
                                                                  procCap=maxTime, concurCap=concurCap, day=day,)))]
elif recurring is False:
child_configs = [dict(trigAbsWindowP=dict(attributes=dict(name=windowname, procCap=maxTime,
concurCap=concurCap, date=date,)))]
else:
child_configs = []
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='trigSchedP',
aci_rn='fabric/schedp-{0}'.format(name),
target_filter={'name': name},
module_object=name,
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='trigSchedP',
class_config=dict(
name=name,
descr=description,
),
child_configs=child_configs,
)
aci.get_diff(aci_class='trigSchedP')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
|
python
|
# Copyright (c) 2021, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from ..libs.client import ProMortClient
from ..libs.client import ProMortAuthenticationError
from argparse import ArgumentError
import sys, requests
from urllib.parse import urljoin
from functools import reduce
class SlideImporter(object):
def __init__(self, host, user, passwd, session_id, logger):
self.promort_client = ProMortClient(host, user, passwd, session_id)
self.logger = logger
def _get_case_label(self, slide_label):
return slide_label.split('-')[0]
def _import_case(self, case_label):
response = self.promort_client.post(
api_url='api/cases/',
payload={'id': case_label}
)
if response.status_code == requests.codes.CREATED:
self.logger.info('Case created')
elif response.status_code == requests.codes.CONFLICT:
            self.logger.info('Case already exists')
elif response.status_code == requests.codes.BAD:
self.logger.error('ERROR while creating Case: {0}'.format(response.text))
sys.exit('ERROR while creating Case')
def _import_slide(self, slide_label, case_label, omero_id=None, mirax_file=False,
omero_host=None, ignore_duplicated=False):
if mirax_file:
file_type = 'MIRAX'
else:
file_type = 'OMERO_IMG'
response = self.promort_client.post(
api_url='api/slides/',
payload={'id': slide_label, 'case': case_label, 'omero_id': omero_id, 'image_type': file_type}
)
if response.status_code == requests.codes.CREATED:
self.logger.info('Slide created')
if omero_id is not None and omero_host is not None:
self._update_slide(slide_label, omero_id, mirax_file, omero_host)
elif response.status_code == requests.codes.CONFLICT:
if ignore_duplicated:
self.logger.info('Slide already exists')
if omero_id is not None and omero_host is not None:
self._update_slide(slide_label, omero_id, mirax_file, omero_host)
else:
self.logger.error('A slide with the same ID already exists')
sys.exit('ERROR: duplicated slide')
elif response.status_code == requests.codes.BAD:
self.logger.error('ERROR while creating Slide: {0}'.format(response.text))
sys.exit('ERROR while creating Slide')
def _update_slide(self, slide_label, omero_id, mirax_file, omero_host):
if mirax_file:
join_items = (omero_host, 'ome_seadragon/mirax/deepzoom/get/', '{0}_metadata.json'.format(slide_label))
else:
join_items = (omero_host, 'ome_seadragon/deepzoom/get/', '{0}_metadata.json'.format(omero_id))
ome_url = reduce(urljoin, join_items)
response = requests.get(ome_url)
if response.status_code == requests.codes.OK:
slide_mpp = response.json()['image_mpp']
response = self.promort_client.put(
api_url='api/slides/{0}/'.format(slide_label),
payload={'image_microns_per_pixel': slide_mpp, 'omero_id': omero_id}
)
self.logger.info('Slide updated')
def run(self, args):
if args.case_label is None and not args.extract_case:
raise ArgumentError(args.case_label,
message='ERROR! Must specify a case label or enable the extract-case flag')
if args.case_label is not None:
if args.extract_case:
self.logger.info('Using label passed through CLI, ignoring the extract-case flag')
case_label = args.case_label
else:
case_label = self._get_case_label(args.slide_label)
try:
self.promort_client.login()
except ProMortAuthenticationError:
self.logger.critical('Authentication error, exit')
sys.exit('Authentication error, exit')
self._import_case(case_label)
self._import_slide(args.slide_label, case_label, args.omero_id, args.mirax,
args.omero_host, args.ignore_duplicated)
self.logger.info('Import job completed')
self.promort_client.logout()
help_doc = """
TBD
"""
def implementation(host, user, passwd, session_id, logger, args):
slide_importer = SlideImporter(host, user, passwd, session_id, logger)
slide_importer.run(args)
def make_parser(parser):
parser.add_argument('--slide-label', type=str, required=True, help='slide label')
parser.add_argument('--case-label', type=str, required=False, help='case label')
parser.add_argument('--omero-id', type=int,
help='OMERO ID, only required if the slide was previously uploaded to an OMERO server')
parser.add_argument('--omero-host', type=str,
help='OMERO host used to retrieve slide details (if omero-id was specified)')
parser.add_argument('--mirax', action='store_true', help='slide is a 3DHISTECH MIRAX')
parser.add_argument('--extract-case', action='store_true', help='extract case ID from slide label')
parser.add_argument('--ignore-duplicated', action='store_true',
help='if enabled, trying to import an existing slide will not produce an error')
def register(registration_list):
registration_list.append(('slides_importer', help_doc, make_parser, implementation))
|
python
|
import billboard
import json
import urllib
from urllib.parse import quote
apikey = 'APIKEY'
# make the empty dictionary
songs = {}
# loop through the years we're interested in
for x in range(1960, 2016):
# another dictionary inside
songs[x] = {}
# get the chart for the last week of that year
chart = billboard.ChartData('hot-100', '%s-12-19' % str(x))
    # for every song on the chart, keep its rank, title, and artist
for song in chart:
songs[x][song.rank] = {}
songs[x][song.rank]['rank'] = song.rank
songs[x][song.rank]['title'] = song.title
songs[x][song.rank]['artist'] = song.artist
# look up the song in musixmatch
api_url = "http://api.musixmatch.com/ws/1.1/matcher.track.get?apikey=%s&q_artist=%s&q_track=%s" % (apikey, quote(song.artist, safe=''), quote(song.title, safe=''))
url = urllib.request.urlopen(api_url).read().decode('UTF-8')
result = json.loads(url)
songs[x][song.rank]['musixmatch'] = result
# use lyrics id to get lyrics info and store that instead of all the junk from musixmatch
api_url_lyrics = "http://api.musixmatch.com/ws/1.1/matcher.lyrics.get?apikey=%s&q_track=%s&q_artist=%s" % (apikey, quote(song.title, safe=''), quote(song.artist, safe=''))
url_lyrics = urllib.request.urlopen(api_url_lyrics).read().decode('UTF-8')
lyrics = json.loads(url_lyrics)
#checks against any songs not in MusixMatch database and any songs without lyrics
if result['message']['header']['status_code'] != 404 and result['message']['body']['track']['has_lyrics'] == 1:
lyrics_id = result['message']['body']['track']['lyrics_id']
get_lyrics = lyrics['message']['body']['lyrics']['lyrics_body']
songs[x][song.rank]['lyrics'] = get_lyrics
#dump all the data to a json file (readable output)
with open('song-data.json', 'w') as out_file:
for x in sorted(songs):
out_file.write('>')
json.dump(x, out_file)
out_file.write('\n')
for y in songs[x]:
if 'lyrics' in songs[x][y]:
out_file.write('(')
json.dump(y, out_file)
out_file.write(') ' + songs[x][y]['title'] + ' - ' + songs[x][y]['artist'])
out_file.write('\n')
json.dump(songs[x][y]['lyrics'].replace('\n', ' '), out_file)
out_file.write('\n')
out_file.write('\n')
|
python
|
#! /usr/bin/env python
"""Perform massive transformations on a document tree created from the LaTeX
of the Python documentation, and dump the ESIS data for the transformed tree.
"""
import errno
import esistools
import re
import string
import sys
import xml.dom
import xml.dom.minidom
ELEMENT = xml.dom.Node.ELEMENT_NODE
ENTITY_REFERENCE = xml.dom.Node.ENTITY_REFERENCE_NODE
TEXT = xml.dom.Node.TEXT_NODE
class ConversionError(Exception):
pass
ewrite = sys.stderr.write
try:
# We can only do this trick on Unix (if tput is on $PATH)!
if sys.platform != "posix" or not sys.stderr.isatty():
raise ImportError
import commands
except ImportError:
bwrite = ewrite
else:
def bwrite(s, BOLDON=commands.getoutput("tput bold"),
BOLDOFF=commands.getoutput("tput sgr0")):
ewrite("%s%s%s" % (BOLDON, s, BOLDOFF))
PARA_ELEMENT = "para"
DEBUG_PARA_FIXER = 0
if DEBUG_PARA_FIXER:
def para_msg(s):
ewrite("*** %s\n" % s)
else:
def para_msg(s):
pass
def get_first_element(doc, gi):
for n in doc.childNodes:
if n.nodeName == gi:
return n
def extract_first_element(doc, gi):
node = get_first_element(doc, gi)
if node is not None:
doc.removeChild(node)
return node
def get_documentElement(node):
result = None
for child in node.childNodes:
if child.nodeType == ELEMENT:
result = child
return result
def set_tagName(elem, gi):
elem.nodeName = elem.tagName = gi
def find_all_elements(doc, gi):
nodes = []
if doc.nodeName == gi:
nodes.append(doc)
for child in doc.childNodes:
if child.nodeType == ELEMENT:
if child.tagName == gi:
nodes.append(child)
for node in child.getElementsByTagName(gi):
nodes.append(node)
return nodes
def find_all_child_elements(doc, gi):
nodes = []
for child in doc.childNodes:
if child.nodeName == gi:
nodes.append(child)
return nodes
def find_all_elements_from_set(doc, gi_set):
return __find_all_elements_from_set(doc, gi_set, [])
def __find_all_elements_from_set(doc, gi_set, nodes):
if doc.nodeName in gi_set:
nodes.append(doc)
for child in doc.childNodes:
if child.nodeType == ELEMENT:
__find_all_elements_from_set(child, gi_set, nodes)
return nodes
def simplify(doc, fragment):
# Try to rationalize the document a bit, since these things are simply
# not valid SGML/XML documents as they stand, and need a little work.
documentclass = "document"
inputs = []
node = extract_first_element(fragment, "documentclass")
if node is not None:
documentclass = node.getAttribute("classname")
node = extract_first_element(fragment, "title")
if node is not None:
inputs.append(node)
# update the name of the root element
node = get_first_element(fragment, "document")
if node is not None:
set_tagName(node, documentclass)
while 1:
node = extract_first_element(fragment, "input")
if node is None:
break
inputs.append(node)
if inputs:
docelem = get_documentElement(fragment)
inputs.reverse()
for node in inputs:
text = doc.createTextNode("\n")
docelem.insertBefore(text, docelem.firstChild)
docelem.insertBefore(node, text)
docelem.insertBefore(doc.createTextNode("\n"), docelem.firstChild)
while fragment.firstChild and fragment.firstChild.nodeType == TEXT:
fragment.removeChild(fragment.firstChild)
def cleanup_root_text(doc):
discards = []
skip = 0
for n in doc.childNodes:
prevskip = skip
skip = 0
if n.nodeType == TEXT and not prevskip:
discards.append(n)
elif n.nodeName == "COMMENT":
skip = 1
for node in discards:
doc.removeChild(node)
DESCRIPTOR_ELEMENTS = (
"cfuncdesc", "cvardesc", "ctypedesc",
"classdesc", "memberdesc", "memberdescni", "methoddesc", "methoddescni",
"excdesc", "funcdesc", "funcdescni", "opcodedesc",
"datadesc", "datadescni",
)
def fixup_descriptors(doc, fragment):
sections = find_all_elements(fragment, "section")
for section in sections:
find_and_fix_descriptors(doc, section)
def find_and_fix_descriptors(doc, container):
children = container.childNodes
for child in children:
if child.nodeType == ELEMENT:
tagName = child.tagName
if tagName in DESCRIPTOR_ELEMENTS:
rewrite_descriptor(doc, child)
elif tagName == "subsection":
find_and_fix_descriptors(doc, child)
def rewrite_descriptor(doc, descriptor):
#
# Do these things:
# 1. Add an "index='no'" attribute to the element if the tagName
# ends in 'ni', removing the 'ni' from the name.
# 2. Create a <signature> from the name attribute
# 2a.Create an <args> if it appears to be available.
# 3. Create additional <signature>s from <*line{,ni}> elements,
# if found.
# 4. If a <versionadded> is found, move it to an attribute on the
# descriptor.
# 5. Move remaining child nodes to a <description> element.
# 6. Put it back together.
#
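    # Illustrative sketch (editorial comment): for example, a
    # <funcdescni name="foo"> element becomes <funcdesc index="no">, whose
    # name attribute moves into a <signature><name>foo</name></signature>
    # child; supplemental *line elements become extra <signature>s, and the
    # remaining children are wrapped in a <description> element.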
# 1.
descname = descriptor.tagName
index = 1
if descname[-2:] == "ni":
descname = descname[:-2]
descriptor.setAttribute("index", "no")
set_tagName(descriptor, descname)
index = 0
desctype = descname[:-4] # remove 'desc'
linename = desctype + "line"
if not index:
linename = linename + "ni"
# 2.
signature = doc.createElement("signature")
name = doc.createElement("name")
signature.appendChild(doc.createTextNode("\n "))
signature.appendChild(name)
name.appendChild(doc.createTextNode(descriptor.getAttribute("name")))
descriptor.removeAttribute("name")
# 2a.
if descriptor.hasAttribute("var"):
if descname != "opcodedesc":
raise RuntimeError, \
"got 'var' attribute on descriptor other than opcodedesc"
variable = descriptor.getAttribute("var")
if variable:
args = doc.createElement("args")
args.appendChild(doc.createTextNode(variable))
signature.appendChild(doc.createTextNode("\n "))
signature.appendChild(args)
descriptor.removeAttribute("var")
newchildren = [signature]
children = descriptor.childNodes
pos = skip_leading_nodes(children)
if pos < len(children):
child = children[pos]
if child.nodeName == "args":
# move <args> to <signature>, or remove if empty:
child.parentNode.removeChild(child)
if len(child.childNodes):
signature.appendChild(doc.createTextNode("\n "))
signature.appendChild(child)
signature.appendChild(doc.createTextNode("\n "))
# 3, 4.
pos = skip_leading_nodes(children, pos)
while pos < len(children) \
and children[pos].nodeName in (linename, "versionadded"):
if children[pos].tagName == linename:
# this is really a supplemental signature, create <signature>
oldchild = children[pos].cloneNode(1)
try:
sig = methodline_to_signature(doc, children[pos])
except KeyError:
print oldchild.toxml()
raise
newchildren.append(sig)
else:
# <versionadded added=...>
descriptor.setAttribute(
"added", children[pos].getAttribute("version"))
pos = skip_leading_nodes(children, pos + 1)
# 5.
description = doc.createElement("description")
description.appendChild(doc.createTextNode("\n"))
newchildren.append(description)
move_children(descriptor, description, pos)
last = description.childNodes[-1]
if last.nodeType == TEXT:
last.data = string.rstrip(last.data) + "\n "
# 6.
# should have nothing but whitespace and signature lines in <descriptor>;
# discard them
while descriptor.childNodes:
descriptor.removeChild(descriptor.childNodes[0])
for node in newchildren:
descriptor.appendChild(doc.createTextNode("\n "))
descriptor.appendChild(node)
descriptor.appendChild(doc.createTextNode("\n"))
def methodline_to_signature(doc, methodline):
signature = doc.createElement("signature")
signature.appendChild(doc.createTextNode("\n "))
name = doc.createElement("name")
name.appendChild(doc.createTextNode(methodline.getAttribute("name")))
methodline.removeAttribute("name")
signature.appendChild(name)
if len(methodline.childNodes):
args = doc.createElement("args")
signature.appendChild(doc.createTextNode("\n "))
signature.appendChild(args)
move_children(methodline, args)
signature.appendChild(doc.createTextNode("\n "))
return signature
def move_children(origin, dest, start=0):
children = origin.childNodes
while start < len(children):
node = children[start]
origin.removeChild(node)
dest.appendChild(node)
def handle_appendix(doc, fragment):
    # must be called after simplify() if document is multi-rooted to begin with
docelem = get_documentElement(fragment)
toplevel = docelem.tagName == "manual" and "chapter" or "section"
appendices = 0
nodes = []
for node in docelem.childNodes:
if appendices:
nodes.append(node)
elif node.nodeType == ELEMENT:
appnodes = node.getElementsByTagName("appendix")
if appnodes:
appendices = 1
parent = appnodes[0].parentNode
parent.removeChild(appnodes[0])
parent.normalize()
if nodes:
map(docelem.removeChild, nodes)
docelem.appendChild(doc.createTextNode("\n\n\n"))
back = doc.createElement("back-matter")
docelem.appendChild(back)
back.appendChild(doc.createTextNode("\n"))
while nodes and nodes[0].nodeType == TEXT \
and not string.strip(nodes[0].data):
del nodes[0]
map(back.appendChild, nodes)
docelem.appendChild(doc.createTextNode("\n"))
def handle_labels(doc, fragment):
for label in find_all_elements(fragment, "label"):
id = label.getAttribute("id")
if not id:
continue
parent = label.parentNode
parentTagName = parent.tagName
if parentTagName == "title":
parent.parentNode.setAttribute("id", id)
else:
parent.setAttribute("id", id)
# now, remove <label id="..."/> from parent:
parent.removeChild(label)
if parentTagName == "title":
parent.normalize()
children = parent.childNodes
if children[-1].nodeType == TEXT:
children[-1].data = string.rstrip(children[-1].data)
def fixup_trailing_whitespace(doc, wsmap):
queue = [doc]
while queue:
node = queue[0]
del queue[0]
if wsmap.has_key(node.nodeName):
ws = wsmap[node.tagName]
children = node.childNodes
children.reverse()
if children[0].nodeType == TEXT:
data = string.rstrip(children[0].data) + ws
children[0].data = data
children.reverse()
# hack to get the title in place:
if node.tagName == "title" \
and node.parentNode.firstChild.nodeType == ELEMENT:
                node.parentNode.insertBefore(doc.createTextNode("\n "),
                                             node.parentNode.firstChild)
for child in node.childNodes:
if child.nodeType == ELEMENT:
queue.append(child)
def normalize(doc):
for node in doc.childNodes:
if node.nodeType == ELEMENT:
node.normalize()
def cleanup_trailing_parens(doc, element_names):
d = {}
for gi in element_names:
d[gi] = gi
rewrite_element = d.has_key
queue = []
for node in doc.childNodes:
if node.nodeType == ELEMENT:
queue.append(node)
while queue:
node = queue[0]
del queue[0]
if rewrite_element(node.tagName):
children = node.childNodes
if len(children) == 1 \
and children[0].nodeType == TEXT:
data = children[0].data
if data[-2:] == "()":
children[0].data = data[:-2]
else:
for child in node.childNodes:
if child.nodeType == ELEMENT:
queue.append(child)
def contents_match(left, right):
left_children = left.childNodes
right_children = right.childNodes
if len(left_children) != len(right_children):
return 0
for l, r in map(None, left_children, right_children):
nodeType = l.nodeType
if nodeType != r.nodeType:
return 0
if nodeType == ELEMENT:
if l.tagName != r.tagName:
return 0
# should check attributes, but that's not a problem here
if not contents_match(l, r):
return 0
elif nodeType == TEXT:
if l.data != r.data:
return 0
else:
# not quite right, but good enough
return 0
return 1
def create_module_info(doc, section):
# Heavy.
node = extract_first_element(section, "modulesynopsis")
if node is None:
return
set_tagName(node, "synopsis")
lastchild = node.childNodes[-1]
if lastchild.nodeType == TEXT \
and lastchild.data[-1:] == ".":
lastchild.data = lastchild.data[:-1]
modauthor = extract_first_element(section, "moduleauthor")
if modauthor:
set_tagName(modauthor, "author")
modauthor.appendChild(doc.createTextNode(
modauthor.getAttribute("name")))
modauthor.removeAttribute("name")
platform = extract_first_element(section, "platform")
if section.tagName == "section":
modinfo_pos = 2
modinfo = doc.createElement("moduleinfo")
moddecl = extract_first_element(section, "declaremodule")
name = None
if moddecl:
modinfo.appendChild(doc.createTextNode("\n "))
name = moddecl.attributes["name"].value
namenode = doc.createElement("name")
namenode.appendChild(doc.createTextNode(name))
modinfo.appendChild(namenode)
type = moddecl.attributes.get("type")
if type:
type = type.value
modinfo.appendChild(doc.createTextNode("\n "))
typenode = doc.createElement("type")
typenode.appendChild(doc.createTextNode(type))
modinfo.appendChild(typenode)
versionadded = extract_first_element(section, "versionadded")
if versionadded:
modinfo.setAttribute("added", versionadded.getAttribute("version"))
title = get_first_element(section, "title")
if title:
children = title.childNodes
if len(children) >= 2 \
and children[0].nodeName == "module" \
and children[0].childNodes[0].data == name:
# this is it; morph the <title> into <short-synopsis>
first_data = children[1]
if first_data.data[:4] == " ---":
first_data.data = string.lstrip(first_data.data[4:])
set_tagName(title, "short-synopsis")
if children[-1].nodeType == TEXT \
and children[-1].data[-1:] == ".":
children[-1].data = children[-1].data[:-1]
section.removeChild(title)
section.removeChild(section.childNodes[0])
title.removeChild(children[0])
modinfo_pos = 0
else:
ewrite("module name in title doesn't match"
" <declaremodule/>; no <short-synopsis/>\n")
else:
ewrite("Unexpected condition: <section/> without <title/>\n")
modinfo.appendChild(doc.createTextNode("\n "))
modinfo.appendChild(node)
if title and not contents_match(title, node):
# The short synopsis is actually different,
# and needs to be stored:
modinfo.appendChild(doc.createTextNode("\n "))
modinfo.appendChild(title)
if modauthor:
modinfo.appendChild(doc.createTextNode("\n "))
modinfo.appendChild(modauthor)
if platform:
modinfo.appendChild(doc.createTextNode("\n "))
modinfo.appendChild(platform)
modinfo.appendChild(doc.createTextNode("\n "))
section.insertBefore(modinfo, section.childNodes[modinfo_pos])
section.insertBefore(doc.createTextNode("\n "), modinfo)
#
# The rest of this removes extra newlines from where we cut out
# a lot of elements. A lot of code for minimal value, but keeps
# keeps the generated *ML from being too funny looking.
#
section.normalize()
children = section.childNodes
for i in range(len(children)):
node = children[i]
if node.nodeName == "moduleinfo":
nextnode = children[i+1]
if nextnode.nodeType == TEXT:
data = nextnode.data
if len(string.lstrip(data)) < (len(data) - 4):
nextnode.data = "\n\n\n" + string.lstrip(data)
def cleanup_synopses(doc, fragment):
for node in find_all_elements(fragment, "section"):
create_module_info(doc, node)
def fixup_table_structures(doc, fragment):
for table in find_all_elements(fragment, "table"):
fixup_table(doc, table)
def fixup_table(doc, table):
# create the table head
thead = doc.createElement("thead")
row = doc.createElement("row")
move_elements_by_name(doc, table, row, "entry")
thead.appendChild(doc.createTextNode("\n "))
thead.appendChild(row)
thead.appendChild(doc.createTextNode("\n "))
# create the table body
tbody = doc.createElement("tbody")
prev_row = None
last_was_hline = 0
children = table.childNodes
for child in children:
if child.nodeType == ELEMENT:
tagName = child.tagName
if tagName == "hline" and prev_row is not None:
prev_row.setAttribute("rowsep", "1")
elif tagName == "row":
prev_row = child
# save the rows:
tbody.appendChild(doc.createTextNode("\n "))
move_elements_by_name(doc, table, tbody, "row", sep="\n ")
# and toss the rest:
while children:
child = children[0]
nodeType = child.nodeType
if nodeType == TEXT:
if string.strip(child.data):
raise ConversionError("unexpected free data in <%s>: %r"
% (table.tagName, child.data))
table.removeChild(child)
continue
if nodeType == ELEMENT:
if child.tagName != "hline":
raise ConversionError(
"unexpected <%s> in table" % child.tagName)
table.removeChild(child)
continue
raise ConversionError(
"unexpected %s node in table" % child.__class__.__name__)
# nothing left in the <table>; add the <thead> and <tbody>
tgroup = doc.createElement("tgroup")
tgroup.appendChild(doc.createTextNode("\n "))
tgroup.appendChild(thead)
tgroup.appendChild(doc.createTextNode("\n "))
tgroup.appendChild(tbody)
tgroup.appendChild(doc.createTextNode("\n "))
table.appendChild(tgroup)
# now make the <entry>s look nice:
for row in table.getElementsByTagName("row"):
fixup_row(doc, row)
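# Added commentary (not part of the original script): fixup_table() reshapes a
# flat LaTeX-style table into a <tgroup> holding a <thead> (built from the
# leading <entry> column heads) and a <tbody> (the remaining <row> elements),
# while <hline> markers become rowsep="1" attributes on the preceding row.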
def fixup_row(doc, row):
entries = []
map(entries.append, row.childNodes[1:])
for entry in entries:
row.insertBefore(doc.createTextNode("\n "), entry)
# row.appendChild(doc.createTextNode("\n "))
def move_elements_by_name(doc, source, dest, name, sep=None):
nodes = []
for child in source.childNodes:
if child.nodeName == name:
nodes.append(child)
for node in nodes:
source.removeChild(node)
dest.appendChild(node)
if sep:
dest.appendChild(doc.createTextNode(sep))
RECURSE_INTO_PARA_CONTAINERS = (
"chapter", "abstract", "enumerate",
"section", "subsection", "subsubsection",
"paragraph", "subparagraph", "back-matter",
"howto", "manual",
"item", "itemize", "fulllineitems", "enumeration", "descriptionlist",
"definitionlist", "definition",
)
PARA_LEVEL_ELEMENTS = (
"moduleinfo", "title", "verbatim", "enumerate", "item",
"interpreter-session", "back-matter", "interactive-session",
"opcodedesc", "classdesc", "datadesc",
"funcdesc", "methoddesc", "excdesc", "memberdesc", "membderdescni",
"funcdescni", "methoddescni", "excdescni",
"tableii", "tableiii", "tableiv", "localmoduletable",
"sectionauthor", "seealso", "itemize",
# include <para>, so we can just do it again to get subsequent paras:
PARA_ELEMENT,
)
PARA_LEVEL_PRECEEDERS = (
"setindexsubitem", "author",
"stindex", "obindex", "COMMENT", "label", "input", "title",
"versionadded", "versionchanged", "declaremodule", "modulesynopsis",
"moduleauthor", "indexterm", "leader",
)
def fixup_paras(doc, fragment):
for child in fragment.childNodes:
if child.nodeName in RECURSE_INTO_PARA_CONTAINERS:
fixup_paras_helper(doc, child)
descriptions = find_all_elements(fragment, "description")
for description in descriptions:
fixup_paras_helper(doc, description)
def fixup_paras_helper(doc, container, depth=0):
# document is already normalized
children = container.childNodes
start = skip_leading_nodes(children)
while len(children) > start:
if children[start].nodeName in RECURSE_INTO_PARA_CONTAINERS:
# Something to recurse into:
fixup_paras_helper(doc, children[start])
else:
# Paragraph material:
build_para(doc, container, start, len(children))
if DEBUG_PARA_FIXER and depth == 10:
sys.exit(1)
start = skip_leading_nodes(children, start + 1)
def build_para(doc, parent, start, i):
children = parent.childNodes
after = start + 1
have_last = 0
BREAK_ELEMENTS = PARA_LEVEL_ELEMENTS + RECURSE_INTO_PARA_CONTAINERS
# Collect all children until \n\n+ is found in a text node or a
# member of BREAK_ELEMENTS is found.
for j in range(start, i):
after = j + 1
child = children[j]
nodeType = child.nodeType
if nodeType == ELEMENT:
if child.tagName in BREAK_ELEMENTS:
after = j
break
elif nodeType == TEXT:
pos = string.find(child.data, "\n\n")
if pos == 0:
after = j
break
if pos >= 1:
child.splitText(pos)
break
else:
have_last = 1
if (start + 1) > after:
raise ConversionError(
"build_para() could not identify content to turn into a paragraph")
if children[after - 1].nodeType == TEXT:
# we may need to split off trailing white space:
child = children[after - 1]
data = child.data
if string.rstrip(data) != data:
have_last = 0
child.splitText(len(string.rstrip(data)))
para = doc.createElement(PARA_ELEMENT)
prev = None
indexes = range(start, after)
indexes.reverse()
for j in indexes:
node = parent.childNodes[j]
parent.removeChild(node)
para.insertBefore(node, prev)
prev = node
if have_last:
parent.appendChild(para)
parent.appendChild(doc.createTextNode("\n\n"))
return len(parent.childNodes)
else:
nextnode = parent.childNodes[start]
if nextnode.nodeType == TEXT:
if nextnode.data and nextnode.data[0] != "\n":
nextnode.data = "\n" + nextnode.data
else:
newnode = doc.createTextNode("\n")
parent.insertBefore(newnode, nextnode)
nextnode = newnode
start = start + 1
parent.insertBefore(para, nextnode)
return start + 1
def skip_leading_nodes(children, start=0):
"""Return index into children of a node at which paragraph building should
begin or a recursive call to fixup_paras_helper() should be made (for
subsections, etc.).
When the return value >= len(children), we've built all the paras we can
from this list of children.
"""
i = len(children)
while i > start:
# skip over leading comments and whitespace:
child = children[start]
nodeType = child.nodeType
if nodeType == TEXT:
data = child.data
shortened = string.lstrip(data)
if shortened:
if data != shortened:
# break into two nodes: whitespace and non-whitespace
child.splitText(len(data) - len(shortened))
return start + 1
return start
# all whitespace, just skip
elif nodeType == ELEMENT:
tagName = child.tagName
if tagName in RECURSE_INTO_PARA_CONTAINERS:
return start
if tagName not in PARA_LEVEL_ELEMENTS + PARA_LEVEL_PRECEEDERS:
return start
start = start + 1
return start
def fixup_rfc_references(doc, fragment):
for rfcnode in find_all_elements(fragment, "rfc"):
rfcnode.appendChild(doc.createTextNode(
"RFC " + rfcnode.getAttribute("num")))
def fixup_signatures(doc, fragment):
for child in fragment.childNodes:
if child.nodeType == ELEMENT:
args = child.getElementsByTagName("args")
for arg in args:
fixup_args(doc, arg)
arg.normalize()
args = child.getElementsByTagName("constructor-args")
for arg in args:
fixup_args(doc, arg)
arg.normalize()
def fixup_args(doc, arglist):
for child in arglist.childNodes:
if child.nodeName == "optional":
# found it; fix and return
arglist.insertBefore(doc.createTextNode("["), child)
optkids = child.childNodes
while optkids:
k = optkids[0]
child.removeChild(k)
arglist.insertBefore(k, child)
arglist.insertBefore(doc.createTextNode("]"), child)
arglist.removeChild(child)
return fixup_args(doc, arglist)
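# Added commentary (not from the original source): fixup_args() rewrites the
# nested <optional> markup into plain bracket notation, so an <args> node
# containing "arg1" followed by <optional>, arg2</optional> ends up reading
# "arg1[, arg2]" once each optional wrapper is unwrapped and bracketed.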
def fixup_sectionauthors(doc, fragment):
for sectauth in find_all_elements(fragment, "sectionauthor"):
section = sectauth.parentNode
section.removeChild(sectauth)
set_tagName(sectauth, "author")
sectauth.appendChild(doc.createTextNode(
sectauth.getAttribute("name")))
sectauth.removeAttribute("name")
after = section.childNodes[2]
title = section.childNodes[1]
if title.nodeName != "title":
after = section.childNodes[0]
section.insertBefore(doc.createTextNode("\n "), after)
section.insertBefore(sectauth, after)
def fixup_verbatims(doc):
for verbatim in find_all_elements(doc, "verbatim"):
child = verbatim.childNodes[0]
if child.nodeType == TEXT \
and string.lstrip(child.data)[:3] == ">>>":
set_tagName(verbatim, "interactive-session")
def add_node_ids(fragment, counter=0):
fragment.node_id = counter
for node in fragment.childNodes:
counter = counter + 1
if node.nodeType == ELEMENT:
counter = add_node_ids(node, counter)
else:
node.node_id = counter
return counter + 1
REFMODINDEX_ELEMENTS = ('refmodindex', 'refbimodindex',
'refexmodindex', 'refstmodindex')
def fixup_refmodindexes(fragment):
# Locate <ref*modindex>...</> co-located with <module>...</>, and
# remove the <ref*modindex>, replacing it with index=index on the
# <module> element.
nodes = find_all_elements_from_set(fragment, REFMODINDEX_ELEMENTS)
d = {}
for node in nodes:
parent = node.parentNode
d[parent.node_id] = parent
del nodes
map(fixup_refmodindexes_chunk, d.values())
def fixup_refmodindexes_chunk(container):
# node is probably a <para>; let's see how often it isn't:
if container.tagName != PARA_ELEMENT:
bwrite("--- fixup_refmodindexes_chunk(%s)\n" % container)
module_entries = find_all_elements(container, "module")
if not module_entries:
return
index_entries = find_all_elements_from_set(container, REFMODINDEX_ELEMENTS)
removes = []
for entry in index_entries:
children = entry.childNodes
if len(children) != 0:
bwrite("--- unexpected number of children for %s node:\n"
% entry.tagName)
ewrite(entry.toxml() + "\n")
continue
found = 0
module_name = entry.getAttribute("module")
for node in module_entries:
if len(node.childNodes) != 1:
continue
this_name = node.childNodes[0].data
if this_name == module_name:
found = 1
node.setAttribute("index", "yes")
if found:
removes.append(entry)
for node in removes:
container.removeChild(node)
def fixup_bifuncindexes(fragment):
nodes = find_all_elements(fragment, 'bifuncindex')
d = {}
# make sure that each parent is only processed once:
for node in nodes:
parent = node.parentNode
d[parent.node_id] = parent
del nodes
map(fixup_bifuncindexes_chunk, d.values())
def fixup_bifuncindexes_chunk(container):
removes = []
entries = find_all_child_elements(container, "bifuncindex")
function_entries = find_all_child_elements(container, "function")
for entry in entries:
function_name = entry.getAttribute("name")
found = 0
for func_entry in function_entries:
t2 = func_entry.childNodes[0].data
if t2[-2:] != "()":
continue
t2 = t2[:-2]
if t2 == function_name:
func_entry.setAttribute("index", "yes")
func_entry.setAttribute("module", "__builtin__")
if not found:
found = 1
removes.append(entry)
for entry in removes:
container.removeChild(entry)
def join_adjacent_elements(container, gi):
queue = [container]
while queue:
parent = queue.pop()
i = 0
children = parent.childNodes
nchildren = len(children)
while i < (nchildren - 1):
child = children[i]
if child.nodeName == gi:
if children[i+1].nodeName == gi:
ewrite("--- merging two <%s/> elements\n" % gi)
child = children[i]
nextchild = children[i+1]
nextchildren = nextchild.childNodes
while len(nextchildren):
node = nextchildren[0]
nextchild.removeChild(node)
child.appendChild(node)
parent.removeChild(nextchild)
continue
if child.nodeType == ELEMENT:
queue.append(child)
i = i + 1
_token_rx = re.compile(r"[a-zA-Z][a-zA-Z0-9.-]*$")
def write_esis(doc, ofp, knownempty):
for node in doc.childNodes:
nodeType = node.nodeType
if nodeType == ELEMENT:
gi = node.tagName
if knownempty(gi):
if node.hasChildNodes():
raise ValueError, \
"declared-empty node <%s> has children" % gi
ofp.write("e\n")
for k, value in node.attributes.items():
if _token_rx.match(value):
dtype = "TOKEN"
else:
dtype = "CDATA"
ofp.write("A%s %s %s\n" % (k, dtype, esistools.encode(value)))
ofp.write("(%s\n" % gi)
write_esis(node, ofp, knownempty)
ofp.write(")%s\n" % gi)
elif nodeType == TEXT:
ofp.write("-%s\n" % esistools.encode(node.data))
elif nodeType == ENTITY_REFERENCE:
ofp.write("&%s\n" % node.nodeName)
else:
raise RuntimeError, "unsupported node type: %s" % nodeType
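# Rough sketch of the ESIS lines emitted above (illustrative, not taken from
# the original source): an element such as <module index="yes">spam</module>
# would be written as
#   Aindex TOKEN yes
#   (module
#   -spam
#   )module
# with an "e" line emitted first for elements declared as empty.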
def convert(ifp, ofp):
events = esistools.parse(ifp)
toktype, doc = events.getEvent()
fragment = doc.createDocumentFragment()
events.expandNode(fragment)
normalize(fragment)
simplify(doc, fragment)
handle_labels(doc, fragment)
handle_appendix(doc, fragment)
fixup_trailing_whitespace(doc, {
"abstract": "\n",
"title": "",
"chapter": "\n\n",
"section": "\n\n",
"subsection": "\n\n",
"subsubsection": "\n\n",
"paragraph": "\n\n",
"subparagraph": "\n\n",
})
cleanup_root_text(doc)
cleanup_trailing_parens(fragment, ["function", "method", "cfunction"])
cleanup_synopses(doc, fragment)
fixup_descriptors(doc, fragment)
fixup_verbatims(fragment)
normalize(fragment)
fixup_paras(doc, fragment)
fixup_sectionauthors(doc, fragment)
fixup_table_structures(doc, fragment)
fixup_rfc_references(doc, fragment)
fixup_signatures(doc, fragment)
add_node_ids(fragment)
fixup_refmodindexes(fragment)
fixup_bifuncindexes(fragment)
# Take care of ugly hacks in the LaTeX markup to avoid LaTeX and
# LaTeX2HTML screwing with GNU-style long options (the '--' problem).
join_adjacent_elements(fragment, "option")
#
d = {}
for gi in events.parser.get_empties():
d[gi] = gi
if d.has_key("author"):
del d["author"]
if d.has_key("rfc"):
del d["rfc"]
knownempty = d.has_key
#
try:
write_esis(fragment, ofp, knownempty)
except IOError, (err, msg):
# Ignore EPIPE; it just means that whoever we're writing to stopped
# reading. The rest of the output would be ignored. All other errors
        # should still be reported.
if err != errno.EPIPE:
raise
def main():
if len(sys.argv) == 1:
ifp = sys.stdin
ofp = sys.stdout
elif len(sys.argv) == 2:
ifp = open(sys.argv[1])
ofp = sys.stdout
elif len(sys.argv) == 3:
ifp = open(sys.argv[1])
import StringIO
ofp = StringIO.StringIO()
else:
usage()
sys.exit(2)
convert(ifp, ofp)
if len(sys.argv) == 3:
fp = open(sys.argv[2], "w")
fp.write(ofp.getvalue())
fp.close()
ofp.close()
if __name__ == "__main__":
main()
|
python
|
import importlib.util
import os
def vyLoadModuleFromFilePath(filePath, moduleName=None):
if moduleName == None:
replacements = [
('/', '.'),
('\\', '.'),
('-', '_'),
(' ', '_'),
]
filePathSansExt = os.path.splitext(filePath)[0]
for issue, replacement in replacements:
filePathSansExt = filePathSansExt.replace(issue, replacement)
moduleName = filePathSansExt
spec = importlib.util.spec_from_file_location(moduleName, filePath)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
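# Hypothetical usage sketch (the path and attribute below are assumptions, not
# part of the original code):
#   module = vyLoadModuleFromFilePath("plugins/my-plugin.py")
#   module.some_function()
# With no explicit moduleName, the module is registered under the derived name
# "plugins.my_plugin" (separators and dashes replaced as listed above).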
|
python
|
from ._plugin import Workplane
from ._plugin import extend
|
python
|
"""Testing File for roman_ssg_util"""
import os
import shutil
import roman_ssg_util
def setup_test():
"""Setup Tests for tests"""
os.chdir(os.getcwd())
if os.path.isdir("dist"):
shutil.rmtree("dist")
if os.path.isdir("testCustomDirectory"):
shutil.rmtree("testCustomDirectory")
def test_get_local_files():
"""Test get_local_files function"""
tempArr = ["File1.txt"]
if os.name == "posix":
assert roman_ssg_util.get_local_files(tempArr) == [
os.getcwd() + "/" + "File1.txt"
]
else:
assert roman_ssg_util.get_local_files(tempArr) == [
os.getcwd() + "\\" + "File1.txt"
]
def test_create_css_file():
"""Test Create CSS File function"""
roman_ssg_util.create_css_file(True)
assert os.path.isfile("./main.css")
os.remove("main.css")
def test_create_css_file_fail():
"""Test fail of Create CSS File function"""
roman_ssg_util.create_css_file(False)
assert not os.path.isfile("./main.css")
def test_write_to_file():
"""Test write_to_file function"""
if os.name == "posix":
filePath = os.getcwd() + "/" + "example.txt"
else:
filePath = os.getcwd() + "\\" + "example.txt"
roman_ssg_util.write_to_file(
"en-CA",
filePath,
0,
["example.txt"],
)
assert os.path.isfile("example.html")
os.remove("example.html")
def test_conversion_func_file_non_custom_dir():
"""Test Conversion File function without Custom Directory"""
fileArr = []
fileArr.append("example.txt")
fileArr.append("test.md")
if os.name == "posix":
roman_ssg_util.conversion_func_file(
"en-CA",
False,
fileArr,
"",
os.getcwd() + "/" + "dist",
)
else:
roman_ssg_util.conversion_func_file(
"en-CA",
False,
fileArr,
"",
os.getcwd() + "\\" + "dist",
)
assert os.path.isfile("example.html")
assert os.path.isfile("test.html")
def test_conversion_func_file_custom_dir():
"""Test Conversion File function with Custom Directory"""
os.chdir("..")
setup_test()
fileArr = []
fileArr.append("example.txt")
fileArr.append("test.md")
os.mkdir("testCustomDirectory")
if os.name == "posix":
roman_ssg_util.conversion_func_file(
"en-CA",
True,
fileArr,
os.getcwd() + "/" + "testCustomDirectory",
os.getcwd() + "/" + "dist",
)
else:
roman_ssg_util.conversion_func_file(
"en-CA",
True,
fileArr,
os.getcwd() + "\\" + "testCustomDirectory",
os.getcwd() + "\\" + "dist",
)
assert os.path.isfile("example.html")
assert os.path.isfile("test.html")
def test_converstion_func_folder_non_custom_dir():
"""Test Conversion Folder function without Custom Directory"""
os.chdir("..")
setup_test()
arrayOfFiles = []
arrayOfFiles.append("Silver Blaze.txt")
arrayOfFiles.append("The Adventure of the Six Napoleans.txt")
arrayOfFiles.append("The Adventure of the Speckled Band.txt")
arrayOfFiles.append("The Naval Treaty.txt")
arrayOfFiles.append("The Red Headed League.txt")
if os.name == "posix":
roman_ssg_util.conversion_func_folder(
"en-CA",
os.getcwd() + "/" + "Sherlock-Holmes-Selected-Stories",
False,
arrayOfFiles,
"",
os.getcwd() + "/" + "dist",
)
else:
roman_ssg_util.conversion_func_folder(
"en-CA",
os.getcwd() + "\\" + "Sherlock-Holmes-Selected-Stories",
False,
arrayOfFiles,
"",
os.getcwd() + "\\" + "dist",
)
assert os.path.isfile("Silver Blaze.html")
assert os.path.isfile("The Adventure of the Six Napoleans.html")
assert os.path.isfile("The Adventure of the Speckled Band.html")
assert os.path.isfile("The Naval Treaty.html")
assert os.path.isfile("The Red Headed League.html")
def test_converstion_func_folder_custom_dir():
"""Test Conversion Folder function with Custom Directory"""
os.chdir("..")
setup_test()
arrayOfFiles = []
arrayOfFiles.append("Silver Blaze.txt")
arrayOfFiles.append("The Adventure of the Six Napoleans.txt")
arrayOfFiles.append("The Adventure of the Speckled Band.txt")
arrayOfFiles.append("The Naval Treaty.txt")
arrayOfFiles.append("The Red Headed League.txt")
if os.path.isdir("testCustomDirectory"):
shutil.rmtree("testCustomDirectory")
os.mkdir("testCustomDirectory")
else:
os.mkdir("testCustomDirectory")
if os.name == "posix":
roman_ssg_util.conversion_func_folder(
"en-CA",
os.getcwd() + "/" + "Sherlock-Holmes-Selected-Stories",
True,
arrayOfFiles,
os.getcwd() + "/" + "testCustomDirectory",
os.getcwd() + "/" + "dist",
)
else:
roman_ssg_util.conversion_func_folder(
"en-CA",
os.getcwd() + "\\" + "Sherlock-Holmes-Selected-Stories",
True,
arrayOfFiles,
os.getcwd() + "\\" + "testCustomDirectory",
os.getcwd() + "\\" + "dist",
)
assert os.path.isfile("Silver Blaze.html")
assert os.path.isfile("The Adventure of the Six Napoleans.html")
assert os.path.isfile("The Adventure of the Speckled Band.html")
assert os.path.isfile("The Naval Treaty.html")
assert os.path.isfile("The Red Headed League.html")
os.chdir("..")
shutil.rmtree("testCustomDirectory")
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND/intropylab-classifying-images/check_images.py
#
# DONE: 0. Fill in your information in the programming header below
# PROGRAMMER: Aimee Ukasick
# DATE CREATED: 11 April 2018
# REVISED DATE: <=(Date Revised - if any)
# PURPOSE: Check images & report results: read them in, predict their
# content (classifier), compare prediction to actual value labels
# and output results
#
# Use argparse Expected Call with <> indicating expected user input:
# python check_images.py --dir <directory with images> --arch <model>
# --dogfile <file that contains dognames>
# Example call:
# python check_images.py --dir pet_images/ --arch vgg --dogfile dognames.txt
##
# Imports python modules
import argparse
# Imports time() and sleep() functions from time module
from time import time, sleep
from os import listdir
# Imports classifier function for using CNN to classify images
from classifier import classifier
# Main program function defined below
def main():
# DONE: 1. Define start_time to measure total program runtime by
# collecting start time
start_time = time()
# DONE: 2. Define get_input_args() function to create & retrieve command
# line arguments
print("***** calling get_input_args *****")
in_args = get_input_args()
#print_command_line_args(in_args)
# DONE: 3. Define get_pet_labels() function to create pet image labels by
# creating a dictionary with key=filename and value=file label to be used
# to check the accuracy of the classifier function
print("***** calling get_pet_labels *****")
petlabels_dict = get_pet_labels(in_args.dir)
# print("***** print_petlabels_dict(petlabels_dict) *****")
# print_petlabels_dict(petlabels_dict)
# DONE: 4. Define classify_images() function to create the classifier
# labels with the classifier function using in_arg.arch, comparing the
# labels, and creating a dictionary of results (result_dic)
# print("***** calling classify_images *****")
# result_dic = classify_images(in_args.dir, petlabels_dict, in_args.arch)
# print("***** printing my code classify_images result_dic *****")
# print_result_dic(result_dic)
print("***** calling classify_images_udacity *****")
result_dic = classify_images_udacity(in_args.dir, petlabels_dict,
in_args.arch)
# print("***** printing classify_images_udacity result_dic *****")
# print_result_dic(result_dic)
# DONE: 5. Define adjust_results4_isadog() function to adjust the results
# dictionary(result_dic) to determine if classifier correctly classified
# images as 'a dog' or 'not a dog'. This demonstrates if the model can
# correctly classify dog images as dogs (regardless of breed)
# print("***** calling adjust_results4_isadog *****")
# adjust_results4_isadog(result_dic, in_args.dogfile)
# print("***** printing my adjust_results4_isadog *****")
# print_adjust_results4_isadog(result_dic)
print("***** calling adjust_results4_isadog_udacity *****")
adjust_results4_isadog_udacity(result_dic, in_args.dogfile)
# print("***** printing my adjust_results4_isadog_udacity *****")
# print_adjust_results4_isadog(result_dic)
# DONE: 6. Define calculates_results_stats() function to calculate
# results of run and puts statistics in a results statistics
# dictionary (results_stats_dic)
print("***** calculates_results_stats *****")
results_stats_dic = calculates_results_stats(result_dic)
print("***** check_results_stats *****")
check_results_stats(results_stats_dic, result_dic)
# DONE: 7. Define print_results() function to print summary results,
# incorrect classifications of dogs and breeds if requested.
print_results(result_dic, results_stats_dic, in_args.arch,
True, True)
# DONE: 1. Define end_time to measure total program runtime
# by collecting end time
end_time = time()
# DONE: 1. Define tot_time to computes overall runtime in
# seconds & prints it in hh:mm:ss format
tot_time = end_time - start_time
hours = int((tot_time / 3600))
minutes = int(((tot_time % 3600) / 60))
seconds = int(((tot_time % 3600) % 60))
print("\n** Total Elapsed Runtime:", str(hours) +
":" + str(minutes) + ":" + str(seconds))
# TODO: 2.-to-7. Define all the functions below. Notice that the input
# parameters and return values have been left in the functions' docstrings.
# This is to provide guidance for achieving a solution similar to the
# instructor provided solution. Feel free to ignore this guidance as long as
# you are able to achieve the desired outcomes with this lab.
def get_input_args():
"""
Retrieves and parses the command line arguments created and defined using
the argparse module. This function returns these arguments as an
ArgumentParser object.
    3 command line arguments are created:
dir - Path to the pet image files(default- 'pet_images/')
arch - CNN model architecture to use for image classification(default-
pick any of the following vgg, alexnet, resnet)
dogfile - Text file that contains all labels associated to dogs(default-
'dognames.txt'
Parameters:
None - simply using argparse module to create & store command line arguments
Returns:
parse_args() -data structure that stores the command line arguments object
"""
parser = argparse.ArgumentParser()
# arg 1 - path to folder with default
parser.add_argument('--dir', type=str, default='pet_images/',
help='path to the folder that contains the images; default is pet_images')
# arg 2 - CNN model architecture to use for image classification
parser.add_argument('--arch', type=str, default='vgg',
help='CNN model to use for image classification; default is vgg')
# arg 3 - file that contains all labels associated to dogs
parser.add_argument('--dogfile', type=str, default='dognames.txt',
help='file that contains all labels associated to dogs;default is dognames.txt')
# Assigns variable in_args to parse_args()
in_args = parser.parse_args()
return in_args
def get_pet_labels(image_dir):
"""
Creates a dictionary of pet labels based upon the filenames of the image
files. Reads in pet filenames and extracts the pet image labels from the
    filenames and returns these labels as petlabel_dic. This is used to check
    the accuracy of the image classifier model. The pet image labels are all
    lowercase, have a single space separating each word in multi-word labels,
    and correctly represent the filenames.
Parameters:
image_dir - The (full) path to the folder of images that are to be
classified by pretrained CNN models (string)
Returns:
petlabels_dic - Dictionary storing image filename (as key) and Pet Image
Labels (as value)
"""
filename_list = listdir(image_dir)
#print("\nPrints 10 filenames from folder ", image_dir)
# for idx in range(0, 10, 1):
#print("%2d file: %-25s" % (idx + 1, filename_list[idx]))
petlabels_dic = dict()
for filename in filename_list:
if filename not in petlabels_dic:
# d['mynewkey'] = 'mynewvalue'
name = filename.split(".")[0]
name = name.replace("_", " ").lower()
final_name = ''.join(
char for char in name if not char.isdigit()).rstrip(" ")
petlabels_dic[filename] = final_name
else:
print("WARNING: ", filename, " already exists in dictionary!")
#udacity solution
# in_files = listdir(image_dir)
# petlabels_dic2 = dict()
# for idx in range(0, len(in_files), 1):
# if in_files[idx][0] != ".": #only for Mac
# image_name = in_files[idx].split("_")
# pet_label = ""
# for word in image_name:
# if word.isalpha():
# pet_label += word.lower() + " "
#
# pet_label = pet_label.strip()
#
# if in_files[idx] not in petlabels_dic2:
# petlabels_dic2[in_files[idx]] = pet_label
#
# else:
# print("Warning: Duplicate files exist in directory",
# in_files[idx])
#
# print("\n PRINTING petlabels_dic2")
# print_petlabels_dict(petlabels_dic2)
return petlabels_dic
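# Illustrative example of the label extraction above (filename is an
# assumption): "Beagle_01141.jpg" -> "beagle", i.e. extension dropped,
# underscores replaced with spaces, lowercased, digits removed, and the
# trailing space stripped.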
def classify_images(images_dir, petlabel_dic, model):
"""
Creates classifier labels with classifier function, compares labels, and
creates a dictionary containing both labels and comparison of them to be
returned.
PLEASE NOTE: This function uses the classifier() function defined in
classifier.py within this function. The proper use of this function is
in test_classifier.py Please refer to this program prior to using the
classifier() function to classify images in this function.
Parameters:
images_dir - The (full) path to the folder of images that are to be
classified by pretrained CNN models (string)
petlabel_dic - Dictionary that contains the pet image(true) labels
that classify what's in the image, where its key is the
pet image filename & its value is pet image label where
label is lowercase with space between each word in label
model - pre-trained CNN whose architecture is indicated by this
parameter,
values must be: resnet alexnet vgg (string)
Returns:
results_dic - Dictionary with key as image filename and value as a List
(index)idx 0 = pet image label (string)
idx 1 = classifier label (string)
idx 2 = 1/0 (int) where 1 = match between pet image and
classifier labels and 0 = no match between labels
"""
results_dic = {}
for filename in petlabel_dic.keys():
pet_label = petlabel_dic[filename]
path = images_dir + "/" + filename
classifier_label = classifier(path, model)
classifier_label = classifier_label.lower()
# remove leading and trailing whitespaces
classifier_label = classifier_label.strip()
found_index = classifier_label.find(pet_label)
# if found, make sure the pet_label is a whole standalone word within
# the classifier_label and not part of another word
# example: cat can be part of polecat, which is a skunk, and that
# would result in incorrect classification
is_whole_word_match = 0
#if found_index >= 0:
# remove whitespace after comma
# c_label = classifier_label.replace(", ", ",")
# create list from classifier_label
# label_list = c_label.split(",")
# if pet_label in label_list:
# is_whole_word_match = 1
if found_index >= 0:
conda = found_index == 0 and len(pet_label) == len(
classifier_label)
condb = found_index == 0 or classifier_label[
found_index - 1] == " "
condc = found_index + len(pet_label) == len(classifier_label)
condd = classifier_label[found_index + len(pet_label):
found_index + len(pet_label) + 1] in (
",", " ")
if conda or (condb and (condc or condd)):
is_whole_word_match = 1
value_list = [pet_label, classifier_label, is_whole_word_match]
        if filename not in results_dic:
            results_dic[filename] = value_list
return results_dic
def classify_images_udacity(images_dir, petlabel_dic, model):
"""
Creates classifier labels with classifier function, compares labels, and
creates a dictionary containing both labels and comparison of them to be
returned.
PLEASE NOTE: This function uses the classifier() function defined in
classifier.py within this function. The proper use of this function is
in test_classifier.py Please refer to this program prior to using the
classifier() function to classify images in this function.
Parameters:
images_dir - The (full) path to the folder of images that are to be
classified by pretrained CNN models (string)
petlabel_dic - Dictionary that contains the pet image(true) labels
that classify what's in the image, where its key is the
pet image filename & its value is pet image label where
label is lowercase with space between each word in label
model - pre-trained CNN whose architecture is indicated by this
parameter,
values must be: resnet alexnet vgg (string)
Returns:
results_dic - Dictionary with key as image filename and value as a List
(index)idx 0 = pet image label (string)
idx 1 = classifier label (string)
idx 2 = 1/0 (int) where 1 = match between pet image and
classifier labels and 0 = no match between labels
"""
results_dic = dict()
for key in petlabel_dic:
model_label = classifier(images_dir+key, model)
model_label = model_label.lower()
model_label = model_label.strip()
# defines truth as pet image label and tries to find it using find()
# string function to find it within classifier_label(model_label)
truth = petlabel_dic[key]
found = model_label.find(truth)
if found >= 0:
conda = found == 0 and len(truth) == len(
model_label)
condb = found == 0 or model_label[
found - 1] == " "
condc = found + len(truth) == len(model_label)
condd = model_label[found + len(truth):
found + len(truth) + 1] in (
",", " ")
if conda or (condb and (condc or condd)):
if key not in results_dic:
results_dic[key] = [truth, model_label, 1]
# found within a word/term not a label existing on its own
else:
if key not in results_dic:
results_dic[key] = [truth, model_label, 0]
return results_dic
def adjust_results4_isadog(results_dic, dogsfilename):
"""
Adjusts the results dictionary to determine if classifier correctly
classified images 'as a dog' or 'not a dog' especially when not a match.
Demonstrates if model architecture correctly classifies dog images even if
it gets dog breed wrong (not a match).
Parameters:
results_dic - Dictionary with key as image filename and value as a List
(index)idx 0 = pet image label (string)
idx 1 = classifier label (string)
idx 2 = 1/0 (int) where 1 = match between pet image and
classifier labels and 0 = no match between labels
--- where idx 3 & idx 4 are added by this function ---
idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and
0 = pet Image 'is-NOT-a' dog.
idx 4 = 1/0 (int) where 1 = Classifier classifies image
'as-a' dog and 0 = Classifier classifies image
'as-NOT-a' dog.
dogsfile - A text file that contains names of all dogs from ImageNet
1000 labels (used by classifier model) and dog names from
the pet image files. This file has one dog name per line
dog names are all in lowercase with spaces separating the
distinct words of the dog name. This file should have been
passed in as a command line argument. (string - indicates
text file's name)
Returns:
None - results_dic is mutable data type so no return needed.
"""
# a match between classifier label and pet label match dogsfilename entry
# if pet label in dogsfilename list, idx 3 = 1
# if classifier label in dogsfilename list, idx 4 is 1
dogsname_dic = dict()
try:
with open(dogsfilename) as f:
for line in f:
line = line.rstrip()
if line not in dogsname_dic:
dogsname_dic[line] = 1
else:
print("WARNING: duplicate dog name: " + line)
print("dogsname_dic length = ", len(dogsname_dic))
except BaseException as be:
print("***** ERROR *****")
print(be)
for filename in results_dic:
#pet label image IS of dog/found in dognames_dic
pet_label = results_dic[filename][0]
classifier_label = results_dic[filename][1]
if pet_label in dogsname_dic:
if classifier_label in dogsname_dic:
#if classifier_label in dognames_dic, extend by 1, 1
results_dic[filename].extend((1, 1))
else:
#classifier is not a dog; extend by 1.0
results_dic[filename].extend((1, 0))
else:
if classifier_label in dogsname_dic:
results_dic[filename].extend((0, 1))
else:
results_dic[filename].extend((0, 0))
def adjust_results4_isadog_udacity(results_dic, dogsfile):
"""
Adjusts the results dictionary to determine if classifier correctly
classified images 'as a dog' or 'not a dog' especially when not a match.
Demonstrates if model architecture correctly classifies dog images even if
it gets dog breed wrong (not a match).
Parameters:
results_dic - Dictionary with key as image filename and value as a List
(index)idx 0 = pet image label (string)
idx 1 = classifier label (string)
idx 2 = 1/0 (int) where 1 = match between pet image and
classifier labels and 0 = no match between labels
--- where idx 3 & idx 4 are added by this function ---
idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and
0 = pet Image 'is-NOT-a' dog.
idx 4 = 1/0 (int) where 1 = Classifier classifies image
'as-a' dog and 0 = Classifier classifies image
'as-NOT-a' dog.
dogsfile - A text file that contains names of all dogs from ImageNet
1000 labels (used by classifier model) and dog names from
the pet image files. This file has one dog name per line
dog names are all in lowercase with spaces separating the
distinct words of the dog name. This file should have been
passed in as a command line argument. (string - indicates
text file's name)
Returns:
None - results_dic is mutable data type so no return needed.
"""
dognames_dic = dict()
with open(dogsfile, "r") as infile:
line = infile.readline()
while line != "":
line = line.rstrip()
if line not in dognames_dic:
dognames_dic[line] = 1
else:
print("Warning: duplicate dognames", line)
line = infile.readline()
for key in results_dic:
if results_dic[key][0] in dognames_dic:
if results_dic[key][1] in dognames_dic:
results_dic[key].extend((1, 1))
else:
results_dic[key].extend((1, 0))
else:
if results_dic[key][1] in dognames_dic:
results_dic[key].extend((0, 1))
else:
results_dic[key].extend((0, 0))
def calculates_results_stats(results_dic):
"""
Calculates statistics of the results of the run using classifier's model
architecture on classifying images. Then puts the results statistics in a
dictionary (results_stats) so that it's returned for printing as to help
the user to determine the 'best' model for classifying images. Note that
the statistics calculated as the results are either percentages or counts.
Parameters:
results_dic - Dictionary with key as image filename and value as a List
(index)idx 0 = pet image label (string)
idx 1 = classifier label (string)
idx 2 = 1/0 (int) where 1 = match between pet image and
                            classifier labels and 0 = no match between labels
idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and
0 = pet Image 'is-NOT-a' dog.
idx 4 = 1/0 (int) where 1 = Classifier classifies image
'as-a' dog and 0 = Classifier classifies image
'as-NOT-a' dog.
Returns:
results_stats - Dictionary that contains the results statistics (either a
percentage or a count) where the key is the statistic's
name (starting with 'pct' for percentage or 'n' for count)
and the value is the statistic's value
"""
#{'Beagle_01141.jpg': ['beagle', 'walker hound, walker foxhound', 0, 1, 1]}
# key = statistic's name (e.g. n_correct_dogs, pct_correct_dogs, n_correct_breed, pct_correct_breed)
# value = statistic's value (e.g. 30, 100%, 24, 80%)
# example_dictionary = {'n_correct_dogs': 30, 'pct_correct_dogs': 100.0, 'n_correct_breed': 24, 'pct_correct_breed': 80.0}
results_stats = dict()
# sets all counters to initial values of zero so they can be incremented
# while processing through the images in results_dic
results_stats['n_dogs_img'] = 0
results_stats['n_match'] = 0
results_stats['n_correct_dogs'] = 0
results_stats['n_correct_notdogs'] = 0
results_stats['n_correct_breed'] = 0
for key in results_dic:
# labels match exactly
if results_dic[key][2] == 1:
results_stats['n_match'] += 1
# pet image label is a dog AND labels match - counts correct breed
if sum(results_dic[key][2:]) == 3:
results_stats['n_correct_breed'] += 1
# pet image label is a dog - counts num dog images
if results_dic[key][3] == 1:
results_stats['n_dogs_img'] += 1
# classifier classifies image as Dog (& pet image is a dog)
# counts number of correct dog classifications
if results_dic[key][4] == 1:
results_stats['n_correct_dogs'] += 1
# pet image label is NOT a dog
else:
# classifier classifies image as NOT a Dog
# (& pet image is NOT a dog)
# counts number of correct dog classifications
if results_dic[key][4] == 0:
results_stats['n_correct_notdogs'] += 1
# calc num total images
results_stats['n_images'] = len(results_dic)
# calc num of not-a-dog images using images & dog images counts
results_stats['n_notdogs_img'] = (results_stats['n_images'] -
results_stats['n_dogs_img'])
# calc % correct matches
results_stats['pct_match'] = (results_stats['n_match'] /
results_stats['n_images']) * 100.0
    # calc % correctly classified dog images
results_stats['pct_correct_dogs'] = (results_stats['n_correct_dogs'] /
results_stats['n_dogs_img']) * 100.0
# calc % correct breed of dog
results_stats['pct_correct_breed'] = (results_stats['n_correct_breed'] /
results_stats['n_dogs_img']) * 100.0
# calc % correct not-a-dog images
# uses conditional statement for when no 'not a dog' images were submitted
if results_stats['n_notdogs_img'] > 0:
results_stats['pct_correct_notdogs'] = (results_stats[
'n_correct_notdogs'] /
results_stats['n_notdogs_img']) *100.0
else:
results_stats['pct_correct_notdogs'] = 0.0
return results_stats
def print_results(results_dic, results_stats, model,
print_incorrect_dogs = False, print_incorrect_breed = False):
"""
Prints summary results on the classification and then prints incorrectly
classified dogs and incorrectly classified dog breeds if user indicates
they want those printouts (use non-default values)
Parameters:
results_dic - Dictionary with key as image filename and value as a List
(index)idx 0 = pet image label (string)
idx 1 = classifier label (string)
idx 2 = 1/0 (int) where 1 = match between pet image and
classifier labels and 0 = no match between labels
idx 3 = 1/0 (int) where 1 = pet image 'is-a' dog and
0 = pet Image 'is-NOT-a' dog.
idx 4 = 1/0 (int) where 1 = Classifier classifies image
'as-a' dog and 0 = Classifier classifies image
'as-NOT-a' dog.
results_stats - Dictionary that contains the results statistics (either a
percentage or a count) where the key is the statistic's
name (starting with 'pct' for percentage or 'n' for count)
and the value is the statistic's value
model - pretrained CNN whose architecture is indicated by this parameter,
values must be: resnet alexnet vgg (string)
print_incorrect_dogs - True prints incorrectly classified dog images and
False doesn't print anything(default) (bool)
print_incorrect_breed - True prints incorrectly classified dog breeds and
False doesn't print anything(default) (bool)
Returns:
None - simply printing results.
"""
# OLD STRING FORMAT see following link:
# https://docs.python.org/2/library/stdtypes.html#string-formatting
# NEW STRING FORMAT see following link:
# https://docs.python.org/3/library/string.html#format-string-syntax
print("/nResults Summary for Model Architecture: ", model.upper())
print("%20s: %3d" % ("N Images", results_stats['n_images']))
print("%20s: %3d" % ("N Dog Images", results_stats['n_dogs_img']))
print("%20s: %3d" % ("N Not-Dog Images", results_stats['n_notdogs_img']))
# prints summary stats on model run
print(" ")
for key in results_stats:
if key[0] == 'p':
print("%20s: %5.1f" % (key, results_stats[key]))
if (print_incorrect_dogs and
((results_stats['n_correct_dogs'] +
results_stats['n_correct_notdogs'])
!= results_stats['n_images'])):
print("\nINCORRECT Dog/NOT Dog Assignments:")
for key in results_dic:
if sum(results_dic[key][3:]) == 1:
print("Real: {0} Classifier: {1}".format(
results_dic[key][0], results_dic[key][1]))
if (print_incorrect_breed and
(results_stats['n_correct_dogs'] != results_stats[
'n_correct_breed'])):
print("\nINCORRECT Dog Breed Assignment:")
for key in results_dic:
if sum(results_dic[key][3:]) == 2 and results_dic[key][2] == 0:
print("Real: {0} Classifier: {1}".format(
results_dic[key][0], results_dic[key][1]))
def print_result_dic(result_dic):
# temp code to print out result_dic
print("\nprint_result_dic")
print("\nMATCH:")
n_match = 0
n_notmatch = 0
for key in result_dic:
if result_dic[key][2] == 1:
n_match += 1
print("Pet Label: %-26s Classifier Label: %-30s" % (result_dic[
key][0],
result_dic[key][1]))
print("\nNOT A MATCH:")
for key in result_dic:
if result_dic[key][2] == 0:
n_notmatch += 1
print("Pet Label: %-26s Classifier Label: %-30s" % (result_dic[
key][0],
result_dic[key][1]))
print("\n# Total Images:", n_match + n_notmatch, "# Matches:", n_match,
" # NOT MATCH:", n_notmatch)
def print_petlabels_dict(petlabels_dict):
print("petlabels_dict has ", len(petlabels_dict), " key-value pairs. ")
prnt = 0
for key in petlabels_dict:
print("{} key: {} ; value: {}".format((prnt+1), key, petlabels_dict[key]))
prnt += 1
def print_command_line_args(in_args):
print("arg1 --dir: ", in_args.dir, "; arg2 --arch: ", in_args.arch,
"; arg3 --dogfile: ", in_args.dogfile)
def print_adjust_results4_isadog(result_dic):
match = 0
nomatch = 0
print("\nMATCH:")
for key in result_dic:
if result_dic[key][2] == 1:
match += 1
print("Pet Label: %-26s Classifier Label: %-30s PetLabelDog: "
"%1d ClassLabelDog: %1d" % (result_dic[key][0],
result_dic[key][1],
result_dic[key][3],
result_dic[key][4]))
print("\nNOT A MATCH:")
for key in result_dic:
if result_dic[key][2] == 0:
nomatch += 1
print("Pet Label: %-26s Classifier Label: %-30s PetLabelDog: "
"%1d ClassLabelDog: %1d" % (result_dic[key][0],
result_dic[key][1],
result_dic[key][3],
result_dic[key][4]))
print("\n# Total Images:", match + nomatch, "# Matches:", match,
" # NOT MATCH:", nomatch)
def check_results_stats(results_stats, result_dic):
n_images = len(result_dic)
n_pet_dog = 0
n_class_cdog = 0
n_class_cnotd = 0
n_match_breed = 0
for key in result_dic:
if result_dic[key][2] == 1:
if result_dic[key][3] == 1:
n_pet_dog += 1
if result_dic[key][4] == 1:
n_class_cdog += 1
n_match_breed += 1
else:
if result_dic[key][4] == 0:
n_class_cnotd += 1
else:
if result_dic[key][3] == 1:
n_pet_dog += 1
if result_dic[key][4] == 0:
n_class_cnotd += 1
n_pet_notd = n_images - n_pet_dog
pct_corr_dog = (n_class_cdog / n_pet_dog)*100
pct_corr_notdog = (n_class_cnotd / n_pet_notd)*100
pct_corr_breed = (n_match_breed / n_pet_dog)*100
print("\n ** Function's Stats:")
print("N images: %2d N Dog Images: %2d N Not Dog Images: %2d \nPct Corr "
"dog: %5.1f Pct Correct not-a-dog: %5.1f Pct Correct Breed: %5.1f"
% (results_stats['n_images'], results_stats['n_dogs_img'],
results_stats['n_notdogs_img'], results_stats['pct_correct_dogs'],
results_stats['pct_correct_notdogs'], results_stats['pct_correct_breed']))
print("\n ** Check Stats:")
print(
"N images: %2d N Dog Images: %2d N Not Dog Images: %2d \nPet Corr "
"dog: %5.lf Pct Correct not-a-dog: %5.1f Pct Correct Breed: %5.1f"
% (n_images, n_pet_dog, n_pet_notd, pct_corr_dog, pct_corr_notdog,
pct_corr_breed))
# Call to main function to run the program
if __name__ == "__main__":
main()
|
python
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
class RDMonitorPayload(object):
def __init__(self, dt=None, rt=None, di=None, with_rts=[], st=None, groups=[], mid=None, purl=None, local_path=None):
self.dt = dt
self.rt = rt
self.di = di
self.with_rts = with_rts
self.st = st
self.mid = mid
self.groups = groups
self.purl = purl
self.local_path = local_path
def equals(self, obj):
if self == obj:
return True
if obj is None or getattr(self, '__class__') != getattr(
obj, '__class__'):
return False
other = obj
        if other.mid == self.mid:
return True
else:
return False
def hash_code(self):
return 0 if self.mid is None else self.mid.__hash__()
def to_json(self):
bf = []
# bf.append("{")
if self.dt is not None:
bf.append("\"dt\":\"" + self.dt + "\"")
if self.rt is not None:
bf.append("\"rt\":\"" + self.rt + "\"")
if self.di is not None:
bf.append("\"di\":\"" + str(self.di) + "\"")
if len(self.with_rts) != 0 :
bf.append("\"with_rts\":" + json.dumps(self.with_rts))
if self.st is not None:
bf.append("\"st\":\"" + str(self.st) + "\"")
if self.mid is not None:
bf.append("\"mid\":\"" + self.mid + "\"")
if len(self.groups) != 0:
bf.append("\"groups\":" + json.dumps(self.groups))
if self.purl is not None:
bf.append("\"purl\":\"" + self.purl + "\"")
# if self.local_path is not None:
# bf.append("\"local_path\":\"" + self.local_path + "\"")
# bf.append("}")
return '{' + ','.join(bf) + '}'
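# Minimal usage sketch (the field values below are illustrative assumptions):
if __name__ == '__main__':
    payload = RDMonitorPayload(dt='device', rt='oic.r.temperature', mid='m-1')
    # Only fields that were set are serialized, in declaration order:
    print(payload.to_json())
    # -> {"dt":"device","rt":"oic.r.temperature","mid":"m-1"}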
|
python
|
import os
import sys
import hashlib
def e(s):
if type(s) == str:
return s
return s.encode('utf-8')
def d(s):
if type(s) == unicode:
return s
return unicode(s, 'utf-8')
def mkid(s):
return hashlib.sha1(e(s)).hexdigest()[:2*4]
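# Example (for reference): mkid("foo") should yield "0beec7b5", the first
# 2*4 hex digits of the SHA-1 digest of "foo".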
def running_in_virtualenv():
return hasattr(sys, 'real_prefix')
def running_in_tools_labs():
return os.path.exists('/etc/wmflabs-project')
class Logger(object):
def __init__(self):
self._mode = 'INFO'
def progress(self, message):
message = e(message)
if not sys.stderr.isatty():
return
if self._mode == 'PROGRESS':
print >>sys.stderr, '\r',
print >>sys.stderr, message,
self._mode = 'PROGRESS'
def info(self, message):
message = e(message)
if self._mode == 'PROGRESS':
print >>sys.stderr
print >>sys.stderr, message
self._mode = 'INFO'
|
python
|
import unittest
import pprint
import os
from numpy import testing
import invest_natcap.fisheries.fisheries_hst as main
import invest_natcap.fisheries.fisheries_hst_io as io
pp = pprint.PrettyPrinter(indent=4)
workspace_dir = './invest-data/test/data/fisheries/'
data_dir = './invest-data/Fisheries'
inputs_dir = os.path.join(data_dir, 'input/Habitat_Scenario_Tool')
outputs_dir = os.path.join(workspace_dir, 'output')
class TestConvertSurvivalMatrix(unittest.TestCase):
def setUp(self):
self.args = {
'workspace_dir': workspace_dir,
'sexsp': 'No',
'population_csv_uri': os.path.join(
inputs_dir, 'pop_params.csv'),
'habitat_dep_csv_uri': os.path.join(
inputs_dir, 'habitat_dep_params.csv'),
'habitat_chg_csv_uri': os.path.join(
inputs_dir, 'habitat_chg_params.csv'),
'gamma': 0.5,
}
self.check = {
'workspace_dir': workspace_dir,
'sexsp': 'No',
'population_csv_uri': os.path.join(
outputs_dir, 'pop_params_spreadsheet_mod.csv'),
'habitat_dep_csv_uri': os.path.join(
inputs_dir, 'habitat_dep_params.csv'),
'habitat_chg_csv_uri': os.path.join(
inputs_dir, 'habitat_chg_params.csv'),
'gamma': 0.5,
}
def test_convert_spreadsheet(self):
'''
Test an example from the provided spreadsheet
'''
# Fetch pre and post variables
vars_dict = io.fetch_args(self.args)
check = io.fetch_args(self.check)
# Run operation
guess = main.convert_survival_matrix(vars_dict)
# Check for correctness
testing.assert_array_almost_equal(
guess['Surv_nat_xsa_mod'], check['Surv_nat_xsa'])
if __name__ == '__main__':
unittest.main()
|
python
|
"""
Decorators
"""
import sys
from contextlib import contextmanager
import mock
from maya_mock.cmds import MockedCmdsSession
from maya_mock.pymel import MockedPymelSession, MockedPymelNode, MockedPymelPort
@contextmanager
def _patched_sys_modules(data):
"""
Temporary override sys.modules with provided data.
This will take control of the import process.
:param dict data: The data to overrides.
"""
# Hold sys.modules
old_data = {key: sys.modules.get(key) for key in data}
# Patch sys.modules
for key, val in data.items():
sys.modules[key] = val
yield
# Restore sys.modules
    for key, val in old_data.items():
if val is None:
sys.modules.pop(key)
else:
sys.modules[key] = val
def _create_cmds_module_mock(cmds):
"""
Create a MagicMock for the cmds module.
"""
kwargs = {"cmds": cmds}
module_maya = mock.MagicMock(**kwargs)
return module_maya
@contextmanager
def mock_cmds(session):
"""
Context that temporary intercept maya.session with our mock.
Use this to run complex maya operations in a mocked env.
Usage:
    >>> with mock_cmds(session) as cmds:
    >>> cmds.createNode('transform')
:param MockedSession session: The session to mock.
:return: A context
:rtype: contextmanager.GeneratorContextManager
"""
cmds = (
session
if isinstance(session, MockedCmdsSession)
else MockedCmdsSession(session)
)
# Prepare sys.modules patch
module_maya = _create_cmds_module_mock(cmds)
new_sys = {"maya": module_maya, "maya.cmds": cmds}
with _patched_sys_modules(new_sys):
yield cmds
def _create_pymel_module_mock(pymel):
"""
Create a pymel module mock from a mocked pymel session.
:param MockedPymelSession pymel: A mocked pymel session
:return: A MagicMock
:rtype: mock.MagicMock
"""
kwargs = {
"core.PyNode": MockedPymelNode,
"core.Attribute": MockedPymelPort,
}
for attr in dir(pymel):
if not attr.startswith("_"):
kwargs["core.{}".format(attr)] = getattr(pymel, attr)
module_pymel = mock.MagicMock(**kwargs)
return module_pymel
@contextmanager
def mock_pymel(session):
"""
Context that temporary intercept maya.cmds with our mock.
Use this to run complex maya operations in a mocked env.
Usage:
>>> with mock_pymel(session) as pymel:
>>> pymel.createNode('transform')
:param MockedPymelSession session: The session to mock.
:return: A context
:rtype: contextmanager.GeneratorContextManager
"""
pymel = (
session
if isinstance(session, MockedPymelSession)
else MockedPymelSession(session)
)
# Prepare sys.modules patch
module_pymel = _create_pymel_module_mock(pymel)
sys_data = {
"pymel": module_pymel,
"pymel.core": module_pymel.core,
"pymel.core.PyNode": module_pymel.core.PyNode,
"pymel.core.Attribute": module_pymel.core.Attribute,
}
with _patched_sys_modules(sys_data):
yield pymel
|
python
|
# -*- coding: utf-8 -*-
from .torsimany import main
main()
|
python
|
from cumulusci.tasks.metadata_etl.base import (
BaseMetadataETLTask,
BaseMetadataSynthesisTask,
BaseMetadataTransformTask,
MetadataSingleEntityTransformTask,
MetadataOperation,
)
from cumulusci.tasks.metadata_etl.duplicate_rules import SetDuplicateRuleStatus
from cumulusci.tasks.metadata_etl.layouts import AddRelatedLists
from cumulusci.tasks.metadata_etl.permissions import AddPermissionSetPermissions
from cumulusci.tasks.metadata_etl.value_sets import AddValueSetEntries
from cumulusci.tasks.metadata_etl.sharing import SetOrgWideDefaults
flake8 = (
BaseMetadataETLTask,
BaseMetadataSynthesisTask,
BaseMetadataTransformTask,
MetadataSingleEntityTransformTask,
AddRelatedLists,
AddPermissionSetPermissions,
AddValueSetEntries,
SetOrgWideDefaults,
MetadataOperation,
SetDuplicateRuleStatus,
)
|
python
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modifications copyright (C) 2021 Immanuel Weber
# derived from https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/callbacks/lr_monitor.py
def get_scheduler_names(schedulers):
names = []
for scheduler in schedulers:
sch = scheduler["scheduler"]
if scheduler["name"] is not None:
name = scheduler["name"]
else:
opt_name = "lr-" + sch.optimizer.__class__.__name__
i, name = 1, opt_name
# Multiple scheduler of the same type
while True:
if name not in names:
break
i, name = i + 1, f"{opt_name}-{i}"
param_groups = sch.optimizer.param_groups
if len(param_groups) != 1:
for i in range(len(param_groups)):
names.append(f"{name}/pg{i + 1}")
else:
names.append(name)
return names
def get_lrs(schedulers, scheduler_names, interval):
latest_stat = {}
for name, scheduler in zip(scheduler_names, schedulers):
if scheduler["interval"] == interval or interval == "any":
opt = scheduler["scheduler"].optimizer
param_groups = opt.param_groups
for i, pg in enumerate(param_groups):
suffix = f"/pg{i + 1}" if len(param_groups) > 1 else ""
lr = {f"{name}{suffix}": pg.get("lr")}
latest_stat.update(lr)
else:
print(f"warning: interval {scheduler['interval']} not supported yet.")
return latest_stat
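# Illustrative usage (not part of the original module): a minimal sketch showing how
# the two helpers above could be exercised with a PyTorch-Lightning-style scheduler
# configuration. The model and optimizer chosen below are assumptions for the sketch.
if __name__ == "__main__":
    import torch

    model = torch.nn.Linear(2, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler_cfg = {
        "scheduler": torch.optim.lr_scheduler.StepLR(optimizer, step_size=1),
        "name": None,
        "interval": "epoch",
    }
    names = get_scheduler_names([scheduler_cfg])
    print(names)  # expected: ['lr-SGD']
    print(get_lrs([scheduler_cfg], names, "epoch"))  # expected: {'lr-SGD': 0.1}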
|
python
|
features = [1,2,3]
prices = [1,2,3]
theta = [0,0]
LEARNING_RATE=0.01
NO_TRAINING_EXAMPLES=len(features)
EPSILON=0.000000001
#Cost function to calculate half the average of the squared errors for the given theta
def cost(features, prices, theta):
sum = 0
for i in range(NO_TRAINING_EXAMPLES):
sum += (predict(features[i],theta)-prices[i])**2
cost = sum/(2*NO_TRAINING_EXAMPLES)
return cost
#prediction function to find the price given the feature and theta
def predict(feature,theta):
y=theta[0]+theta[1]*feature
return y
#gradient descent algorithm to find the value of theta that makes the prediction most accurate
#i.e causing the cost function to be minimum
def gradient_descent(features,prices,theta):
old_cost=cost(features,prices,theta)
while True:
#evaluate the partial derivative for theta0 and theta1
sum0 = 0
sum1 = 0
for i in range(NO_TRAINING_EXAMPLES):
sum0 += (predict(features[i],theta)-prices[i])
sum1 += (predict(features[i], theta) - prices[i]) * features[i]
#update both thetas simultaneously
theta[0] = theta[0] - (LEARNING_RATE / NO_TRAINING_EXAMPLES) * sum0
theta[1] = theta[1] - (LEARNING_RATE / NO_TRAINING_EXAMPLES) * sum1
new_cost=cost(features,prices,theta)
#test for convergence
if abs(old_cost-new_cost) < EPSILON:
break
else:
old_cost=new_cost
return theta
print(gradient_descent(features,prices,theta))
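# Sanity check (illustrative addition, not in the original script): with
# features == prices, the model y = theta0 + theta1 * x is fit exactly by
# theta = [0, 1], where the cost is zero, so the theta printed above should
# converge towards [0, 1].
print(cost(features, prices, [0, 1]))  # expected to be 0.0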
|
python
|
__all__ = ('WeakMap',)
from .docs import has_docs
from .removed_descriptor import RemovedDescriptor
from .weak_core import WeakReferer, add_to_pending_removals
@has_docs
class _WeakMapCallback:
"""
Callback used by ``WeakMap``-s.
Attributes
----------
_parent : ``WeakReferer`` to ``WeakMap``
The parent weak map.
"""
__slots__ = ('_parent', )
@has_docs
def __new__(cls, parent):
"""
Creates a new ``_WeakMapCallback`` bound to the given ``WeakMap``.
Parameters
----------
parent : ``WeakMap``
The parent weak map.
"""
parent = WeakReferer(parent)
self = object.__new__(cls)
self._parent = parent
return self
@has_docs
def __call__(self, reference):
"""
Called when an element of the respective weak map is garbage collected.
Parameters
----------
reference : ``WeakReferer``
Weakreference to the respective object.
"""
parent = self._parent()
if parent is None:
return
if parent._iterating:
add_to_pending_removals(parent, reference)
else:
try:
dict.__delitem__(parent, reference)
except KeyError:
pass
@has_docs
class WeakMap(dict):
"""
    A weak map is a mix of a weak dictionary and a weak set. It can be used to retrieve an already existing,
    weakly referenced value from itself.
Attributes
----------
_pending_removals : `None`, `set` of ``WeakReferer``
Pending removals of the weak map if applicable.
_iterating : `int`
        Whether the weak map is iterating and how many times.
_callback : ``_WeakMapCallback``
Callback added to the ``WeakMap``'s weak keys.
Class Attributes
----------------
MAX_REPR_ELEMENT_LIMIT : `int` = `50`
The maximal amount of items to render by ``.__repr__``.
Notes
-----
``WeakMap``-s are weakreferable.
"""
__slots__ = ('__weakref__', '_pending_removals', '_iterating', '_callback')
MAX_REPR_ELEMENT_LIMIT = 50
@has_docs
def _commit_removals(self):
"""
Commits the pending removals of the weak map if applicable.
"""
if self._iterating:
return
pending_removals = self._pending_removals
if pending_removals is None:
return
for reference in pending_removals:
try:
dict.__delitem__(self, reference)
except KeyError:
pass
self._pending_removals = None
# __class__ -> same
@has_docs
def __contains__(self, key):
"""Returns whether the weak map contains the given key."""
try:
reference = WeakReferer(key)
except TypeError:
return False
return dict.__contains__(self, reference)
# __delattr__ -> same
@has_docs
def __delitem__(self, key):
"""Deletes the given key from the weak map"""
try:
reference = WeakReferer(key)
except TypeError:
raise KeyError(key) from None
try:
dict.__delitem__(self, reference)
except KeyError as err:
raise KeyError(key) from None
# __dir__ -> same
# __doc__ -> same
def __eq__(self, other):
"""returns whether the two weak maps are equal."""
if isinstance(other, type(self)):
return dict.__eq__(self, other)
if isinstance(other, set):
pass
elif hasattr(type(other), '__iter__'):
other = set(other)
else:
return NotImplemented
self_set = set(iter(self))
return self_set == other
# __format__ -> same
# __ge__ -> same
# __getattribute__ -> same
@has_docs
def __getitem__(self, key):
"""Gets the already existing key from the weak map, which matches the given one."""
try:
reference = WeakReferer(key)
except TypeError:
raise KeyError(key) from None
reference = dict.__getitem__(self, reference)
key = reference()
if (key is None):
if self._iterating:
add_to_pending_removals(self, reference)
else:
dict.__delitem__(self, reference)
raise KeyError(key)
return key
# __gt__ -> same
# __hash__ -> same
@has_docs
def __init__(self, iterable=None):
"""
Creates a new ``WeakMap`` from the given iterable.
Parameters
----------
iterable : `None`, `iterable` = `None`, Optional
Iterable to update the created map with.
"""
self._pending_removals = None
self._iterating = 0
self._callback = _WeakMapCallback(self)
if (iterable is not None):
self.update(iterable)
# __init_subclass__ -> same
@has_docs
def __iter__(self):
"""
Iterates over the weak map's elements.
This method is an iterable generator,
"""
self._iterating += 1
try:
for reference in dict.__iter__(self):
key = reference()
if (key is None):
add_to_pending_removals(self, reference)
continue
yield key
continue
finally:
self._iterating -= 1
self._commit_removals()
# __le__ -> same
@has_docs
def __len__(self):
"""Returns the length of the weak map."""
length = dict.__len__(self)
pending_removals = self._pending_removals
if (pending_removals is not None):
length -= len(pending_removals)
return length
# __lt__ -> same
def __ne__(self, other):
"""returns whether the two weak maps are equal."""
if isinstance(other, type(self)):
return dict.__ne__(self, other)
if isinstance(other, set):
pass
elif hasattr(type(other), '__iter__'):
other = set(other)
else:
return NotImplemented
self_set = set(iter(self))
return self_set != other
# __new__ -> same
@has_docs
def __reduce__(self):
"""Reduces the map to a picklable object."""
return (type(self), list(self))
@has_docs
def __reduce_ex__(self, version):
"""Reduces the map to a picklable object."""
return type(self).__reduce__(self)
@has_docs
def __repr__(self):
"""Returns the weak map's representation."""
result = [self.__class__.__name__, '({']
if len(self):
limit = self.MAX_REPR_ELEMENT_LIMIT
collected = 0
for reference in dict.__iter__(self):
key = reference()
if (key is None):
add_to_pending_removals(self, reference)
continue
result.append(repr(key))
result.append(', ')
collected +=1
if collected != limit:
continue
leftover = len(self) - collected
if leftover:
result.append('...}, ')
result.append(str(leftover))
result.append(' truncated)')
else:
result[-1] = '})'
break
else:
result[-1] = '})'
self._commit_removals()
else:
result.append('})')
return ''.join(result)
# __setattr__ -> same
__setitem__ = RemovedDescriptor()
# __sizeof__ -> same
__str__ = __repr__
# __subclasshook__ -> same
@has_docs
def clear(self):
"""
        Clears the weak map.
"""
dict.clear(self)
self._pending_removals = None
@has_docs
def copy(self):
"""
Copies the weak map.
Returns
-------
new : ``WeakMap``
"""
new = dict.__new__(type(self))
new._iterating = 0
new._pending_removals = None
new._callback = callback = _WeakMapCallback(new)
for reference in dict.__iter__(self):
key = reference()
if (key is None):
add_to_pending_removals(self, reference)
continue
reference = WeakReferer(key, callback)
dict.__setitem__(new, reference, reference)
continue
self._commit_removals()
return new
@has_docs
def get(self, key, default=None):
"""
Gets the key of the weak map, which matches the given one.
Parameters
----------
key : `Any`
A key to match.
default : `Any` = `None`, Optional
Default value to return if the given `key` could not be matched.
Returns
-------
real_key : `Any`, `default`
The matched key. If no key was matched returns the `default` value.
"""
try:
reference = WeakReferer(key)
except TypeError:
return default
real_reference = dict.get(self, reference, reference)
if real_reference is reference:
return default
real_key = real_reference()
if (real_key is not None):
return real_key
if self._iterating:
add_to_pending_removals(self, real_reference)
else:
dict.__delitem__(self, real_reference)
return default
items = RemovedDescriptor()
keys = RemovedDescriptor()
@has_docs
def pop(self, key, default=...):
"""
Pops a key from the weak map which matches the given one.
Parameters
----------
key : `Any`
A key to match.
default : `Any`, Optional
Default value to return if the given `key` could not be matched.
Returns
-------
real_key : `Any`, `default`
The matched key. If no key was matched and `default` value is given, then returns that.
Raises
------
KeyError
            If `key` could not be matched and no `default` value was given either.
"""
try:
reference = WeakReferer(key)
except TypeError:
pass
else:
real_reference = dict.pop(self, reference, ...)
if (real_reference is not ...):
real_key = real_reference()
if (real_key is not None):
return real_key
if self._iterating:
add_to_pending_removals(self, real_reference)
else:
dict.__delitem__(self, real_reference)
if default is ...:
raise KeyError(key)
return default
popitem = RemovedDescriptor()
setdefault = RemovedDescriptor()
@has_docs
def update(self, iterable):
"""
Updates the map with the given iterable.
Parameters
----------
iterable : `iterable`
The iterable to update the map with.
Raises
------
TypeError
            If the given value is not `iterable`, or any of its elements is not weakreferable.
"""
if hasattr(type(iterable), '__iter__'):
            # ``WeakMap.set`` deduplicates elements, so simply feed every element through it.
for element in iterable:
self.set(element)
else:
raise TypeError(
f'Parameter `iterable` must be an iterable, got {iterable.__class__.__name__}; {iterable!r}.'
)
values = RemovedDescriptor()
@has_docs
def set(self, key):
"""
        Sets a key to the ``WeakMap`` and then returns it. If the given key is already present in the ``WeakMap``,
returns that instead.
Parameters
----------
key : `Any`
A key to match.
Returns
-------
real_key : `Any`
The matched key, or the given one.
Raises
------
TypeError
            If `key` does not support weak referencing.
"""
reference = WeakReferer(key, self._callback)
real_reference = dict.get(self, reference, None)
if (real_reference is not None):
real_key = real_reference()
if (real_key is not None):
return real_key
dict.__setitem__(self, reference, reference)
return key
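# Illustrative usage (not part of the original module): ``WeakMap`` deduplicates
# weakly referenced objects, handing back the already stored instance when an equal
# key is ``set`` again. A sketch, assuming a hashable, weakreferable ``Key`` class
# defined by the caller:
#
#     >>> weak_map = WeakMap()
#     >>> first = Key(1)
#     >>> weak_map.set(first) is first
#     True
#     >>> weak_map.set(Key(1)) is first   # equal key -> the stored one is returned
#     True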
|
python
|
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from database import get_data_cache, get_all_food_data
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css', '/assets/style.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.config.suppress_callback_exceptions = True
colors = ['#000000', '#FC6D41', '#274228', '#274228', '#7FB800', '#955E42', '#000000', '#F0A202', '#706C61', '#65743A']
|
python
|
# Generated by Django 3.1.7 on 2021-04-07 15:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Rubric',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=20, unique=True, verbose_name='Название')),
                ('order', models.SmallIntegerField(db_index=True, default=0, verbose_name='Порядок')),
],
),
migrations.CreateModel(
name='SubRubric',
fields=[
],
options={
                'verbose_name': 'Подрубрика',
                'verbose_name_plural': 'Подрубрики',
'ordering': ('super_rubric__order', 'super_rubric__name', 'order', 'name'),
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('main.rubric',),
),
migrations.CreateModel(
name='SuperRubric',
fields=[
],
options={
                'verbose_name': 'Надрубрика',
                'verbose_name_plural': 'Надрубрики',
'ordering': ('order', 'name'),
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('main.rubric',),
),
migrations.AddField(
model_name='rubric',
name='super_rubric',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='main.superrubric', verbose_name='Надрубрика'),
),
]
|
python
|
# based on django-markdownify
# https://github.com/erwinmatijsen/django-markdownify
# https://django-markdownify.readthedocs.io/en/latest/settings.html
from functools import partial
from django import template
from django.conf import settings
import bleach
from markdownx.utils import markdownify as markdownx_markdownify
def markdownify(value):
# Get the settings or set defaults if not set
# Bleach settings
whitelist_tags = getattr(settings, 'MARKDOWNX_WHITELIST_TAGS', bleach.sanitizer.ALLOWED_TAGS)
whitelist_attrs = getattr(settings, 'MARKDOWNX_WHITELIST_ATTRS', bleach.sanitizer.ALLOWED_ATTRIBUTES)
whitelist_styles = getattr(settings, 'MARKDOWNX_WHITELIST_STYLES', bleach.sanitizer.ALLOWED_STYLES)
whitelist_protocols = getattr(settings, 'MARKDOWNX_WHITELIST_PROTOCOLS', bleach.sanitizer.ALLOWED_PROTOCOLS)
# Markdown settings
strip = getattr(settings, 'MARKDOWNX_STRIP', True)
# Bleach Linkify
linkify = None
linkify_text = getattr(settings, 'MARKDOWNX_LINKIFY_TEXT', True)
if linkify_text:
linkify_parse_email = getattr(settings, 'MARKDOWNX_LINKIFY_PARSE_EMAIL', False)
linkify_callbacks = getattr(settings, 'MARKDOWNX_LINKIFY_CALLBACKS', None)
linkify_skip_tags = getattr(settings, 'MARKDOWNX_LINKIFY_SKIP_TAGS', None)
linkifyfilter = bleach.linkifier.LinkifyFilter
linkify = [partial(linkifyfilter,
callbacks=linkify_callbacks,
skip_tags=linkify_skip_tags,
parse_email=linkify_parse_email
)]
# Convert markdown to html
    html = markdownx_markdownify(value)  # .replace('&amp;', '&')
# Sanitize html if wanted
if getattr(settings, 'MARKDOWNX_BLEACH', True):
cleaner = bleach.Cleaner(tags=whitelist_tags,
attributes=whitelist_attrs,
styles=whitelist_styles,
protocols=whitelist_protocols,
strip=strip,
filters=linkify,
)
html = cleaner.clean(html)
return html
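# Registration sketch (assumption, not shown in the original file): template tag
# modules are normally exposed through a ``Library`` instance so the filter can be
# used as ``{{ value|markdownify }}`` in templates.
register = template.Library()
register.filter('markdownify', markdownify)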
|
python
|
"""
Artificial Images Simulator
============================
The Artificial Images Simulator (AIS) class was developed to generate
artificial star images, similar to those images that would be acquired by
using the acquisition system of the instrument. To accomplish this,
the AIS models the star flux as a 2D Gaussian distribution. Then, the star
flux is added to an image with a background level given by counts distribution
of an image of the SPARC4 cameras, as a function of its operation mode.
"""
import openpyxl
import astropy.io.fits as fits
from PSF import Point_Spread_Function
from BGI import Background_Image
from HDR import Header
from CHC import (Concrete_Channel_1,
Concrete_Channel_2,
Concrete_Channel_3,
Concrete_Channel_4)
class Artificial_Image_Simulator:
"""Create an image cube with the star flux distribution.
Parameters
----------
    star_magnitude : float
Magnitude of the star
sky_magnitude: float
Magnitude of the sky
    gaussian_std: int
Number of pixels of the gaussian standard deviation
ccd_operation_mode: dictionary
A python dictionary with the CCD operation mode. The allowed keywords
values for the dictionary are
* em_mode: {0, 1}
Use the 0 for the Conventional Mode and 1 for the EM Mode
* em_gain: float
Electron Multiplying gain
* preamp: {1, 2}
Pre-amplification
* hss: {0.1, 1, 10, 20, 30}
Horizontal Shift Speed (readout rate) in MHz
* bin: int
Number of the binned pixels
* t_exp: float
Exposure time in seconds
ccd_temp: float, optional
CCD temperature
serial_number: {9914, 9915, 9916 or 9917}, optional
CCD serial number
bias_level: int, optional
Bias level, in ADU, of the image
image_dir: str, optional
Directory where the image should be saved
Yields
------
image cube: array like
An image cube in the FITS format with the star flux distribution
Notes
-----
    Explain the code; background; step-by-step.
Examples
--------
    Include examples; an illustrative usage sketch is given after the class definition.
References
----------
.. [#Bernardes_2018] Bernardes, D. V., Martioli, E., and Rodrigues, C. V., โCharacterization of the SPARC4 CCDsโ, <i>Publications of the Astronomical Society of the Pacific</i>, vol. 130, no. 991, p. 95002, 2018. doi:10.1088/1538-3873/aacb1e.
"""
def __init__(self,
star_magnitude,
sky_magnitude,
gaussian_std,
ccd_operation_mode,
channel,
bias_level=500,
image_dir=''):
"""Initialize the class."""
if type(star_magnitude) not in [int, float]:
raise ValueError('The star flux must be a number: '
+ f'{star_magnitude}')
elif star_magnitude <= 0:
raise ValueError(
f'The star flux must be greater than zero: {star_magnitude}')
else:
self.star_magnitude = star_magnitude
if type(sky_magnitude) not in [int, float]:
raise ValueError(f'The sky flux must be a number: {sky_magnitude}')
elif sky_magnitude <= 0:
raise ValueError(
f'The sky flux must be greater than zero: {sky_magnitude}')
else:
self.sky_magnitude = sky_magnitude
if type(gaussian_std) is not int:
raise ValueError(
f'The gaussian standard deviation must be \
an integer: {gaussian_std}')
elif gaussian_std <= 0:
raise ValueError(
f'The gaussian standard deviation must be greater \
than zero: {gaussian_std}')
else:
self.gaussian_std = gaussian_std
if channel in [1, 2, 3, 4]:
self.channel = channel
else:
raise ValueError(
                'There is no camera with the provided '
+ f'serial number: {channel}')
if type(bias_level) is not int:
raise ValueError(
f'The bias level must be an integer: {bias_level}')
elif bias_level <= 0:
raise ValueError(f'The bias level must be positive: {bias_level}')
else:
self.bias_level = bias_level
if type(image_dir) is not str:
raise ValueError(
f'The directory path must be a string: {image_dir}')
else:
if image_dir != '':
if '/' not in image_dir[-1]:
image_dir += '/'
self.image_dir = image_dir
self._verify_ccd_operation_mode(ccd_operation_mode)
self._configure_gain(ccd_operation_mode)
self._configure_image_name(ccd_operation_mode)
CHC = 0
if channel == 1:
CHC = Concrete_Channel_1(ccd_operation_mode['ccd_temp'],
sparc4_acquisition_mode='phot')
elif channel == 2:
CHC = Concrete_Channel_2(ccd_operation_mode['ccd_temp'],
sparc4_acquisition_mode='phot')
elif channel == 3:
CHC = Concrete_Channel_3(ccd_operation_mode['ccd_temp'],
sparc4_acquisition_mode='phot')
elif channel == 4:
CHC = Concrete_Channel_4(ccd_operation_mode['ccd_temp'],
sparc4_acquisition_mode='phot')
self.CHC = CHC
self.PSF = Point_Spread_Function(
CHC, ccd_operation_mode, self.ccd_gain, self.gaussian_std)
self.BGI = Background_Image(CHC, ccd_operation_mode, self.ccd_gain,
self.bias_level)
self.HDR = Header(ccd_operation_mode, self.ccd_gain,
CHC.get_serial_number())
def _verify_ccd_operation_mode(self, ccd_operation_mode):
"""Verify if the provided CCD operation mode is correct."""
em_mode = ccd_operation_mode['em_mode']
em_gain = ccd_operation_mode['em_gain']
hss = ccd_operation_mode['hss']
preamp = ccd_operation_mode['preamp']
binn = ccd_operation_mode['binn']
t_exp = ccd_operation_mode['t_exp']
ccd_temp = ccd_operation_mode['ccd_temp']
dic_keywords_list = [
'binn', 'ccd_temp', 'em_gain', 'em_mode', 'hss', 'preamp', 't_exp']
for key in ccd_operation_mode.keys():
if key not in dic_keywords_list:
raise ValueError(
f'The name provided is not a CCD parameter: {key}')
        if sorted(ccd_operation_mode.keys()) != sorted(dic_keywords_list):
raise ValueError(
'There is a missing parameter of the CCD operation mode')
if em_mode not in [0, 1]:
raise ValueError(
f'Invalid value for the EM mode: {em_mode}')
if em_mode == 0:
if em_gain != 1:
raise ValueError(
'The EM Gain must be 1 for the Conventional'
+ f' Mode: {em_gain}')
else:
            if type(em_gain) not in [float, int]:
raise ValueError(
f'The EM gain must be a number: {em_gain}')
elif em_gain < 2 or em_gain > 300:
raise ValueError(
f'EM gain out of range [2, 300]: {em_gain}')
if preamp not in [1, 2]:
raise ValueError(
f'Invalid value for the pre-amplification: {preamp}')
if hss not in [0.1, 1, 10, 20, 30]:
raise ValueError(
f'Invalid value for the Readout rate: {hss}')
if binn not in [1, 2]:
raise ValueError(
                f'Invalid value for the binning: {binn}')
if type(t_exp) not in [float, int]:
raise ValueError(
f'The exposure time must be a number: {t_exp}')
elif ccd_operation_mode['t_exp'] < 1e-5:
raise ValueError(
f'Invalid value for the exposure time: {t_exp}')
if type(ccd_temp) not in [float, int]:
raise ValueError(
f'The CCD temperature must be a number: {ccd_temp}')
if ccd_temp < -80 or ccd_temp > 20:
raise ValueError(
f'CCD temperature out of range [-80, 20]: {ccd_temp}')
def get_channel_ID(self):
"""Return the ID for the respective SPARC4 channel."""
return self.CHC.get_channel_ID()
def _configure_image_name(self, ccd_operation_mode,
include_star_mag=False):
"""Create the image name.
The image name will be created based on the provided information
Parameters
----------
        include_star_mag: bool, optional
            Indicate whether the star magnitude value should be included in the
            image name
"""
dic = ccd_operation_mode
em_gain = '_G' + str(dic['em_gain'])
em_mode = 'CONV'
if dic['em_mode'] == 1:
em_mode = 'EM'
hss = '_HSS' + str(dic['hss'])
preamp = '_PA' + str(dic['preamp'])
binn = '_B' + str(dic['binn'])
t_exp = '_TEXP' + str(dic['t_exp'])
self.image_name = em_mode + hss + preamp + binn + t_exp + em_gain
if include_star_mag:
star_flux = '_S' + str(self.star_magnitude)
self.image_name += star_flux
def _configure_gain(self, ccd_operation_mode):
"""Configure the CCD gain based on its operation mode."""
em_mode = ccd_operation_mode['em_mode']
hss = ccd_operation_mode['hss']
preamp = ccd_operation_mode['preamp']
tab_index = 0
if hss == 0.1:
tab_index = 23
elif hss == 1:
tab_index = 19
if em_mode == 1:
tab_index = 15
elif hss == 10:
tab_index = 11
elif hss == 20:
tab_index = 7
elif hss == 30:
tab_index = 3
else:
            raise ValueError(f'Unexpected value for the readout rate: {hss}')
if preamp == 2:
tab_index += 2
spreadsheet = openpyxl.load_workbook(
f'code/RNC/spreadsheet/Channel {self.channel}'
+ '/Read_noise_and_gain_values.xlsx').active
self.ccd_gain = spreadsheet.cell(tab_index, 5).value
def create_artificial_image(self):
"""Create the artificial star image.
        This function sums the background image with the star PSF image
        to create an artificial image, similar to those acquired by the
        SPARC4 cameras.
Returns
-------
Star Image:
A FITS file with the calculated artificial image
"""
background = self.BGI.create_background_image()
star_PSF = self.PSF.create_star_PSF()
header = self.HDR.create_header()
fits.writeto(self.image_dir + self.image_name + '.fits',
background + star_PSF, overwrite=True, header=header)
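# Illustrative usage (not part of the original module; the parameter values are
# assumptions chosen only to satisfy ``_verify_ccd_operation_mode``, and running it
# requires the channel spreadsheets to be present on disk):
#
#     ccd_operation_mode = {
#         'em_mode': 0, 'em_gain': 1, 'preamp': 1, 'hss': 1,
#         'binn': 1, 't_exp': 1, 'ccd_temp': -70,
#     }
#     ais = Artificial_Image_Simulator(star_magnitude=12, sky_magnitude=20,
#                                      gaussian_std=3,
#                                      ccd_operation_mode=ccd_operation_mode,
#                                      channel=1)
#     ais.create_artificial_image()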
|
python
|
from rest_framework import viewsets
from .models import RESP, REGONError, REGON, JSTConnection, Institution, ESP
from .serializers import RESPSerializer, REGONSerializer, REGONErrorSerializer, JSTConnectionSerializer, \
InstitutionSerializer, ESPSerializer
class InstitutionViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Institution.objects.prefetch_related('esp_set', 'regon_data__regonerror_set').\
select_related('jstconnection', 'regon_data', 'resp').all()
serializer_class = InstitutionSerializer
class ESPViewSet(viewsets.ReadOnlyModelViewSet):
queryset = ESP.objects.select_related('institution').all()
serializer_class = ESPSerializer
class RESPViewSet(viewsets.ReadOnlyModelViewSet):
queryset = RESP.objects.select_related('institution').all()
serializer_class = RESPSerializer
class REGONViewSet(viewsets.ReadOnlyModelViewSet):
queryset = REGON.objects.prefetch_related('regonerror_set').select_related('institution').all()
serializer_class = REGONSerializer
class REGONErrorViewSet(viewsets.ReadOnlyModelViewSet):
queryset = REGONError.objects.select_related('regon').all()
serializer_class = REGONErrorSerializer
class JSTConnectionViewSet(viewsets.ReadOnlyModelViewSet):
queryset = JSTConnection.objects.select_related('institution', 'jst').all()
serializer_class = JSTConnectionSerializer
|
python
|
# coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlEleAansluitvermogen(KeuzelijstField):
"""Keuzelijst met gangbare waarden voor elektrisch aansluitvermogen."""
naam = 'KlEleAansluitvermogen'
label = 'Elektrisch aansluitvermogen'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlEleAansluitvermogen'
definition = 'Keuzelijst met gangbare waarden voor elektrisch aansluitvermogen.'
codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlEleAansluitvermogen'
options = {
'16A-230Vdriefasig-6.4kVA': KeuzelijstWaarde(invulwaarde='16A-230Vdriefasig-6.4kVA',
label='16A 230Vdriefasig-6.4kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/16A-230Vdriefasig-6.4kVA'),
'16A-230Veenfasig-3.7kVA': KeuzelijstWaarde(invulwaarde='16A-230Veenfasig-3.7kVA',
label='16A 230Veenfasig-3.7kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/16A-230Veenfasig-3.7kVA'),
'16A-400Vdriefasig-11.1kVA': KeuzelijstWaarde(invulwaarde='16A-400Vdriefasig-11.1kVA',
label='16A 400Vdriefasig-11.1kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/16A-400Vdriefasig-11.1kVA'),
'20A-230Vdriefasig-8kVA': KeuzelijstWaarde(invulwaarde='20A-230Vdriefasig-8kVA',
label='20A 230Vdriefasig-8kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/20A-230Vdriefasig-8kVA'),
'20A-230Veenfasig-4.6kVA': KeuzelijstWaarde(invulwaarde='20A-230Veenfasig-4.6kVA',
label='20A 230Veenfasig-4.6kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/20A-230Veenfasig-4.6kVA'),
'20A-400Vdriefasig-13.9kVA': KeuzelijstWaarde(invulwaarde='20A-400Vdriefasig-13.9kVA',
label='20A 400Vdriefasig-13.9kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/20A-400Vdriefasig-13.9kVA'),
'25A-230Vdriefasig-10kVA': KeuzelijstWaarde(invulwaarde='25A-230Vdriefasig-10kVA',
label='25A 230Vdriefasig-10kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/25A-230Vdriefasig-10kVA'),
'25A-230Veenfasig-5.8kVA': KeuzelijstWaarde(invulwaarde='25A-230Veenfasig-5.8kVA',
label='25A 230Veenfasig-5.8kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/25A-230Veenfasig-5.8kVA'),
'25A-400Vdriefasig-17.3kVA': KeuzelijstWaarde(invulwaarde='25A-400Vdriefasig-17.3kVA',
label='25A 400Vdriefasig-17.3kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/25A-400Vdriefasig-17.3kVA'),
'32A-230Vdriefasig-12.7kVA': KeuzelijstWaarde(invulwaarde='32A-230Vdriefasig-12.7kVA',
label='32A 230Vdriefasig-12.7kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/32A-230Vdriefasig-12.7kVA'),
'32A-230Veenfasig-7.4kVA': KeuzelijstWaarde(invulwaarde='32A-230Veenfasig-7.4kVA',
label='32A 230Veenfasig-7.4kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/32A-230Veenfasig-7.4kVA'),
'32A-400Vdriefasig-22.2kVA': KeuzelijstWaarde(invulwaarde='32A-400Vdriefasig-22.2kVA',
label='32A 400Vdriefasig-22.2kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/32A-400Vdriefasig-22.2kVA'),
'40A-230Vdriefasig-15.9kVA': KeuzelijstWaarde(invulwaarde='40A-230Vdriefasig-15.9kVA',
label='40A 230Vdriefasig-15.9kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/40A-230Vdriefasig-15.9kVA'),
'40A-230Veenfasig-9.2kVA': KeuzelijstWaarde(invulwaarde='40A-230Veenfasig-9.2kVA',
label='40A 230Veenfasig-9.2kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/40A-230Veenfasig-9.2kVA'),
'40A-400Vdriefasig-27.7kVA': KeuzelijstWaarde(invulwaarde='40A-400Vdriefasig-27.7kVA',
label='40A 400Vdriefasig-27.7kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/40A-400Vdriefasig-27.7kVA'),
'50A-230Vdriefasig-19.9kVA': KeuzelijstWaarde(invulwaarde='50A-230Vdriefasig-19.9kVA',
label='50A 230Vdriefasig-19.9kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/50A-230Vdriefasig-19.9kVA'),
'50A-230Veenfasig-11.5kVA': KeuzelijstWaarde(invulwaarde='50A-230Veenfasig-11.5kVA',
label='50A 230Veenfasig-11.5kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/50A-230Veenfasig-11.5kVA'),
'50A-400Vdriefasig-34.6kVA': KeuzelijstWaarde(invulwaarde='50A-400Vdriefasig-34.6kVA',
label='50A 400Vdriefasig-34.6kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/50A-400Vdriefasig-34.6kVA'),
'63A-230Vdriefasig-25.1kVA': KeuzelijstWaarde(invulwaarde='63A-230Vdriefasig-25.1kVA',
label='63A 230Vdriefasig-25.1kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/63A-230Vdriefasig-25.1kVA'),
'63A-230Veenfasig-14.5kVA': KeuzelijstWaarde(invulwaarde='63A-230Veenfasig-14.5kVA',
label='63A 230Veenfasig-14.5kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/63A-230Veenfasig-14.5kVA'),
'63A-400Vdriefasig-43.6kVA': KeuzelijstWaarde(invulwaarde='63A-400Vdriefasig-43.6kVA',
label='63A 400Vdriefasig-43.6kVA',
objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlEleAansluitvermogen/63A-400Vdriefasig-43.6kVA')
}
|
python
|
# Generated by Django 2.0.2 on 2018-04-26 17:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20180406_1917'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='img_src',
field=models.ImageField(blank=True, default='sampleavatar.png', upload_to=''),
),
]
|
python
|
# -*- coding: utf-8 -*-
'''
:file: score.py
:author: -Farmer
:url: https://blog.farmer233.top
:date: 2021/09/20 20:06:29
'''
# cjcx/cjcx_cxDgXscj.html?doType=query&gnmkdm=N305005&su=2018133209
from school_sdk.client.api import BaseCrawler
class Score(BaseCrawler):
def __init__(self, user_client) -> None:
super().__init__(user_client)
self.endpoints: dict = self.school.config['url_endpoints']
self.raw_score = None
self.score_dict:dict = {}
self.score_list:list = []
def get_score(self, **kwargs):
return self.get_score_dict(**kwargs)
def get_score_list(self, **kwargs):
"""่ทๅๆ็ปฉๆธ
ๅ-ๅ่กจ
Returns:
list: ๆ็ปฉๅ่กจ
"""
if not self.score_list:
self.parse(**kwargs)
return self.score_list
def get_score_dict(self, **kwargs):
"""่ทๅๆ็ปฉๆธ
ๅ-ๅญๅ
ธ
Returns:
dict: ๆ็ปฉๅญๅ
ธๆธ
ๅ
"""
if not self.score_dict:
self.parse(**kwargs)
return self.score_dict
def parse(self, **kwargs):
"""่งฃๆๆฐๆฎ
"""
if self.raw_score is None:
self.load_score(**kwargs)
self._parse(self.raw_score)
def load_score(self, **kwargs) -> None:
"""ๅ ่ฝฝ่ฏพ่กจ
"""
self.raw_score = self._get_score(**kwargs)
def _get_score(self, year: int, term: int = 1, **kwargs):
"""่ทๅๆๅก็ณป็ปๆ็ปฉ
Args:
year (int): ๅญฆๅนด
term (int, optional): ๅญฆๆ. Defaults to 1.
Returns:
json: jsonๆฐๆฎ
"""
url = self.endpoints['SCORE']['API']
params = {
'doType': 'query',
'gnmkdm': 'N305005',
'su': self.account
}
data = {
'xnm': year,
'xqm': self.TERM.get(term, 3),
'_search': False,
'nd': self.t,
'queryModel.showCount': 500,
'queryModel.currentPage': 1,
'queryModel.sortName': None,
'queryModel.sortOrder': 'asc',
'time': 4,
}
res = self.post(url=url, params=params, data=data, **kwargs)
return res.json()
def _parse(self, raw: dict):
        # kcmc -> course name  # kcxzmc -> course nature  # kcbj -> course tag  # jsxm -> teacher name
        # khfsmc -> assessment method  # ksxz -> exam nature  # xf -> credit  # kkbmmc -> offering department  # cj -> score
        # njdm_id -> grade code
        """Parse the scores returned by the educational administration system.
        Args:
            raw (dict): raw data from the system
        """
items = raw.get('items')
for item in items:
format_item = {
"course_name": item.get('kcmc'),
'course_nature': item.get('kcxzmc'),
'course_target': item.get('kcbj'),
'teacher': item.get('jsxm'),
'exam_method': item.get('khfsmc'),
'exam_nature': item.get('ksxz'),
'exam_result': item.get('cj'),
'credit': item.get('xf'),
'course_group': item.get('kkbmmc'),
'grade': item.get('njdm_id')
}
self.score_list.append(format_item)
self.score_dict.setdefault(item.get('kcmc'), format_item)
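# Illustrative usage (not part of the original module; ``user_client`` stands in
# for an already authenticated client object from school_sdk):
#
#     score = Score(user_client)
#     for record in score.get_score_list(year=2020, term=1):
#         print(record['course_name'], record['exam_result'])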
|
python
|
# --------------------------------------------------------
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License
# --------------------------------------------------------
import os
import random
from PIL import Image
from PIL import ImageFile
from torch.utils.data import Dataset
from .transforms import transform_train, transform_test
ImageFile.LOAD_TRUNCATED_IMAGES = True
class CommonDataset(Dataset):
def __init__(self, is_train: bool = True):
self.data = []
self.domain_id = []
self.image_root = ''
self.transform = transform_train() if is_train else transform_test()
self._domains = None
self.num_domain = 1
@property
def domains(self):
return self._domains
def __getitem__(self, index):
# domain = random.randint(0, self.num_domain - 1)
# path, label = self.data[domain][index]
domain = self.domain_id[index]
path, label = self.data[index]
path = os.path.join(self.image_root, path)
with Image.open(path) as image:
image = image.convert('RGB')
if self.transform is not None:
image = self.transform(image)
return {
'image': image,
'label': label,
'domain': domain
}
def __len__(self):
        return len(self.data)
|
python
|
#
""""""
from os import getpid
import logging.handlers
from .snippet import T2I
def mapped_level(name):
levels = {'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'fatal': logging.FATAL}
    return levels.get(name, logging.WARNING)
def mapped_when(name):
when = ['S', 'M', 'H', 'D']
return name if name in when else when[-1]
def mapped_backup_count(name):
return T2I(name, default=7)
def mapped_interval(name):
return T2I(name, 1)
def init_logging_parameters(**kwargs):
logger = logging.getLogger()
filename = kwargs.get('log.file', 'default.{}.log'.format(getpid()))
level = mapped_level(kwargs.get('log.level', 'warning'))
backup = mapped_backup_count(kwargs.get('log.backup', 7))
when = mapped_when(kwargs.get('log.when', 'D').upper())
interval = mapped_interval(kwargs.get('log.interval', 1))
handler = logging.handlers.TimedRotatingFileHandler(filename, backupCount = backup, when = when, interval = interval)
formatter = logging.Formatter('[%(asctime)s]+%(levelname)s: %(message)s', '%Y-%m-%d %H:%M:%S')
handler.setFormatter(formatter)
logger.setLevel(level)
logger.addHandler(handler)
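# Illustrative usage (not part of the original module; the 'log.*' keys mirror the
# defaults read above):
#
#     init_logging_parameters(**{'log.file': 'app.log', 'log.level': 'info',
#                                'log.when': 'd', 'log.backup': 7, 'log.interval': 1})
#     logging.getLogger(__name__).info('logging configured')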
|
python
|
"""
October 2018
Simulations of a Ramsey experiment in the presence of flux 1/f noise
"""
import time
import numpy as np
import qutip as qtp
from pycqed.measurement import detector_functions as det
from scipy.interpolate import interp1d
import scipy
import matplotlib.pyplot as plt
import logging
from pycqed.simulations import cz_superoperator_simulation_withdistortions_newdevice_singlequbitphases_newcode_fluxnoise2 as czu
from pycqed.tests import test_ramsey_simulations as tests
def time_evolution(H_vec, c_ops, sim_step):
'''
Arguments:
H: list of Hamiltonians at different times, each on for a time = sim_step
c_ops: list of collapse operators. if an element of the list is a single operator, then it is a time-independent one,
otherwise, if it's a 2-list, then the first el. is the operator and the second one is a list of time-dependent coefficients.
Note that in the first case the coefficient is included in the operator
sim_step: time for which each H[t] is on.
'''
exp_L_total=1
for i in range(len(H_vec)):
H=H_vec[i]
if c_ops != []:
c_ops_temp=[]
for c in range(len(c_ops)):
if isinstance(c_ops[c],list):
c_ops_temp.append(c_ops[c][0]*c_ops[c][1][i]) # c_ops are already in the H_0 basis
else:
c_ops_temp.append(c_ops[c])
liouville_exp_t=(qtp.liouvillian(H,c_ops_temp)*sim_step).expm()
else:
liouville_exp_t=(-1j*H*sim_step).expm()
exp_L_total=liouville_exp_t*exp_L_total
return exp_L_total
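# Illustrative usage (not part of the original module): with a single
# time-independent Hamiltonian and no collapse operators, time_evolution reduces to
# the unitary exp(-1j * H * sim_step) of that one step, e.g.
#
#     U = time_evolution([qtp.sigmaz()], c_ops=[], sim_step=10e-9)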
def freq_shift_from_fluxbias(frequency,frequency_target,fluxbias_q0,positive_arc):
'''
frequency_target = max frequency of the qubit
positive_arc (bool) for single and double-sided
'''
if frequency > frequency_target:
logging.warning('Detuning can only be negative. Freq = {}, Freq_max = {}'.format(frequency,frequency_target))
frequency = frequency_target
if positive_arc:
sign = 1
else:
sign = -1
# formula obtained for omega = omega_0 * sqrt(abs(cos(pi Phi/Phi_0)))
    frequency_biased = frequency - np.pi/2 * (frequency_target**2/frequency) * np.sqrt(1 - (frequency**4/frequency_target**4)) * fluxbias_q0 * sign - \
                       np.pi**2/2 * frequency_target * (1+(frequency**4/frequency_target**4)) / (frequency/frequency_target)**3 * fluxbias_q0**2
# with sigma up to circa 1e-3 \mu\Phi_0 the second order is irrelevant
return frequency_biased
def calc_populations(U):
hadamard_singleq = qtp.Qobj([[1,1,0],
[1,-1,0],
[0,0,0]])/np.sqrt(2)
hadamard_q0 = qtp.tensor(qtp.qeye(3),hadamard_singleq)
if U.type == 'oper':
U_pi2_pulsed = hadamard_q0 * U * hadamard_q0
populations = {'population_in_0': np.abs(U_pi2_pulsed[0,0])**2, 'population_in_1': np.abs(U_pi2_pulsed[0,1])**2}
elif U.type == 'super':
U_pi2_pulsed = qtp.to_super(hadamard_q0) * U * qtp.to_super(hadamard_q0)
populations = {'population_in_0': np.real(U_pi2_pulsed[0,0]), 'population_in_1': np.real(U_pi2_pulsed[0,10])}
return populations
class ramsey_experiment(det.Soft_Detector):
def __init__(self, fluxlutman, noise_parameters_CZ, control_parameters_ramsey):
"""
Detector for simulating a Ramsey experiment.
Args:
fluxlutman (instr): an instrument that contains the parameters
required to generate the waveform for the trajectory, and the hamiltonian as well.
noise_parameters_CZ: instrument that contains the noise parameters, plus some more
control_parameters_ramsey: instrument containing some parameters for ramsey that are passed via notebook
"""
super().__init__()
self.value_names = ['population_in_0','population_in_1']
self.value_units = ['%', '%']
self.fluxlutman = fluxlutman
self.noise_parameters_CZ = noise_parameters_CZ
self.control_parameters_ramsey = control_parameters_ramsey
def acquire_data_point(self, **kw):
ramsey = self.control_parameters_ramsey.ramsey() # True for Ram-Z, False for Echo-Z
sigma = self.control_parameters_ramsey.sigma() # width of the Gaussian distribution of the fluxbias
detuning = self.control_parameters_ramsey.detuning_ramsey() # how much the freq of q0 is offset from the sweetspot
t = self.control_parameters_ramsey.pulse_length() # separation time between the two pi/2 pulses
qoi_plot = list() # used to verify convergence properties. If len(n_sampling_gaussian_vec)==1, it is useless
n_sampling_gaussian_vec = [101] # 11 guarantees excellent convergence. We choose it odd so that the central point of the Gaussian is included.
# ALWAYS choose it odd
for n_sampling_gaussian in n_sampling_gaussian_vec:
# If sigma=0 there's no need for sampling
weights=[]
if sigma != 0:
samplingpoints_gaussian = np.linspace(-5*sigma,5*sigma,n_sampling_gaussian) # after 5 sigmas we cut the integral
delta_x = samplingpoints_gaussian[1]-samplingpoints_gaussian[0]
values_gaussian = czu.gaussian(samplingpoints_gaussian,mean=0,sigma=sigma)
else:
samplingpoints_gaussian = np.array([0])
delta_x = 1
values_gaussian = np.array([1])
U_final_vec = list()
for j_q0 in range(len(samplingpoints_gaussian)):
fluxbias_q0 = samplingpoints_gaussian[j_q0]
if sigma != 0:
weight=values_gaussian[j_q0]*delta_x
weights.append(weight)
else:
weight=1
weights.append(weight)
f_q0_sweetspot = self.fluxlutman.q_freq_01()
f_q0_detuned = f_q0_sweetspot + detuning
H=[]
if ramsey: # the freq shift takes a different sign at first order on the two sides of Echo-Z
positive = [True]
else:
positive = [True, False]
for pos in positive:
f_q0_biased = freq_shift_from_fluxbias(f_q0_detuned,f_q0_sweetspot,fluxbias_q0,positive_arc=pos)
freq_rotating_frame_detuned = f_q0_biased-f_q0_sweetspot-detuning
H.append(czu.coupled_transmons_hamiltonian_new(w_q0=freq_rotating_frame_detuned, w_q1=0, alpha_q0=-2*freq_rotating_frame_detuned, alpha_q1=0, J=0))
                    # convenient way of getting the uncoupled Hamiltonian for one qubit
sim_step = t/len(positive)
c_ops=[]
U_final = time_evolution(H, c_ops, sim_step)
if U_final.type == 'oper':
U_final = qtp.to_super(U_final)
U_final_vec.append(U_final*weight)
weights = np.array(weights)
U_superop_average = np.sum(np.array(U_final_vec)) # computing resulting superoperator
qoi = calc_populations(U_superop_average)
quantities_of_interest = [qoi['population_in_0']*100, qoi['population_in_1']*100]
qoi_vec=np.array(quantities_of_interest)
qoi_plot.append(qoi_vec)
qoi_plot = np.array(qoi_plot)
### Plot to study the convergence properties of averaging over a Gaussian
# for i in range(len(qoi_plot[0])):
# czu.plot(x_plot_vec=[n_sampling_gaussian_vec],
# y_plot_vec=[qoi_plot[:,i]],
# title='Study of convergence of average',
# xlabel='n_sampling_gaussian points',ylabel=self.value_names[i])
return qoi_plot[0,0], qoi_plot[0,1]
|
python
|
from .cloudchain import checkconfig
from .cloudchain import CloudChainConfigError
from .cloudchain import CloudChainError
from .cloudchain import CloudChain
from .cloudchain import decryptcreds
from .cloudchain import encryptcreds
from .cloudchain import endpoint_url
from .cloudchain import getconn
from .cloudchain import keyalias
from .cloudchain import read_configfile
from .cloudchain import readcreds
from .cloudchain import region_name
from .cloudchain import savecreds
from .cloudchain import tablename
from .cloudchain import get_default_cloud_chain
|
python
|
import logging
from typing import List, Dict
from bs4 import BeautifulSoup, element
from .row_utils import (
movies_utils,
series_utils,
books_utils,
comics_utils,
music_utils,
videogames_utils,
)
logger = logging.getLogger(__name__)
def get_rows_from_topchart(soup: BeautifulSoup) -> List[element.ResultSet]:
"""Returns a list of rows from a topchart."""
return soup.find("ol", {"class": "elto-list"}).find_all(
"li", {"class": "elto-item"}
)
def get_topchart_infos(soup: BeautifulSoup, category: str) -> List[Dict]:
"""Returns a list of dict containing data of a topchart."""
rows = get_rows_from_topchart(soup)
if category == "films":
return [movies_utils.get_movies_infos_from_row(x) for x in rows]
elif category == "series":
return [series_utils.get_series_infos_from_row(x) for x in rows]
elif category == "jeuxvideo":
return [videogames_utils.get_videogames_infos_from_row(x) for x in rows]
elif category == "livres":
return [books_utils.get_books_infos_from_row(x) for x in rows]
elif category == "bd":
return [comics_utils.get_comics_infos_from_row(x) for x in rows]
elif category == "musique":
return [music_utils.get_music_infos_from_row(x) for x in rows]
else:
logger.error(f"Category {category} not supported.")
return []
def get_topchart_order(category: str) -> List:
"""Returns the order of columns for a topchart based on its category."""
if category == "films":
return movies_utils.get_order_movies_columns()
elif category == "series":
return series_utils.get_order_series_columns()
elif category == "jeuxvideo":
return videogames_utils.get_order_videogames_columns()
elif category == "livres":
return books_utils.get_order_books_columns()
elif category == "bd":
return comics_utils.get_order_comics_columns()
elif category == "musique":
return music_utils.get_order_music_columns()
else:
logger.error(f"Category {category} not supported.")
return []
|
python
|
#!/usr/bin/env python3
"""
For each family and device, obtain a tilegrid and save it in the database
"""
import os
from os import path
import subprocess
import extract_tilegrid
import database
def main():
devices = database.get_devices()
for family in sorted(devices["families"].keys()):
for device in sorted(devices["families"][family]["devices"].keys()):
output_file = path.join(database.get_db_subdir(family, device), "tilegrid.json")
subprocess.check_call(["./get_device_tilegrid.sh", device])
extract_tilegrid.main(["extract_tilegrid", device, "../minitests/simple/wire.dump", output_file])
if __name__ == "__main__":
main()
|
python
|
import os
import random
import string
import pytest
from check_mk_web_api import WebApi, CheckMkWebApiException
api = WebApi(
os.environ['CHECK_MK_URL'],
os.environ['CHECK_MK_USER'],
os.environ['CHECK_MK_SECRET']
)
def setup():
api.delete_all_hosts()
api.delete_all_hostgroups()
api.delete_all_servicegroups()
for group in api.get_all_contactgroups():
if group != 'all':
api.delete_contactgroup(group)
for user_id in api.get_all_users():
if user_id != 'cmkadmin' and user_id != os.environ['CHECK_MK_USER']:
api.delete_user(user_id)
for folder in api.get_all_folders():
if folder != '':
api.delete_folder(folder)
def test_add_host():
api.add_host('host00')
assert 'host00' in api.get_all_hosts()
def test_add_duplicate_host():
with pytest.raises(CheckMkWebApiException):
api.add_host('host00')
api.add_host('host00')
def test_edit_host():
api.add_host('host00', ipaddress='192.168.0.100')
assert api.get_host('host00')['attributes']['ipaddress'] == '192.168.0.100'
api.edit_host('host00', ipaddress='192.168.0.101')
assert api.get_host('host00')['attributes']['ipaddress'] == '192.168.0.101'
def test_unset_host_attribute():
api.add_host('host00', ipaddress='192.168.0.100')
assert api.get_host('host00')['attributes']['ipaddress'] == '192.168.0.100'
api.edit_host('host00', unset_attributes=['ipaddress'])
assert 'ipaddress' not in api.get_host('host00')['attributes']
def test_edit_nonexistent_host():
with pytest.raises(CheckMkWebApiException):
api.edit_host('host00', ipaddress='192.168.0.101')
def test_get_host():
api.add_host('host00')
assert api.get_host('host00')['hostname'] == 'host00'
def test_get_nonexistent_host():
with pytest.raises(CheckMkWebApiException):
api.get_host('host00')
def test_get_all_hosts():
api.add_host('host00')
api.add_host('host01')
all_hosts = api.get_all_hosts()
assert len(all_hosts) == 2
assert 'host00' in all_hosts
assert 'host01' in all_hosts
def test_get_hosts_by_folder():
api.add_folder('test')
api.add_host('host00', 'test')
api.add_host('host01', 'test')
hosts = api.get_hosts_by_folder('test')
assert len(hosts) == 2
assert 'host00' in hosts
assert 'host01' in hosts
def test_delete_host():
api.add_host('host00')
assert len(api.get_all_hosts()) == 1
api.delete_host('host00')
assert len(api.get_all_hosts()) == 0
def test_delete_nonexistent_host():
with pytest.raises(CheckMkWebApiException):
api.delete_host('host00')
def test_delete_all_hosts():
api.add_host('host00')
api.add_host('host01')
assert len(api.get_all_hosts()) == 2
api.delete_all_hosts()
assert len(api.get_all_hosts()) == 0
def test_discover_services():
api.add_host('localhost')
api.discover_services('localhost')
def test_discover_services_for_nonexistent_host():
with pytest.raises(CheckMkWebApiException):
api.discover_services('localhost')
def test_get_user():
api.add_user('user00', 'User 00', 'p4ssw0rd')
assert api.get_user('user00')['alias'] == 'User 00'
def test_get_all_users():
api.add_user('user00', 'User 00', 'p4ssw0rd')
api.add_user('user01', 'User 01', 'p4ssw0rd')
users = api.get_all_users()
assert 'user00' in users
assert 'user01' in users
def test_add_user():
api.add_user('user00', 'User 00', 'p4ssw0rd')
assert 'user00' in api.get_all_users()
def test_add_automation_user():
api.add_automation_user('automation00', 'Automation 00', 's3cr3t1234')
assert 'automation00' in api.get_all_users()
def test_add_duplicate_user():
with pytest.raises(CheckMkWebApiException):
api.add_user('user00', 'User 00', 'p4ssw0rd')
api.add_user('user00', 'User 00', 'p4ssw0rd')
def test_add_duplicate_automation_user():
with pytest.raises(CheckMkWebApiException):
api.add_automation_user('automation00', 'Automation 00', 's3cr3t1234')
api.add_automation_user('automation00', 'Automation 00', 's3cr3t1234')
def test_edit_user():
api.add_user('user00', 'User 00', 'p4ssw0rd')
assert api.get_all_users()['user00']['alias'] == 'User 00'
api.edit_user('user00', {'alias': 'User 0'})
assert api.get_all_users()['user00']['alias'] == 'User 0'
def test_unset_user_attribute():
api.add_user('user00', 'User 00', 'p4ssw0rd', pager='49123456789')
assert api.get_all_users()['user00']['pager'] == '49123456789'
api.edit_user('user00', {}, unset_attributes=['pager'])
assert 'pager' not in api.get_all_users()['user00']
def test_edit_nonexistent_user():
with pytest.raises(CheckMkWebApiException):
api.edit_user('user00', {})
def test_delete_user():
api.add_user('user00', 'User 00', 'p4ssw0rd')
assert 'user00' in api.get_all_users()
api.delete_user('user00')
assert 'user00' not in api.get_all_users()
def test_delete_nonexistent_user():
with pytest.raises(CheckMkWebApiException):
api.delete_user('user00')
def test_get_folder():
api.add_folder('productive')
assert api.get_folder('productive')
def test_get_nonexistent_folder():
with pytest.raises(CheckMkWebApiException):
assert api.get_folder('productive')
def test_get_all_folders():
api.add_folder('productive')
api.add_folder('testing')
folders = api.get_all_folders()
assert 'productive' in folders
assert 'testing' in folders
def test_add_folder():
api.add_folder('productive')
assert 'productive' in api.get_all_folders()
def test_edit_folder():
api.add_folder('productive', snmp_community='public')
assert api.get_folder('productive')['attributes']['snmp_community'] == 'public'
api.edit_folder('productive', snmp_community='private')
assert api.get_folder('productive')['attributes']['snmp_community'] == 'private'
def test_edit_nonexistent_folder():
with pytest.raises(CheckMkWebApiException):
assert api.edit_folder('productive')
def test_delete_folder():
api.add_folder('productive')
assert 'productive' in api.get_all_folders()
api.delete_folder('productive')
assert 'productive' not in api.get_all_folders()
def test_delete_nonexistent_folder():
with pytest.raises(CheckMkWebApiException):
api.delete_folder('productive')
def test_get_contactgroup():
api.add_contactgroup('user', 'User')
assert api.get_contactgroup('user')
def test_get_all_contactgroups():
api.add_contactgroup('user', 'User')
api.add_contactgroup('admin', 'Admin')
groups = api.get_all_contactgroups()
assert 'user' in groups
assert 'admin' in groups
def test_get_nonexistent_contactgroup():
with pytest.raises(KeyError):
api.get_contactgroup('user')
def test_add_contactgroup():
api.add_contactgroup('user', 'User')
assert api.get_contactgroup('user')['alias'] == 'User'
def test_add_duplicate_contactgroup():
with pytest.raises(CheckMkWebApiException):
api.add_contactgroup('user', 'User')
api.add_contactgroup('user', 'User')
def test_edit_contactgroup():
api.add_contactgroup('user', 'User')
assert api.get_contactgroup('user')['alias'] == 'User'
api.edit_contactgroup('user', 'Users')
assert api.get_contactgroup('user')['alias'] == 'Users'
def test_edit_nonexisting_contactgroup():
with pytest.raises(CheckMkWebApiException):
api.edit_contactgroup('user', 'Users')
def test_delete_contactgroup():
api.add_contactgroup('user', 'User')
assert 'user' in api.get_all_contactgroups()
api.delete_contactgroup('user')
assert 'user' not in api.get_all_contactgroups()
def test_delete_nonexistent_contactgroup():
with pytest.raises(CheckMkWebApiException):
api.delete_contactgroup('user')
def test_get_hostgroup():
api.add_hostgroup('vm', 'VM')
api.get_hostgroup('vm')
def test_get_all_hostgroups():
api.add_hostgroup('vm', 'VM')
api.add_hostgroup('physical', 'Physical')
groups = api.get_all_hostgroups()
assert 'vm' in groups
assert 'physical' in groups
def test_get_nonexistent_hostgroup():
with pytest.raises(KeyError):
api.get_hostgroup('vm')
def test_add_hostgroup():
api.add_hostgroup('vm', 'VM')
assert api.get_hostgroup('vm')['alias'] == 'VM'
def test_add_duplicate_hostgroup():
with pytest.raises(CheckMkWebApiException):
api.add_hostgroup('vm', 'VM')
api.add_hostgroup('vm', 'VM')
def test_edit_hostgroup():
api.add_hostgroup('vm', 'VM')
assert api.get_hostgroup('vm')['alias'] == 'VM'
api.edit_hostgroup('vm', 'VMs')
assert api.get_hostgroup('vm')['alias'] == 'VMs'
def test_edit_nonexisting_hostgroup():
with pytest.raises(CheckMkWebApiException):
api.edit_hostgroup('vm', 'VM')
def test_delete_hostgroup():
api.add_hostgroup('vm', 'VM')
assert 'vm' in api.get_all_hostgroups()
api.delete_hostgroup('vm')
assert 'vm' not in api.get_all_hostgroups()
def test_delete_nonexistent_hostgroup():
with pytest.raises(CheckMkWebApiException):
api.delete_hostgroup('vm')
def test_get_servicegroup():
api.add_servicegroup('db', 'Database')
assert api.get_servicegroup('db')
def test_get_all_servicegroups():
api.add_servicegroup('db', 'Database')
api.add_servicegroup('web', 'Webserver')
groups = api.get_all_servicegroups()
assert 'db' in groups
assert 'web' in groups
def test_get_nonexistent_servicegroup():
with pytest.raises(KeyError):
api.get_servicegroup('db')
def test_add_servicegroup():
api.add_servicegroup('db', 'Database')
assert api.get_servicegroup('db')['alias'] == 'Database'
def test_add_duplicate_servicegroup():
with pytest.raises(CheckMkWebApiException):
api.add_servicegroup('db', 'Database')
api.add_servicegroup('db', 'Database')
def test_edit_servicegroup():
api.add_servicegroup('db', 'Database')
assert api.get_servicegroup('db')['alias'] == 'Database'
api.edit_servicegroup('db', 'Databases')
assert api.get_servicegroup('db')['alias'] == 'Databases'
def test_edit_nonexisting_servicegroup():
with pytest.raises(CheckMkWebApiException):
api.edit_servicegroup('db', 'Database')
def test_delete_servicegroup():
api.add_servicegroup('db', 'Database')
assert 'db' in api.get_all_servicegroups()
api.delete_servicegroup('db')
assert 'db' not in api.get_all_servicegroups()
def test_delete_nonexistent_servicegroup():
with pytest.raises(CheckMkWebApiException):
api.delete_servicegroup('db')
def test_get_hosttags():
assert api.get_hosttags()
def test_get_ruleset():
assert api.get_ruleset('checkgroup_parameters:hw_fans_perc')
def test_get_nonexistent_rulesets():
with pytest.raises(CheckMkWebApiException):
api.get_ruleset('nonexistent')
def test_set_nonexistent_rulesets():
with pytest.raises(CheckMkWebApiException):
api.set_ruleset('nonexistent', {})
def test_get_rulesets():
assert api.get_rulesets()
def test_get_site():
assert api.get_site('cmk')
def test_set_site():
random_alias = 'alias_' + ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
config = api.get_site('cmk')['site_config']
config['alias'] = random_alias
api.set_site('cmk', config)
assert api.get_site('cmk')['site_config']['alias'] == random_alias
@pytest.mark.skip(reason="bug in Check_Mk")
def test_login_site():
api.add_user('user00', 'User 00', 'p4ssw0rd')
api.login_site('cmk', 'user00', 'p4ssw0rd')
@pytest.mark.skip(reason="bug in Check_Mk")
def test_logout_site():
api.add_user('user00', 'User 00', 'p4ssw0rd')
api.login_site('cmk', 'user00', 'p4ssw0rd')
api.logout_site('cmk')
|
python
|
from functools import reduce
hashes = [None] + [i for i in range(1, 11)]
index = 0
hash_list = [(lambda _h=h: _h, i == index, []) for i, h in enumerate(hashes)]
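# Each hash_list entry is a 3-tuple: (a thunk returning the node's hash, a flag
# marking whether the tracked leaf at `index` sits under this node, and the list of
# sibling hashes collected so far -- i.e. the Merkle branch for that leaf).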
print(hashes)
print("zip = {0}".format(zip(hash_list[::2], hash_list[1::2] + [hash_list[::2][-1]])))
while len(hash_list) > 1:
hash_list = [
(
lambda _left=left, _right=right: _left() + _right(),
left_f or right_f,
(left_l if left_f else right_l) + [dict(side=1, hash=right) if left_f else dict(side=0, hash=left)],
)
for (left, left_f, left_l), (right, right_f, right_l) in
zip(hash_list[::2], hash_list[1::2] + [hash_list[::2][-1]])
]
def _sum(a):
return a['left']+a['right']
def check_merkle_link(tip_hash, link):
if link['index'] >= 2**len(link['branch']):
raise ValueError('index too large')
return reduce(lambda c, e: _sum(
dict(left=e[1], right=c) if (link['index'] >> e[0]) & 1 else
dict(left=c, right=e[1])
), enumerate(link['branch']), tip_hash)
print(hash_list)
res = [x['hash']() for x in hash_list[0][2]]
print(res)
check = check_merkle_link(0,dict(branch=res, index=index))
print(check)
|
python
|
import numpy as np
import pandas as pd
from dataset import MultiDataset, RegressionDataset
def scatter_path_array(path_data, size, rank):
all_lst = []
for row, item in path_data.iterrows():
path, num = item['path'], int(item['num'])
all_lst.extend([[path, i] for i in range(num)])
all_lst = np.array(all_lst, dtype=object)
all_lst = np.random.permutation(all_lst)
all_lst = all_lst[int(len(all_lst) / size) * rank:int(len(all_lst) / size) * (rank + 1):]
return all_lst[:, 0], all_lst[:, 1]
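# Hedged usage sketch (illustrative only; the DataFrame below is a made-up stand-in
# for the real csv, with the 'path'/'num' columns the function expects):
#   df = pd.DataFrame({'path': ['a.h5', 'b.h5'], 'num': [3, 2]})
#   paths, indices = scatter_path_array(df, size=2, rank=0)
#   # the 5 (path, index) pairs are shuffled, then rank 0 keeps its contiguous share (2 of 5)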
class Dataproc():
def __init__(self, size, rank, config):
self.verbose = 10 if rank == 0 else 0
self.config = config
np.random.seed(7)
path_data = pd.read_csv(self.config['csv_path'], index_col=0)
# path_data = path_data[path_data.apply(lambda x: int(x['dir_name'][2:5]) < 759, axis=1)]
path_data = path_data.reindex(np.random.permutation(path_data.index)).reset_index(drop=True)
rate = self.config['train_rate']
protein_name_list = set(path_data['dir_name'].unique())
similar_protein = {'T0356', 'T0456', 'T0483', 'T0292', 'T0494', 'T0597', 'T0291', 'T0637', 'T0392', 'T0738',
'T0640', 'T0308', 'T0690', 'T0653', 'T0671', 'T0636', 'T0645', 'T0532', 'T0664', 'T0699',
'T0324', 'T0303', 'T0418', 'T0379', 'T0398', 'T0518'}
protein_name_list = protein_name_list - similar_protein
protein_name_list = np.sort(list(protein_name_list))
protein_name_list = np.random.permutation(protein_name_list)
self.protein_name = {'train': protein_name_list[:int(len(protein_name_list) * rate)],
'test': protein_name_list[int(len(protein_name_list) * rate):]}
self.data_dict = {}
        train_data = path_data.loc[path_data['dir_name'].isin(self.protein_name['train'])]
        test_data = path_data.loc[path_data['dir_name'].isin(self.protein_name['test'])]
native_data = train_data[train_data['gdtts'] == 1]
other_data = train_data[train_data['gdtts'] != 1]
# random
# other_data = other_data.groupby('dir_name').apply(lambda x: x.sample(frac=self.config['data_frac']))
# upper
# other_data = other_data.groupby('dir_name').apply(
# lambda x: x.sort_values('label_list')[int(x.shape[0] * (1 - self.config['data_frac'])):x.shape[0]])
# lower
other_data = other_data.groupby('dir_name').apply(
lambda x: x.sort_values('gdtts')[:int(x.shape[0] * self.config['data_frac'])])
train_data = pd.concat([native_data, other_data])
path, index = scatter_path_array(path_data=train_data, size=size, rank=rank)
self.data_dict.update({'train': {'path': path, 'index': index}})
path, index = scatter_path_array(path_data=test_data, size=size, rank=rank)
self.data_dict.update({'test': {'path': path, 'index': index}})
if self.config['scop']:
scop_path_data = pd.read_csv('./scop_e_40_path_list.csv', index_col=0)
path, index = scatter_path_array(
path_data=scop_path_data, size=size, rank=rank)
self.data_dict['train']['path'] = np.append(self.data_dict['train']['path'], path)
self.data_dict['train']['index'] = np.append(self.data_dict['train']['index'], index)
def get_protein_name_dict(self):
return self.protein_name
def get_classification_dataset(self, key):
dataset = MultiDataset(path=self.data_dict[key]['path'], index=self.data_dict[key]['index'],
config=self.config)
return dataset
def get_regression_dataset(self, key):
dataset = RegressionDataset(path=self.data_dict[key]['path'], index=self.data_dict[key]['index'],
config=self.config)
return dataset
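# Hedged usage sketch (assumption: the config keys are inferred from this file only, and
# the csv referenced by 'csv_path' has the 'path', 'num', 'dir_name' and 'gdtts' columns used above):
#   config = {'csv_path': 'path_list.csv', 'train_rate': 0.8, 'data_frac': 0.5, 'scop': False}
#   proc = Dataproc(size=1, rank=0, config=config)
#   train_set = proc.get_regression_dataset('train')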
|
python
|
from . import controller
from . import model
|
python
|
#------------------------------------------------------------------------------
# Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# ShardingNumberKey.py
# This script demonstrates how to use sharding keys with a sharded database.
# The sample schema provided does not include support for running this demo. A
# sharded database must first be created. Information on how to create a
# sharded database can be found in the documentation:
# https://www.oracle.com/pls/topic/lookup?ctx=dblatest&id=SHARD
#
# This script requires cx_Oracle 6.1 and higher but it is recommended to use
# cx_Oracle 7.3 and higher in order to avoid a set of known issues when using
# sharding capabilities.
#------------------------------------------------------------------------------
import cx_Oracle
import SampleEnv
pool = cx_Oracle.SessionPool(SampleEnv.GetMainUser(),
SampleEnv.GetMainPassword(), SampleEnv.GetConnectString(), min=1,
max=5, increment=1)
def ConnectAndDisplay(shardingKey):
print("Connecting with sharding key:", shardingKey)
with pool.acquire(shardingkey=[shardingKey]) as conn:
cursor = conn.cursor()
cursor.execute("select sys_context('userenv', 'db_name') from dual")
name, = cursor.fetchone()
print("--> connected to database", name)
ConnectAndDisplay(100)
ConnectAndDisplay(167)
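# Added note: each pool.acquire(shardingkey=[...]) call routes the session to the shard
# that stores the given key, so the two calls above may print different database names
# depending on how keys 100 and 167 are distributed across the shards.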
|
python
|
'''
Comp_slice is a terminal fork of intra_blob.
-
It traces blob axis by cross-comparing vertically adjacent Ps: horizontal slices across an edge blob.
These low-M high-Ma blobs are vectorized into outlines of adjacent flat or high-M blobs.
(high match: M / Ma, roughly corresponds to low gradient: G / Ga)
-
Vectorization is clustering of Ps + their derivatives (derPs) into PPs: patterns of Ps that describe an edge.
This process is a reduced-dimensionality (2D->1D) version of cross-comp and clustering cycle, common across this project.
As we add higher dimensions (2D alg, 3D alg), this dimensionality reduction is done in salient high-aspect blobs
(likely edges / contours in 2D or surfaces in 3D) to form more compressed "skeletal" representations of full-D patterns.
Most functions should be replaced by casting generic Search, Compare, Cluster functions
'''
from collections import deque
import sys
import numpy as np
from class_cluster import ClusterStructure, NoneType, comp_param, Cdm
from comp_blob import ave_min, ave_inv
# import warnings # to detect overflow issue, in case of infinity loop
# warnings.filterwarnings('error')
ave_g = 30 # change to Ave from the root intra_blob?
flip_ave = .1
flip_ave_FPP = 0 # flip large FPPs only (change to 0 for debug purpose)
div_ave = 200
ave_dX = 10 # difference between median x coords of consecutive Ps
ave_Dx = 10
ave_mP = 8 # just a random number right now.
ave_rmP = .7 # the rate of mP decay per relative dX (x shift) = 1: initial form of distance
ave_ortho = 20
ave_ga = 0.78 # ga at 22.5 degree
# comp_PP
ave_mPP = 0
ave_rM = .7
# comp_param
ave_comp = 0
# comp_PPP
ave_mPPP = 5
class CP(ClusterStructure):
# comp_pixel:
I = int
Dy = int
Dx = int
G = int
M = int
# comp_angle:
Dydy = int
Dxdy = int
Dydx = int
Dxdx = int
Ga = int
Ma = int
# comp_dx:
Mdx = int
Ddx = int
# new:
L = int
x0 = int
x = int # median x
dX = int # shift of average x between P and _P, if any
y = int # for visualization only
sign = NoneType # sign of gradient deviation
dert_ = list # array of pixel-level derts: (p, dy, dx, g, m), extended in intra_blob
upconnect_ = list
downconnect_cnt = int
derP = object # derP object reference
# only in Pd:
Pm = object # reference to root P
dxdert_ = list
# only in Pm:
Pd_ = list
class CderP(ClusterStructure):
layer1 = dict
# derP params
mP = int
dP = int
P = object # lower comparand
_P = object # higher comparand
PP = object # FPP if flip_val, contains this derP
# from comp_dx
fdx = NoneType
distance = int # d_ave_x
class CPP(CP, CderP):
layer1 = dict
# between PPs:
upconnect_ = list
downconnect_cnt = int
fPPm = NoneType # PPm if 1, else PPd; not needed if packed in PP_
fdiv = NoneType
box = list # for visualization only, original box before flipping
dert__ = list
mask__ = bool
# PP params
derP__ = list
P__ = list
PPmm_ = list
PPdm_ = list
# PPd params
derPd__ = list
Pd__ = list
# comp_dx params
PPmd_ = list
PPdd_ = list
# comp_PP
derPPm_ = list
derPPd_ = list
distance = int
mmPP = int
dmPP = int
mdPP = int
ddPP = int
PPPm = object
PPPd = object
neg_mmPP = int
neg_mdPP = int
class CderPP(ClusterStructure):
layer01 = dict
layer1 = dict
layer11 = dict
PP = object
_PP = object
mmPP = int
dmPP = int
mdPP = int
ddPP = int
class CPPP(CPP, CderPP):
layer01 = dict
layer1 = dict
layer11 = dict
PPm_ = list
PPd_ = list
derPPm_ = list
derPPd_ = list
mmPP = int
dmPP = int
mdPP = int
ddPP = int
# Functions:
'''
leading '_' denotes higher-line variable or structure, vs. same-type lower-line variable or structure
trailing '_' denotes array name, vs. same-name elements of that array. '__' is a 2D array
leading 'f' denotes flag
-
rough workflow:
-
intra_blob -> slice_blob(blob) -> derP_ -> PP,
if flip_val(PP is FPP): pack FPP in blob.PP_ -> flip FPP.dert__ -> slice_blob(FPP) -> pack PP in FPP.PP_
else (PP is PP): pack PP in blob.PP_
'''
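# Added summary comment (paraphrase of the workflow above, no new behavior):
# slice_blob forms a row of Ps per dert row, scan_P_ / scan_Pd_ link vertically
# overlapping Ps into derPs, form_PP_root clusters derPs into PPs by m | d sign,
# and comp_PP_ then cross-compares the resulting PPs.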
def slice_blob(blob, verbose=False): # where should we use this Ave?
'''
Slice_blob converts selected smooth-edge blobs (high G, low Ga or low M, high Ma) into sliced blobs,
adding horizontal blob slices: Ps or 1D patterns
'''
dert__ = blob.dert__
mask__ = blob.mask__
height, width = dert__[0].shape
if verbose: print("Converting to image...")
for fPPd in range(2): # run twice, 1st loop fPPd=0: form PPs, 2nd loop fPPd=1: form PPds
P__ , derP__, Pd__, derPd__ = [], [], [], []
zip_dert__ = zip(*dert__)
_P_ = form_P_(list(zip(*next(zip_dert__))), mask__[0], 0) # 1st upper row
P__ += _P_ # frame of Ps
for y, dert_ in enumerate(zip_dert__, start=1): # scan top down
if verbose: print(f"\rProcessing line {y + 1}/{height}, ", end=""); sys.stdout.flush()
P_ = form_P_(list(zip(*dert_)), mask__[y], y) # horizontal clustering - lower row
derP_ = scan_P_(P_, _P_) # tests for x overlap between Ps, calls comp_slice
Pd_ = form_Pd_(P_) # form Pds within Ps
derPd_ = scan_Pd_(P_, _P_) # adds upconnect_ in Pds and calls derPd_2_PP_derPd_, same as derP_2_PP_
derP__ += derP_; derPd__ += derPd_ # frame of derPs
P__ += P_; Pd__ += Pd_
_P_ = P_ # set current lower row P_ as next upper row _P_
form_PP_root(blob, derP__, P__, derPd__, Pd__, fPPd) # form PPs in blob or in FPP
comp_PP_(blob,fPPd)
# if not isinstance(blob, CPP):
# draw_PP_(blob)
def form_P_(idert_, mask_, y): # segment dert__ into P__ in horizontal ) vertical order, sum dert params into P params
P_ = [] # rows of derPs
_dert = list(idert_[0]) # first dert
dert_ = [_dert] # pack 1st dert
_mask = mask_[0] # mask bit per dert
if ~_mask:
# initialize P with first dert
P = CP(I=_dert[0], Dy=_dert[1], Dx=_dert[2], G=_dert[3], M=_dert[4],
Dydy=_dert[5], Dxdy=_dert[6], Dydx=_dert[7], Dxdx=_dert[8], Ga=_dert[9], Ma=_dert[10],
x0=0, L=1, y=y, dert_=dert_)
for x, dert in enumerate(idert_[1:], start=1): # left to right in each row of derts
mask = mask_[x] # pixel mask
if mask: # masks: if 1,_0: P termination, if 0,_1: P initialization, if 0,_0: P accumulation:
if ~_mask: # _dert is not masked, dert is masked, terminate P:
P.x = P.x0 + (P.L-1) // 2
P_.append(P)
else: # dert is not masked
if _mask: # _dert is masked, initialize P params:
# initialize P with first dert
P = CP(I=dert[0], Dy=dert[1], Dx=dert[2], G=dert[3], M=dert[4],
                       Dydy=dert[5], Dxdy=dert[6], Dydx=dert[7], Dxdx=dert[8], Ga=dert[9], Ma=dert[10],
x0=x, L=1, y=y, dert_=dert_)
else:
# _dert is not masked, accumulate P params with (p, dy, dx, g, m, day, dax, ga, ma) = dert
P.accumulate(I=dert[0], Dy=dert[1], Dx=dert[2], G=dert[3], M=dert[4],
Dydy=dert[5], Dxdy=dert[6], Dydx=dert[7], Dxdx=dert[8], Ga=dert[9], Ma=dert[10], L=1)
P.dert_.append(dert)
_mask = mask
if ~_mask: # terminate last P in a row
P.x = P.x0 + (P.L-1) // 2
P_.append(P)
return P_
def form_Pd_(P_): # form Pds from Pm derts by dx sign, otherwise same as form_P
Pd__ = []
for iP in P_:
if (iP.downconnect_cnt>0) or (iP.upconnect_): # form Pd s if at least one connect in P, else they won't be compared
P_Ddx = 0 # sum of Ddx across Pd s
P_Mdx = 0 # sum of Mdx across Pd s
Pd_ = [] # Pds in P
_dert = iP.dert_[0] # 1st dert
dert_ = [_dert]
_sign = _dert[2] > 0
# initialize P with first dert
P = CP(I=_dert[0], Dy=_dert[1], Dx=_dert[2], G=_dert[3], M=_dert[4],
Dydy=_dert[5], Dxdy=_dert[6], Dydx=_dert[7], Dxdx=_dert[8], Ga=_dert[9], Ma=_dert[10],
x0=iP.x0, dert_=dert_, L=1, y=iP.y, sign=_sign, Pm=iP)
x = 1 # relative x within P
for dert in iP.dert_[1:]:
sign = dert[2] > 0
if sign == _sign: # same Dx sign
# accumulate P params with (p, dy, dx, g, m, dyy, dyx, dxy, dxx, ga, ma) = dert
P.accumulate(I=dert[0], Dy=dert[1], Dx=dert[2], G=dert[3], M=dert[4],
                                 Dydy=dert[5], Dxdy=dert[6], Dydx=dert[7], Dxdx=dert[8], Ga=dert[9], Ma=dert[10], L=1)
P.dert_.append(dert)
else: # sign change, terminate P
if P.Dx > ave_Dx:
# cross-comp of dx in P.dert_
comp_dx(P); P_Ddx += P.Ddx; P_Mdx += P.Mdx
P.x = P.x0 + (P.L-1) // 2
Pd_.append(P)
# reinitialize params
P = CP(I=dert[0], Dy=dert[1], Dx=dert[2], G=dert[3], M=dert[4],
                           Dydy=dert[5], Dxdy=dert[6], Dydx=dert[7], Dxdx=dert[8], Ga=dert[9], Ma=dert[10],
x0=iP.x0+x, dert_=[dert], L=1, y=iP.y, sign=sign, Pm=iP)
_sign = sign
x += 1
# terminate last P
if P.Dx > ave_Dx:
comp_dx(P); P_Ddx += P.Ddx; P_Mdx += P.Mdx
P.x = P.x0 + (P.L-1) // 2
Pd_.append(P)
# update Pd params in P
iP.Pd_ = Pd_; iP.Ddx = P_Ddx; iP.Mdx = P_Mdx
Pd__ += Pd_
return Pd__
def scan_P_(P_, _P_): # test for x overlap between Ps, call comp_slice
derP_ = []
for P in P_: # lower row
for _P in _P_: # upper row
# test for x overlap between P and _P in 8 directions
if (P.x0 - 1 < (_P.x0 + _P.L) and (P.x0 + P.L) + 1 > _P.x0): # all Ps here are positive
fcomp = [1 for derP in P.upconnect_ if P is derP.P] # upconnect could be derP or dirP
if not fcomp:
derP = comp_slice(_P, P) # form vertical and directional derivatives
derP_.append(derP)
P.upconnect_.append(derP)
_P.downconnect_cnt += 1
elif (P.x0 + P.L) < _P.x0: # stop scanning the rest of lower P_ if there is no overlap
break
return derP_
def scan_Pd_(P_, _P_): # test for x overlap between Pds
derPd_ = []
for P in P_: # lower row
for _P in _P_: # upper row
for Pd in P.Pd_: # lower row Pds
for _Pd in _P.Pd_: # upper row Pds
# test for same sign & x overlap between Pd and _Pd in 8 directions
if (Pd.x0 - 1 < (_Pd.x0 + _Pd.L) and (Pd.x0 + Pd.L) + 1 > _Pd.x0) and (Pd.sign == _Pd.sign):
fcomp = [1 for derPd in Pd.upconnect_ if Pd is derPd.P] # upconnect could be derP or dirP
if not fcomp:
derPd = comp_slice(_Pd, Pd)
derPd_.append(derPd)
Pd.upconnect_.append(derPd)
_Pd.downconnect_cnt += 1
elif (Pd.x0 + Pd.L) < _Pd.x0: # stop scanning the rest of lower P_ if there is no overlap
break
return derPd_
def form_PP_root(blob, derP__, P__, derPd__, Pd__, fPPd):
'''
form vertically contiguous patterns of patterns by the sign of derP, in blob or in FPP
'''
blob.derP__ = derP__; blob.P__ = P__
blob.derPd__ = derPd__; blob.Pd__ = Pd__
if fPPd:
derP_2_PP_(blob.derP__, blob.PPdm_, 1) # cluster by derPm dP sign
derP_2_PP_(blob.derPd__, blob.PPdd_, 1) # cluster by derPd dP sign, not used
else:
derP_2_PP_(blob.derP__, blob.PPmm_, 0) # cluster by derPm mP sign
derP_2_PP_(blob.derPd__, blob.PPmd_, 0) # cluster by derPd mP sign, not used
def derP_2_PP_(derP_, PP_, fPPd):
'''
first row of derP_ has downconnect_cnt == 0, higher rows may also have them
'''
for derP in reversed(derP_): # bottom-up to follow upconnects, derP is stored top-down
if not derP.P.downconnect_cnt and not isinstance(derP.PP, CPP): # root derP was not terminated in prior call
PP = CPP() # init
accum_PP(PP,derP)
if derP._P.upconnect_: # derP has upconnects
upconnect_2_PP_(derP, PP_, fPPd) # form PPs across _P upconnects
else:
PP_.append(derP.PP)
def upconnect_2_PP_(iderP, PP_, fPPd):
'''
compare sign of lower-layer iderP to the sign of its upconnects to form contiguous same-sign PPs
'''
confirmed_upconnect_ = []
for derP in iderP._P.upconnect_: # potential upconnects from previous call
if derP not in iderP.PP.derP__: # this may occur after PP merging
if fPPd: same_sign = (iderP.dP > 0) == (derP.dP > 0) # comp dP sign
else: same_sign = (iderP.mP > 0) == (derP.mP > 0) # comp mP sign
if same_sign: # upconnect derP has different PP, merge them
if isinstance(derP.PP, CPP) and (derP.PP is not iderP.PP):
merge_PP(iderP.PP, derP.PP, PP_)
else: # accumulate derP in current PP
accum_PP(iderP.PP, derP)
confirmed_upconnect_.append(derP)
else:
if not isinstance(derP.PP, CPP): # sign changed, derP is root derP unless it already has FPP/PP
PP = CPP()
accum_PP(PP,derP)
derP.P.downconnect_cnt = 0 # reset downconnect count for root derP
iderP.PP.upconnect_.append(derP.PP) # add new initialized PP as upconnect of current PP
derP.PP.downconnect_cnt += 1 # add downconnect count to newly initialized PP
if derP._P.upconnect_:
upconnect_2_PP_(derP, PP_, fPPd) # recursive compare sign of next-layer upconnects
elif derP.PP is not iderP.PP and derP.P.downconnect_cnt == 0:
PP_.append(derP.PP) # terminate PP (not iPP) at the sign change
iderP._P.upconnect_ = confirmed_upconnect_
if not iderP.P.downconnect_cnt:
PP_.append(iderP.PP) # iPP is terminated after all upconnects are checked
def merge_PP(_PP, PP, PP_): # merge PP into _PP
for derP in PP.derP__:
if derP not in _PP.derP__:
_PP.derP__.append(derP) # add derP to PP
derP.PP = _PP # update reference
_PP.accum_from(derP) # accumulate params
if PP in PP_:
PP_.remove(PP) # remove merged PP
def accum_Dert(Dert: dict, **params) -> None:
Dert.update({param: Dert[param] + value for param, value in params.items()})
def accum_PP(PP, derP): # accumulate params in PP
PP.accum_from(derP) # accumulate params
PP.accum_from(derP.P) # accum derP's P base param to PP
PP.derP__.append(derP) # add derP to PP
derP.PP = PP # update reference
def comp_dx(P): # cross-comp of dx s in P.dert_
Ddx = 0
Mdx = 0
dxdert_ = []
_dx = P.dert_[0][2] # first dx
for dert in P.dert_[1:]:
dx = dert[2]
ddx = dx - _dx
        if (dx > 0) == (_dx > 0): mdx = min(dx, _dx)
else: mdx = -min(abs(dx), abs(_dx))
dxdert_.append((ddx, mdx)) # no dx: already in dert_
Ddx += ddx # P-wide cross-sign, P.L is too short to form sub_Ps
Mdx += mdx
_dx = dx
P.dxdert_ = dxdert_
P.Ddx = Ddx
P.Mdx = Mdx
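# Worked example (added; assumes the explicit (dx > 0) == (_dx > 0) sign test above):
# for dx values [2, 5, -1] the consecutive pairs give ddx = 3, -6 and mdx = 2, -1,
# so comp_dx leaves P.Ddx = -3 and P.Mdx = 1.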
def comp_slice(_P, P): # forms vertical derivatives of derP params, and conditional ders from norm and DIV comp
layer1 = dict({'I':.0,'Da':.0,'G':.0,'M':.0,'Dady':.0,'Dadx':.0,'Ga':.0,'Ma':.0,'L':.0,'Mdx':.0, 'Ddx':.0, 'x':.0})
mP, dP = 0, 0
absG = max(1,P.G + (ave_g*P.L)); _absG = max(1,_P.G + (ave_g*_P.L)) # use max to avoid zero division
absGa = max(1,P.Ga + (ave_ga *P.L)); _absGa = max(1,_P.Ga + (ave_ga *_P.L))
# compare base param to get layer1
for param_name in layer1:
if param_name == 'Da':
sin = P.Dy/absG ; cos = P.Dx/absG
_sin = _P.Dy/_absG; _cos = _P.Dx/_absG
param = [sin, cos]
_param = [_sin, _cos]
elif param_name == 'Dady':
sin = P.Dydy/absGa; cos = P.Dxdy/absGa
_sin = _P.Dydy/_absGa; _cos = _P.Dxdy/_absGa
param = [sin, cos]
_param = [_sin, _cos]
elif param_name == 'Dadx':
sin = P.Dydx/absGa; cos = P.Dxdx/absGa
_sin = _P.Dydx/_absGa; _cos = _P.Dxdx/_absGa
param = [sin, cos]
_param = [_sin, _cos]
elif param_name == "x":
_param = _P.dX # _dX
param = P.x # dX
elif param_name == "L" or param_name == "M":
hyp = np.hypot(P.x, 1) # ratio of local segment of long (vertical) axis to dY = 1
_param = getattr(_P,param_name)
param = getattr(P,param_name) / hyp # orthogonal L & M are reduced by hyp
else:
param = getattr(P, param_name)
_param = getattr(_P, param_name)
dm = comp_param(param, _param, param_name, ave_min) # add ave_min, * P.L is not needed?
layer1[param_name] = dm
mP += dm.m
dP += dm.d
'''
s, x0, Dx, Dy, G, M, L, Ddx, Mdx = P.sign, P.x0, P.Dx, P.Dy, P.G, P.M, P.L, P.Ddx, P.Mdx # params per comp branch
_s, _x0, _Dx, _Dy, _G, _M, _dX, _L, _Ddx, _Mdx = _P.sign, _P.x0, _P.Dx, _P.Dy, _P.G, _P.M, _P.dX, _P.L, _P.Ddx, _P.Mdx
dX = (x0 + (L-1) / 2) - (_x0 + (_L-1) / 2) # x shift: d_ave_x, or from offsets: abs(x0 - _x0) + abs(xn - _xn)?
ddX = dX - _dX # long axis curvature, if > ave: ortho eval per P, else per PP_dX?
mdX = min(dX, _dX) # dX is inversely predictive of mP?
hyp = np.hypot(dX, 1) # ratio of local segment of long (vertical) axis to dY = 1
L /= hyp # orthogonal L is reduced by hyp
dL = L - _L; mL = min(L, _L) # L: positions / sign, dderived: magnitude-proportional value
M /= hyp # orthogonal M is reduced by hyp
dM = M - _M; mM = min(M, _M) # use abs M? no Mx, My: non-core, lesser and redundant bias?
# G + Ave was wrong because Dy, Dx are summed as signed, resulting G is different from summed abs G
G = np.hypot(P.Dy, P.Dx)
if G == 0: G = 1
_G = np.hypot(_P.Dy, _P.Dx)
if _G == 0: _G = 1
sin = P.Dy / G; _sin = _P.Dy / _G
cos = P.Dx / G; _cos = _P.Dx / _G
sin_da = (cos * _sin) - (sin * _cos)
cos_da = (cos * _cos) + (sin * _sin)
da = np.arctan2( sin_da, cos_da )
ma = ave_ga - abs(da)
dP = dL + dM + da # -> directional PPd, equal-weight params, no rdn?
mP = mL + mM + ma # -> complementary PPm, rdn *= Pd | Pm rolp?
mP -= ave_mP * ave_rmP ** (dX / L) # dX / L is relative x-distance between P and _P,
'''
mP -= ave_mP * ave_rmP ** (P.dX / P.L)
derP = CderP(mP=mP, dP=dP, P=P, _P=_P, layer1=layer1)
P.derP = derP
return derP
def comp_slice_full(_P, P): # forms vertical derivatives of derP params, and conditional ders from norm and DIV comp
x0, Dx, Dy, L, = P.x0, P.Dx, P.Dy, P.L
# params per comp branch, add angle params
_x0, _Dx, _Dy,_dX, _L = _P.x0, _P.Dx, _P.Dy, _P.dX, _P.L
dX = (x0 + (L-1) / 2) - (_x0 + (_L-1) / 2) # x shift: d_ave_x, or from offsets: abs(x0 - _x0) + abs(xn - _xn)?
if dX > ave_dX: # internal comp is higher-power, else two-input comp not compressive?
xn = x0 + L - 1
_xn = _x0 + _L - 1
mX = min(xn, _xn) - max(x0, _x0) # overlap = abs proximity: summed binary x match
rX = dX / mX if mX else dX*2 # average dist / prox, | prox / dist, | mX / max_L?
ddX = dX - _dX # long axis curvature, if > ave: ortho eval per P, else per PP_dX?
mdX = min(dX, _dX) # dX is inversely predictive of mP?
    # does this look better, or would it be better to stick to the old code?
difference = P.difference(_P) # P - _P
match = P.min_match(_P) # min of P and _P
abs_match = P.abs_min_match(_P) # min of abs(P) and abs(_P)
dL = difference['L'] # L: positions / sign, dderived: magnitude-proportional value
mL = match['L']
dM = difference['M'] # use abs M? no Mx, My: non-core, lesser and redundant bias?
mM = match['M']
# min is value distance for opposite-sign comparands, vs. value overlap for same-sign comparands
dDy = difference['Dy'] # Dy per sub_P by intra_comp(dx), vs. less vertically specific dI
mDy = abs_match['Dy']
# no comp G: Dy, Dx are more specific:
dDx = difference['Dx'] # same-sign Dx if Pd
mDx = abs_match['Dx']
if dX * P.G > ave_ortho: # estimate params of P locally orthogonal to long axis, maximizing lateral diff and vertical match
# diagram: https://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/orthogonalization.png
# Long axis is a curve of connections between ave_xs: mid-points of consecutive Ps.
# Ortho virtually rotates P to connection-orthogonal direction:
hyp = np.hypot(dX, 1) # ratio of local segment of long (vertical) axis to dY = 1
L = L / hyp # orthogonal L
# combine derivatives in proportion to the contribution of their axes to orthogonal axes:
# contribution of Dx should increase with hyp(dX,dY=1), this is original direction of Dx:
Dy = (Dy / hyp + Dx * hyp) / 2 # estimated along-axis D
Dx = (Dy * hyp + Dx / hyp) / 2 # estimated cross-axis D
'''
alternatives:
oDy = (Dy * hyp - Dx / hyp) / 2; oDx = (Dx / hyp + Dy * hyp) / 2; or:
oDy = hypot( Dy / hyp, Dx * hyp); oDx = hypot( Dy * hyp, Dx / hyp)
'''
# recompute difference and match
dL = _L - L
mL = min(_L, L)
dDy = _Dy - Dy
mDy = min(abs(_Dy), abs(Dy))
dDx = _Dx - Dx
mDx = min(abs(_Dx), abs(Dx))
if (Dx > 0) != (_Dx > 0): mDx = -mDx
if (Dy > 0) != (_Dy > 0): mDy = -mDy
dDdx, dMdx, mDdx, mMdx = 0, 0, 0, 0
if P.dxdert_ and _P.dxdert_: # from comp_dx
fdx = 1
dDdx = difference['Ddx']
mDdx = abs_match['Ddx']
if (P.Ddx > 0) != (_P.Ddx > 0): mDdx = -mDdx
# Mdx is signed:
dMdx = match['Mdx']
mMdx = -abs_match['Mdx']
if (P.Mdx > 0) != (_P.Mdx > 0): mMdx = -mMdx
else:
fdx = 0
# coeff = 0.7 for semi redundant parameters, 0.5 for fully redundant parameters:
dP = ddX + dL + 0.7*(dM + dDx + dDy) # -> directional PPd, equal-weight params, no rdn?
# correlation: dX -> L, oDy, !oDx, ddX -> dL, odDy ! odDx? dL -> dDx, dDy?
if fdx: dP += 0.7*(dDdx + dMdx)
mP = mdX + mL + 0.7*(mM + mDx + mDy) # -> complementary PPm, rdn *= Pd | Pm rolp?
if fdx: mP += 0.7*(mDdx + mMdx)
mP -= ave_mP * ave_rmP ** (dX / L) # dX / L is relative x-distance between P and _P,
derP = CderP(P=P, _P=_P, mP=mP, dP=dP, dX=dX, mL=mL, dL=dL, mDx=mDx, dDx=dDx, mDy=mDy, dDy=dDy)
P.derP = derP
if fdx:
derP.fdx=1; derP.dDdx=dDdx; derP.mDdx=mDdx; derP.dMdx=dMdx; derP.mMdx=mMdx
'''
min comp for rotation: L, Dy, Dx, no redundancy?
mParam weighting by relative contribution to mP, /= redundancy?
div_f, nvars: if abs dP per PPd, primary comp L, the rest is normalized?
'''
return derP
''' radial comp extension for co-internal blobs:
!= sign comp x sum( adj_blob_) -> intra_comp value, isolation value, cross-sign merge if weak, else:
== sign comp x ind( adj_adj_blob_) -> same-sign merge | composition:
borrow = adj_G * rA: default sum div_comp S -> relative area and distance to adjj_blob_
internal sum comp if mA: in thin lines only? comp_norm_G or div_comp_G -> rG?
isolation = decay + contrast:
G - G * (rA * ave_rG: decay) - (rA * adj_G: contrast, = lend | borrow, no need to compare vG?)
if isolation: cross adjj_blob composition eval,
else: cross adjj_blob merge eval:
blob merger if internal match (~raG) - isolation, rdn external match:
blob compos if external match (~rA?) + isolation,
Also eval comp_slice over fork_?
rng+ should preserve resolution: rng+_dert_ is dert layers,
rng_sum-> rng+, der+: whole rng, rng_incr-> angle / past vs next g,
rdn Rng | rng_ eval at rng term, Rng -= lost coord bits mag, always > discr?
Add comp_PP_recursive
'''
# draft of comp_PP, following structure of comp_blob
def comp_PP_(blob, fPPd):
for fPd in [0,1]:
if fPPd: # cluster by d sign
if fPd: # using derPd (PPdd)
PP_ = blob.PPdd_
else: # using derPm (PPdm)
PP_ = blob.PPdm_
for PP in PP_:
                if len(PP.derPPd_) == 0: # PP didn't perform any search in a prior function call
comp_PP_recursive(PP, PP.upconnect_, derPP_=[], fPPd=fPPd)
form_PPP_(PP_, fPPd)
else: # cluster by m sign
if fPd: # using derPd (PPmd)
PP_ = blob.PPmd_
else: # using derPm (PPmm)
PP_ = blob.PPmm_
for PP in PP_:
                if len(PP.derPPm_) == 0: # PP didn't perform any search in a prior function call
comp_PP_recursive(PP, PP.upconnect_, derPP_=[], fPPd=fPPd)
form_PPP_(PP_, fPPd)
def comp_PP_recursive(PP, upconnect_, derPP_, fPPd):
derPP_pair_ = [ [derPP.PP, derPP._PP] for derPP in derPP_]
for _PP in upconnect_:
if [_PP, PP] in derPP_pair_ : # derPP.PP = _PP, derPP._PP = PP
derPP = derPP_[derPP_pair_.index([_PP,PP])]
        elif [PP, _PP] not in derPP_pair_: # the same pair of PP and _PP wasn't checked prior to this function call
derPP = comp_PP(PP, _PP) # comp_PP
derPP_.append(derPP)
if "derPP" in locals(): # derPP exists
accum_derPP(PP, derPP, fPPd) # accumulate derPP
if fPPd: # PP cluster by d
mPP = derPP.mdPP # match of PPs' d
else: # PP cluster by m
mPP = derPP.mmPP # match of PPs' m
if mPP>0: # _PP replace PP to continue the searching
comp_PP_recursive(_PP, _PP.upconnect_, derPP_, fPPd)
elif fPPd and PP.neg_mdPP + PP.mdPP > ave_mPP: # evaluation to extend PPd comparison
PP.distance += len(_PP.Pd__) # approximate using number of Py, not so sure
PP.neg_mdPP += derPP.mdPP
comp_PP_recursive(PP, _PP.upconnect_, derPP_, fPPd)
elif not fPPd and PP.neg_mmPP + PP.mmPP > ave_mPP: # evaluation to extend PPm comparison
PP.distance += len(_PP.P__) # approximate using number of Py, not so sure
PP.neg_mmPP += derPP.mmPP
comp_PP_recursive(PP, _PP.upconnect_, derPP_, fPPd)
# draft
def form_PPP_(PP_, fPPd):
PPP_ = []
for PP in PP_:
if fPPd:
mPP = PP.mdPP # match of PP's d
PPP = PP.PPPd
else:
mPP = PP.mmPP # match of PP's m
PPP = PP.PPPm
if mPP > ave_mPPP and not isinstance(PPP, CPPP):
PPP = CPPP() # init new PPP
accum_PPP(PPP, PP, fPPd) # accum PP into PPP
form_PPP_recursive(PPP_, PPP, PP.upconnect_, checked_ids=[PP.id], fPPd=fPPd)
PPP_.append(PPP) # pack PPP after scanning all upconnects
return PPP_
def form_PPP_recursive(PPP_, PPP, upconnect_, checked_ids, fPPd):
for _PP in upconnect_:
if _PP.id not in checked_ids:
checked_ids.append(_PP.id)
if fPPd: _mPP = _PP.mdPP # match of _PPs' d
else: _mPP = _PP.mmPP # match of _PPs' m
if _mPP>0 : # _PP.mPP >0
if fPPd: _PPP = _PP.PPPd
else: _PPP = _PP.PPPm
if isinstance(_PPP, CPPP): # _PP's PPP exists, merge with current PPP
PPP_.remove(_PPP) # remove the merging PPP from PPP_
merge_PPP(PPP, _PPP, fPPd)
else:
accum_PPP(PPP, _PP, fPPd) # accum PP into PPP
if _PP.upconnect_: # continue with _PP upconnects
form_PPP_recursive(PPP_, PPP, _PP.upconnect_, checked_ids, fPPd)
def accum_PPP(PPP, PP, fPPd):
PPP.accum_from(PP) # accumulate parameter, including layer1
if fPPd:
PPP.PPd_.append(PP) # add PPd to PPP's PPd_
PP.PPPd = PPP # update PPP reference of PP
for derPPd in PP.derPPd_: # accumulate derPPd params, layer01 and layer11
PPP.accum_from(derPPd)
PPP.derPPd_.append(derPPd)
else:
PPP.PPm_.append(PP) # add PPm to PPP's PPm_
PP.PPPm = PPP # update PPP reference of PP
for derPPm in PP.derPPm_: # accumulate derPPm params, layer01 and layer11
PPP.accum_from(derPPm)
PPP.derPPm_.append(derPPm)
def merge_PPP(PPP, _PPP, fPPd):
if fPPd:
for _PP in _PPP.PPd_:
if _PP not in PPP.PPd_:
accum_PPP(PPP, _PP, fPPd)
else:
for _PP in _PPP.PPm_:
if _PP not in PPP.PPm_:
accum_PPP(PPP, _PP, fPPd)
def comp_PP(PP, _PP):
# compare PP and _PP base params to get layer 01 of derPP #-----------------
layer01 = dict({'I':.0,'Da':.0,'G':.0,'M':.0,'Dady':.0,'Dadx':.0,'Ga':.0,'Ma':.0,'L':.0,'Mdx':.0, 'Ddx':.0, 'x':.0})
mP, dP = 0, 0
absG = max(1, PP.G + (ave_g*PP.L)); _absG = max(1, _PP.G + (ave_g*_PP.L)) # use max to avoid zero division
absGa = max(1,PP.Ga + (ave_ga*PP.L)); _absGa = max(1, _PP.Ga + (ave_ga*_PP.L))
for param_name in layer01:
if param_name == 'Da':
# sin and cos components
sin = PP.Dy/absG; cos = PP.Dx/absG
_sin = _PP.Dy/_absG; _cos = _PP.Dx/_absG
param = [sin, cos]
_param = [_sin, _cos]
elif param_name == 'Dady':
# sin and cos components
sin = PP.Dydy/absGa; cos = PP.Dxdy/absGa
_sin = _PP.Dydy/_absGa; _cos = _PP.Dxdy/_absGa
param = [sin, cos]
_param = [_sin, _cos]
elif param_name == 'Dadx':
# sin and cos components
sin = PP.Dydx/absGa; cos = PP.Dxdx/absGa
_sin = _PP.Dydx/_absGa; _cos = _PP.Dxdx/_absGa
param = [sin, cos]
_param = [_sin, _cos]
elif param_name == "x":
_param = _PP.dX # _dX
param = PP.x # dX
elif param_name == "L" or param_name == "M":
hyp = np.hypot(PP.x, 1) # ratio of local segment of long (vertical) axis to dY = 1
_param = getattr(_PP,param_name)
param = getattr(PP,param_name) / hyp # orthogonal L & M are reduced by hyp
else:
param = getattr(PP, param_name)
_param = getattr(_PP, param_name)
dm = comp_param(param, _param, param_name, ave_mPP)
layer01[param_name] = dm
mP += dm.m
dP += dm.d
# compare layer1 to get layer11 #-------------------------------------------
layer11 = dict({'I':.0,'Da':.0,'G':.0,'M':.0,'Dady':.0,'Dadx':.0,'Ga':.0,'Ma':.0,'L':.0,'Mdx':.0, 'Ddx':.0, 'x':.0})
mmPP, dmPP, mdPP, ddPP = 0, 0, 0, 0
for i, ((param_name, dm), (_param_name, _dm)) in enumerate(zip(PP.layer1.items(), _PP.layer1.items())):
f_comp = 0
if param_name in ['Da', 'Dady', 'Dadx']: # angle, need convert to vector form
if dm.m > ave_comp and _dm.m >ave_comp: # check da.m of prior layer
f_comp = 1
                sin, cos = np.sin(dm.d), np.cos(dm.d) # da is computed from normalized dy and dx; do we still need to normalize it by absGa again here in layer1?
_sin, _cos = np.sin(_dm.d), np.cos(_dm.d)
param_d = [sin, cos]; param_m = dm.m
_param_d = [_sin, _cos]; _param_m = _dm.m
else:
if dm.m > ave_comp and _dm.m >ave_comp: # check m of prior layer
f_comp = 1
param_d = dm.d; param_m = dm.m
_param_d = _dm.d; _param_m = _dm.m
if f_comp:
dmd = comp_param(param_d, _param_d, param_name, ave_mPP) # dm of d
dmm = comp_param(param_m, _param_m, param_name, ave_mPP) # dm of m
layer11[param_name] = [dmd, dmm] # layer 2 in list ,storing dm of each d and m
mdPP += dmd.m # m from dm of d
ddPP += dmd.d # d from dm of d
mmPP += dmm.m # m from dm of m
dmPP += dmm.d # d from dm of m
else:
dmd = Cdm()
dmm = Cdm()
if PP.mP >ave_comp and PP.dP>ave_comp and _PP.mP >ave_comp and _PP.dP>ave_comp:
dmmP = comp_param(PP.mP, _PP.mP, [], ave_mPP) # dm of mP
dmdP = comp_param(PP.dP, _PP.dP, [], ave_mPP) # dm of dP
mdPP += dmdP.m # match of compared PPs' d components
ddPP += dmdP.d # difference of compared PPs' d components
mmPP += dmmP.m # match of compared PPs' m components
dmPP += dmmP.d # difference of compared PPs' m components
mmPP -= ave_mPP # match of compared PPs' m components
dmPP -= ave_mPP # difference of compared PPs' m components
derPP = CderPP(PP=PP, _PP=_PP, mmPP=mmPP, dmPP = dmPP, mdPP=mdPP, ddPP=ddPP,layer01=layer01, layer11=layer11)
'''
# match of compared PPs' m components
mmPP = match['mP'] + match['mx'] + match['mL'] + match['mDx'] + match['mDy'] - ave_mPP
# difference of compared PPs' m components
dmPP = difference['mP'] + difference['mx'] + difference['mL'] + difference['mDx'] + difference['mDy'] - ave_mPP
# match of compared PPs' d components
mdPP = match['dP'] + match['dx'] + match['dL'] + match['dDx'] + match['dDy']
# difference of compared PPs' d components
ddPP = difference['dP'] + difference['dx'] + difference['dL'] + difference['dDx'] + difference['dDy']
derPP = CderPP(PP=PP, _PP=_PP, mmPP=mmPP, dmPP = dmPP, mdPP=mdPP, ddPP=ddPP)
'''
return derPP
def accum_derPP(PP, derPP, fPPd):
if fPPd: # PP cluster by d
PP.derPPd_.append(derPP)
else: # PP cluster by m
PP.derPPm_.append(derPP)
PP.accum_from(derPP)
|
python
|
__version__ = "0.2"
from PyQNX6.core import *
|
python
|
import numpy
import six
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import collections_abc
from chainer.utils import type_check
def _tensordot(a, b, a_axes, b_axes, c_axes=None):
a_col_ndim = len(a_axes[1])
b_row_ndim = len(b_axes[0])
if a_col_ndim != b_row_ndim:
raise ValueError('axes count mismatch')
if a.ndim < a_col_ndim or b.ndim < b_row_ndim:
        raise ValueError('dimension of input tensors must be '
                         'greater than or equal to the dot-axes count ({})'
                         .format(a_col_ndim))
for a_axis, b_axis in zip(a_axes[1], b_axes[0]):
if a.shape[a_axis] != b.shape[b_axis]:
raise ValueError('shape mismatch')
xp = cuda.get_array_module(a)
y = xp.tensordot(a, b, axes=(tuple(a_axes[1]), tuple(b_axes[0])))
if c_axes is not None:
a_row_ndim = len(a_axes[0])
b_col_ndim = len(b_axes[1])
c_row_ndim = len(c_axes[0])
c_col_ndim = len(c_axes[1])
if a_row_ndim != c_row_ndim:
raise ValueError('axes count mismatch')
if b_col_ndim != c_col_ndim:
raise ValueError('axes count mismatch')
trans = [None for i in six.moves.range(y.ndim)]
table_a = [1 if i in a_axes[0] else 0 for i in six.moves.range(a.ndim)]
table_a = numpy.cumsum(table_a) - 1
for i, c_axis in enumerate(c_axes[0]):
trans[c_axis] = table_a[a_axes[0][i]]
table_b = [1 if i in b_axes[1] else 0 for i in six.moves.range(b.ndim)]
table_b = numpy.cumsum(table_b) - 1
for i, c_axis in enumerate(c_axes[1]):
trans[c_axis] = table_b[b_axes[1][i]] + len(a_axes[0])
for i, c_axis in enumerate(trans):
if i != c_axis:
y = xp.transpose(y, trans)
break
return y
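# Hedged shape sketch (plain NumPy, illustrative only): with a.shape == (5, 3, 2) and
# b.shape == (3, 2, 4), _tensordot(a, b, a_axes=[[0], [1, 2]], b_axes=[[0, 1], [2]])
# contracts the matching (3, 2) axes and returns an array of shape (5, 4).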
class TensorDot(function_node.FunctionNode):
def __init__(self, axes=2, a_axes=None, b_axes=None, c_axes=None,
dtype=None):
self.axes = axes
self.a_axes = a_axes
self.b_axes = b_axes
self.c_axes = c_axes
self.dtype = dtype
if isinstance(axes, collections_abc.Sequence):
if len(axes) != 2:
raise ValueError('axes must be a pair of sequence of integers '
'when it is a list or tuple.')
elif isinstance(axes, int):
pass
else:
raise TypeError('axes must be a pair of sequence of integers or '
'an integer')
def check_type_forward(self, in_types):
type_check.argname(in_types, ('a', 'b'))
a_type, b_type = in_types
type_check.expect(
a_type.dtype.kind == 'f',
b_type.dtype.kind == 'f',
)
def forward(self, inputs):
self.retain_inputs((0, 1))
a, b = inputs
if self.a_axes is None or self.b_axes is None:
a_axes = [[], []] # 0:row axes, 1:col axes
b_axes = [[], []] # 0:row axes, 1:col axes
axes = self.axes
if isinstance(axes, collections_abc.Sequence):
a_axes[1], b_axes[0] = axes
if numpy.isscalar(a_axes[1]):
a_axes[1] = a_axes[1],
if numpy.isscalar(b_axes[0]):
b_axes[0] = b_axes[0],
else:
a_axes[1] = six.moves.range(a.ndim - axes, a.ndim)
b_axes[0] = six.moves.range(axes)
a_range = six.moves.range(a.ndim)
a_axes[0] = [i for i in a_range if i not in a_axes[1]]
b_range = six.moves.range(b.ndim)
b_axes[1] = [i for i in b_range if i not in b_axes[0]]
self.a_axes = a_axes
self.b_axes = b_axes
c = _tensordot(a, b, self.a_axes, self.b_axes, self.c_axes)
if self.c_axes is None:
c_axes = [[], []] # 0:row axes, 1:col axes
c_row_ndim = len(self.a_axes[0])
c_col_ndim = len(self.b_axes[1])
c_axes[0] = six.moves.range(c_row_ndim)
c_axes[1] = six.moves.range(c_row_ndim, c_row_ndim + c_col_ndim)
self.c_axes = c_axes
return utils.force_array(c, self.dtype),
def backward(self, indexes, grad_outputs):
a, b = self.get_retained_inputs()
gc, = grad_outputs
ga = None
if 0 in indexes:
ga, = TensorDot(a_axes=self.c_axes,
b_axes=[self.b_axes[1], self.b_axes[0]],
c_axes=self.a_axes,
dtype=a.dtype).apply((gc, b))
gb = None
if 1 in indexes:
gb, = TensorDot(a_axes=[self.a_axes[1], self.a_axes[0]],
b_axes=self.c_axes,
c_axes=self.b_axes,
dtype=b.dtype).apply((a, gc))
return ga, gb
def tensordot(a, b, axes=2):
"""Returns the tensor dot product of two arrays along specified axes.
This is equivalent to compute dot product along the specified axes which
are treated as one axis by reshaping.
Args:
a (Variable): The first argument.
b (Variable): The second argument.
axes:
- If it is an integer, then ``axes`` axes at the last of ``a`` and
the first of ``b`` are used.
- If it is a pair of sequences of integers, then these two
sequences specify the list of axes for ``a`` and ``b``. The
corresponding axes are paired for sum-product.
Returns:
~chainer.Variable: The tensor dot product of ``a`` and ``b`` along the
axes specified by ``axes``.
.. admonition:: Example
>>> a = np.random.rand(5, 3, 2)
>>> b = np.random.rand(3, 2, 4)
>>> c = F.tensordot(a, b, axes=2)
>>> c.shape
(5, 4)
.. seealso:: :func:`numpy.tensordot`
"""
return TensorDot(axes=axes).apply((a, b))[0]
|
python
|
import pytest
from di.core.element import Element
from di.core.module import (
Module,
ModuleElementConsistencyCheck,
ModuleElementConsistencyError,
ModuleImportSolver,
ModuleImportSolverError,
)
def test_module_cycle():
modules = [Module(name=f"{index}") for index in range(3)]
modules[0].imports = {modules[1]}
modules[1].imports = {modules[2]}
modules[2].imports = {modules[0]}
solver = ModuleImportSolver()
with pytest.raises(ModuleImportSolverError):
solver.solve(modules)
def test_module_simple():
modules = [Module(name=f"{index}") for index in range(4)]
modules[1].imports = {modules[0]}
modules[2].imports = {modules[0]}
modules[3].imports = {modules[1], modules[2]}
solver = ModuleImportSolver()
plan = solver.solve(modules)
order = plan.steps
assert len(order[0]) == 1 and modules[0] in order[0]
assert len(order[1]) == 2 and modules[1] in order[1] and modules[2] in order[1]
assert len(order[2]) == 1 and modules[3] in order[2]
def test_module_consistency_check_internals():
check = ModuleElementConsistencyCheck()
elements = [Element(injector=..., strategy=...) for _ in range(4)]
check.check([Module(elements={*elements}, exports={*elements[:2]})])
with pytest.raises(ModuleElementConsistencyError):
check.check([Module(elements={*elements[:2]}, exports={*elements[1:]})])
a = Module(elements={*elements[:2]}, exports={*elements[:2]})
b = Module(elements={*elements[2:]}, exports={*elements}, imports={a})
check.check([a, b])
a = Module(elements={*elements[:2]}, exports={*elements[:1]})
b = Module(elements={*elements[2:]}, exports={*elements}, imports={a})
with pytest.raises(ModuleElementConsistencyError):
check.check([a, b])
a = Module(elements={*elements}, bootstrap={*elements})
check.check([a])
a = Module(elements={*elements[:2]}, bootstrap={*elements})
with pytest.raises(ModuleElementConsistencyError):
check.check([a])
a = Module(elements={*elements[:2]}, exports={*elements[:1]})
b = Module(elements={*elements[2:]}, bootstrap={*elements})
with pytest.raises(ModuleElementConsistencyError):
check.check([a, b])
def test_module_consistency_check_duplicates():
check = ModuleElementConsistencyCheck()
elements = [Element(injector=..., strategy=...) for _ in range(8)]
check.check(
[
Module(elements={*elements[:4]}, exports={*elements[:2]}),
Module(elements={*elements[4:]}, exports={*elements[6:]}),
]
)
with pytest.raises(ModuleElementConsistencyError):
check.check(
[
Module(elements={*elements}, exports={*elements[:2]}),
Module(elements={*elements[4:]}, exports={*elements[6:]}),
]
)
|
python
|
import acipdt
import xlrd
import xlwt
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from xlutils.copy import copy
from orderedset import OrderedSet
import sys
import time
import ipaddress
import getpass
import os
# Log levels 0 = None, 1 = Class only, 2 = Line
log_level = 2
# Define the name of the configuration file you will be using.
# This doesn't alter the folder name.
ACI_DEPLOY_FILE = 'aci_deploy.xls'
# Adding these values is NOT secure. Use for testing only.
APICIP = None
APICUSER = None
APICPASS = None
def stdout_log(sheet, line):
if log_level == 0:
return
elif ((log_level == (1) or log_level == (2)) and
(sheet) and (line is None)):
print('*' * 80)
print('Starting work on {} section'.format(sheet))
print('*' * 80)
elif log_level == (2) and (sheet) and (line is not None):
print('Deploying line {} from section {}...'.format(line, sheet))
else:
return
def read_in(usr_path):
try:
wb_path = os.path.join(usr_path, ACI_DEPLOY_FILE)
wb = xlrd.open_workbook(wb_path)
print("Workbook Loaded.")
except Exception as e:
print("Something went wrong logging opening the workbook - ABORT!")
sys.exit(e)
return wb
def findKeys(ws, rows):
func_list = OrderedSet()
for i in range(2, rows):
if (ws.cell(i, 0)).value:
func_list.add((ws.cell(i, 0)).value)
else:
i += 1
return func_list
def countKeys(ws, rows, func):
count = 0
for i in range(2, rows):
if (ws.cell(i, 0)).value == func:
count += 1
else:
i += 1
return count
def findVars(ws, rows, func, count):
var_list = []
var_dict = {}
for i in range(2, rows):
if (ws.cell(i, 0)).value == func:
try:
for x in range(4, 17):
if (ws.cell(i - 1, x)).value:
var_list.append((ws.cell(i - 1, x)).value)
else:
x += 1
except Exception as e:
e = e
pass
break
while count > 0:
var_dict[count] = {}
var_count = 0
for z in var_list:
var_dict[count][z] = ws.cell(i + count - 1, 4 + var_count).value
var_count += 1
var_dict[count]['row'] = i + count - 1
count -= 1
return var_dict
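# Added note (inferred from the loops above): the expected worksheet layout is one
# function name per row in column 0, with the matching variable names written one row
# above the function's first occurrence, starting at column 4; findVars then zips those
# headers with each row's cell values into var_dict, keyed by occurrence count.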
def wb_update(wr_ws, status, i):
    # build green, red, and yellow cell styles for excel
green_st = xlwt.easyxf('pattern: pattern solid;')
green_st.pattern.pattern_fore_colour = 3
red_st = xlwt.easyxf('pattern: pattern solid;')
red_st.pattern.pattern_fore_colour = 2
yellow_st = xlwt.easyxf('pattern: pattern solid;')
yellow_st.pattern.pattern_fore_colour = 5
# if stanzas to catch the status code from the request
# and then input the appropriate information in the workbook
# this then writes the changes to the doc
if status == 200:
wr_ws.write(i, 1, 'Success (200)', green_st)
if status == 400:
print("Error 400 - Bad Request - ABORT!")
print("Probably have a bad URL or payload")
wr_ws.write(i, 1, 'Bad Request (400)', red_st)
pass
if status == 401:
print("Error 401 - Unauthorized - ABORT!")
print("Probably have incorrect credentials")
wr_ws.write(i, 1, 'Unauthorized (401)', red_st)
pass
if status == 403:
print("Error 403 - Forbidden - ABORT!")
print("Server refuses to handle your request")
wr_ws.write(i, 1, 'Forbidden (403)', red_st)
pass
if status == 404:
print("Error 404 - Not Found - ABORT!")
print("Seems like you're trying to POST to a page that doesn't"
" exist.")
        wr_ws.write(i, 1, 'Not Found (404)', red_st)
pass
if status == 666:
print("Error - Something failed!")
print("The POST failed, see stdout for the exception.")
        wr_ws.write(i, 1, 'Unknown Failure', yellow_st)
pass
if status == 667:
print("Error - Invalid Input!")
print("Invalid integer or other input.")
        wr_ws.write(i, 1, 'Invalid Input', yellow_st)
pass
def pod_policies(apic, cookies, wb, wr_wb):
ws = wb.sheet_by_name('Fabric Pod Policies')
wr_ws = wr_wb.get_sheet(0)
rows = ws.nrows
func_list = findKeys(ws, rows)
podpol = acipdt.FabPodPol(apic, cookies)
stdout_log(wr_ws.name, None)
for func in func_list:
count = countKeys(ws, rows, func)
var_dict = findVars(ws, rows, func, count)
for pos in var_dict:
row_num = var_dict[pos]['row']
del var_dict[pos]['row']
for x in list(var_dict[pos].keys()):
if var_dict[pos][x] == '':
del var_dict[pos][x]
stdout_log(wr_ws.name, row_num)
status = eval("podpol.%s(**var_dict[pos])" % func)
wb_update(wr_ws, status, row_num)
time.sleep(.025)
def access_policies(apic, cookies, wb, wr_wb):
ws = wb.sheet_by_name('Fabric Access Policies')
wr_ws = wr_wb.get_sheet(1)
rows = ws.nrows
func_list = findKeys(ws, rows)
accpol = acipdt.FabAccPol(apic, cookies)
stdout_log(wr_ws.name, None)
for func in func_list:
count = countKeys(ws, rows, func)
var_dict = findVars(ws, rows, func, count)
for pos in var_dict:
row_num = var_dict[pos]['row']
del var_dict[pos]['row']
for x in list(var_dict[pos].keys()):
if var_dict[pos][x] == '':
del var_dict[pos][x]
stdout_log(wr_ws.name, row_num)
status = eval("accpol.%s(**var_dict[pos])" % func)
wb_update(wr_ws, status, row_num)
time.sleep(.025)
def tn_policies(apic, cookies, wb, wr_wb):
ws = wb.sheet_by_name('Tenant Configuration')
wr_ws = wr_wb.get_sheet(2)
rows = ws.nrows
func_list = findKeys(ws, rows)
tnpol = acipdt.FabTnPol(apic, cookies)
stdout_log(wr_ws.name, None)
for func in func_list:
count = countKeys(ws, rows, func)
var_dict = findVars(ws, rows, func, count)
for pos in var_dict:
row_num = var_dict[pos]['row']
del var_dict[pos]['row']
for x in list(var_dict[pos].keys()):
if var_dict[pos][x] == '':
del var_dict[pos][x]
stdout_log(wr_ws.name, row_num)
status = eval("tnpol.%s(**var_dict[pos])" % func)
wb_update(wr_ws, status, row_num)
time.sleep(.025)
def l3_policies(apic, cookies, wb, wr_wb):
ws = wb.sheet_by_name('L3 Out')
wr_ws = wr_wb.get_sheet(3)
rows = ws.nrows
func_list = findKeys(ws, rows)
l3pol = acipdt.FabL3Pol(apic, cookies)
stdout_log(wr_ws.name, None)
for func in func_list:
count = countKeys(ws, rows, func)
var_dict = findVars(ws, rows, func, count)
for pos in var_dict:
row_num = var_dict[pos]['row']
del var_dict[pos]['row']
for x in list(var_dict[pos].keys()):
if var_dict[pos][x] == '':
del var_dict[pos][x]
stdout_log(wr_ws.name, row_num)
status = eval("l3pol.%s(**var_dict[pos])" % func)
wb_update(wr_ws, status, row_num)
time.sleep(.025)
def vmm_policies(apic, cookies, wb, wr_wb):
ws = wb.sheet_by_name('VMM')
wr_ws = wr_wb.get_sheet(4)
rows = ws.nrows
func_list = findKeys(ws, rows)
vmm = acipdt.FabVMM(apic, cookies)
stdout_log(wr_ws.name, None)
for func in func_list:
count = countKeys(ws, rows, func)
var_dict = findVars(ws, rows, func, count)
for pos in var_dict:
row_num = var_dict[pos]['row']
del var_dict[pos]['row']
for x in list(var_dict[pos].keys()):
if var_dict[pos][x] == '':
del var_dict[pos][x]
stdout_log(wr_ws.name, row_num)
status = eval("vmm.%s(**var_dict[pos])" % func)
wb_update(wr_ws, status, row_num)
time.sleep(.025)
def fab_admin_policies(apic, cookies, wb, wr_wb):
ws = wb.sheet_by_name('Fabric Admin')
wr_ws = wr_wb.get_sheet(5)
rows = ws.nrows
func_list = findKeys(ws, rows)
fabadmin = acipdt.FabAdminMgmt(apic, cookies)
stdout_log(wr_ws.name, None)
for func in func_list:
count = countKeys(ws, rows, func)
var_dict = findVars(ws, rows, func, count)
for pos in var_dict:
row_num = var_dict[pos]['row']
del var_dict[pos]['row']
for x in list(var_dict[pos].keys()):
if var_dict[pos][x] == '':
del var_dict[pos][x]
stdout_log(wr_ws.name, row_num)
status = eval("fabadmin.%s(**var_dict[pos])" % func)
wb_update(wr_ws, status, row_num)
time.sleep(.025)
def mpod_policies(apic, cookies, wb, wr_wb):
ws = wb.sheet_by_name('Multipod')
wr_ws = wr_wb.get_sheet(6)
rows = ws.nrows
func_list = findKeys(ws, rows)
mpod = acipdt.Mpod(apic, cookies)
stdout_log(wr_ws.name, None)
for func in func_list:
count = countKeys(ws, rows, func)
var_dict = findVars(ws, rows, func, count)
for pos in var_dict:
row_num = var_dict[pos]['row']
del var_dict[pos]['row']
for x in list(var_dict[pos].keys()):
if var_dict[pos][x] == '':
del var_dict[pos][x]
stdout_log(wr_ws.name, row_num)
status = eval("mpod.%s(**var_dict[pos])" % func)
wb_update(wr_ws, status, row_num)
time.sleep(.025)
def take_snapshot(apic, cookies, snapshot_name):
query = acipdt.Query(apic, cookies)
query_string = 'configSnapshot'
query_payload = query.query_class(query_string)
payload_len = len(query_payload[1]['imdata'])
snap_count = 0
for x in range(0, payload_len):
try:
if (query_payload[1]['imdata'][x]['configSnapshot']['attributes']
['fileName'])[4:17] == snapshot_name:
snap_count += 1
except Exception as e:
e = e
print("It seems the APIC does not support snapshots, moving on.")
return(None)
if snap_count > 0:
print("A snapshot including 'acipdt_backup' already exists. Would you "
"like to delete this snapshot or exit?")
user_input = input("Delete 'd' or Exit 'q' [q]: ")
selection = user_input or 'q'
if selection.lower() == 'd':
del_snap_pol(apic, cookies, snapshot_name)
elif selection.lower() == 'q':
sys.exit()
snapshot = 'true'
status = 'created,modified'
snapshot_args = {}
snapshot_args['name'] = snapshot_name
snapshot_args['snapshot'] = snapshot
snapshot_args['status'] = status
cfgmgmt = acipdt.FabCfgMgmt(apic, cookies)
status = cfgmgmt.backup(**snapshot_args)
if status == 200:
print("Snapshot taken successfully, continuing.")
time.sleep(1)
snap = True
return(snap)
else:
print("Snapshot failed for some reason, do you want to continue?")
while True:
user_input = input("Continue 'y' or 'n' [n]: ")
selection = user_input or 'n'
if selection.lower() == 'y':
snap = None
return(snap)
elif selection.lower() == 'n':
del_snap_pol(apic, cookies, snapshot_name)
sys.exit()
def revert_snapshot(apic, cookies, snapshot_name):
print('Deployment completed, please verify status in workbook.')
while True:
user_input = input("Rollback to previous snapshot 'y' or 'n' [n]: ")
selection = user_input or 'n'
if selection.lower() == 'n':
return
elif selection.lower() == 'y':
query = acipdt.Query(apic, cookies)
query_string = 'configSnapshot'
query_payload = query.query_class(query_string)
payload_len = len(query_payload[1]['imdata'])
for x in range(0, payload_len):
if (query_payload[1]['imdata'][x]['configSnapshot']
['attributes']['fileName'])[4:17] == snapshot_name:
snapshot_name = (query_payload[1]['imdata'][x]
['configSnapshot']['attributes']
['fileName'])
break
cfgmgmt = acipdt.FabCfgMgmt(apic, cookies)
snapshot_args = {}
snapshot_args['name'] = snapshot_name
cfgmgmt.snapback(**snapshot_args)
return
def del_snap_pol(apic, cookies, snapshot_name):
status = 'deleted'
snapshot = 'true'
snapshot_args = {}
snapshot_args['name'] = snapshot_name
snapshot_args['snapshot'] = snapshot
snapshot_args['status'] = status
cfgmgmt = acipdt.FabCfgMgmt(apic, cookies)
status = cfgmgmt.backup(**snapshot_args)
def main():
# Disable urllib3 warnings
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Ask user for path to the ACI_DEPLOY_FILE
while True:
print('Please enter the path to {0}, note that this is '
'also where the workbook will be saved upon completion.'.format(ACI_DEPLOY_FILE))
usr_path = input('Path: ')
if os.path.exists(usr_path):
break
else:
print('Enter a valid path.')
# Static snapshot name
snapshot_name = 'acipdt_backup'
# Prompt for APIC IP if the constant is None
if APICIP is not None:
apic = APICIP
else:
while True:
apic = input('Enter the APIC IP: ')
try:
ipaddress.ip_address(apic)
break
except Exception as e:
print('Enter a valid IP address. Error received: {}'.format(e))
# Prompt for APIC Username if the constant is None
if APICUSER is not None:
user = APICUSER
else:
user = input('Enter APIC username: ')
# Prompt for APIC Password if the constant is None
if APICPASS is not None:
pword = APICPASS
else:
while True:
try:
pword = getpass.getpass(prompt='Enter APIC password: ')
break
except Exception as e:
print('Something went wrong. Error received: {}'.format(e))
# Initialize the fabric login method, passing appropriate variables
fablogin = acipdt.FabLogin(apic, user, pword)
# Run the login and load the cookies var
cookies = fablogin.login()
# Load workbook
wb = read_in(usr_path)
# Copy workbook to a RW version
wr_wb = copy(wb)
snap = take_snapshot(apic, cookies, snapshot_name)
pod_policies(apic, cookies, wb, wr_wb)
access_policies(apic, cookies, wb, wr_wb)
tn_policies(apic, cookies, wb, wr_wb)
l3_policies(apic, cookies, wb, wr_wb)
vmm_policies(apic, cookies, wb, wr_wb)
fab_admin_policies(apic, cookies, wb, wr_wb)
mpod_policies(apic, cookies, wb, wr_wb)
# Save workbook to user path
wr_wb.save('{0}/{1}'.format(usr_path, ACI_DEPLOY_FILE))
if snap is not None:
revert_snapshot(apic, cookies, snapshot_name)
del_snap_pol(apic, cookies, snapshot_name)
if __name__ == '__main__':
main()
|
python
|
from distutils.version import StrictVersion
from unittest.mock import patch
from keepassxc_pwned.keepass_wrapper import KeepassWrapper
from .common import *
# use keepassxc.cli wrapper script to test changing location of keepassxc-cli
keepass_different_binary = os.path.abspath(
os.path.join(this_dir, "keepassxc.cli"))
class OldKeepassWrapper(KeepassWrapper):
def version(self) -> StrictVersion:
return StrictVersion("2.4.9")
class VersionError(KeepassWrapper):
def version(self):
return StrictVersion("2.5.a") # raises ValueError
def test_is_strict_version():
assert isinstance(KeepassWrapper().version(), StrictVersion)
def test_subcommand_old():
assert OldKeepassWrapper().backwards_compatible_export() == "extract"
def test_subcommand_new():
assert KeepassWrapper().backwards_compatible_export() == "export"
def test_issue_parsing_version_string():
# should return "export" by default (newer syntax)
assert VersionError().backwards_compatible_export() == "export"
@patch("shutil.which", return_value=None)
def test_no_keepass_cli(mock_shutil_which, caplog):
# with default keepassxc-cli as --keepassxc-cli flag (binary)
with pytest.raises(SystemExit):
assert KeepassWrapper().verify_binary_exists()
assert ("Could not find a binary called keepassxc-cli on your $PATH."
in caplog.text)
def test_use_different_binary():
k = KeepassWrapper(keepass_different_binary)
assert k.keepassxc_cli_location == keepass_different_binary
assert k.backwards_compatible_export() == "export"
|
python
|
# -*- coding: utf-8 -*-
'''
Support for the softwareupdate command on MacOS.
'''
from __future__ import absolute_import
# Import python libs
import re
import os
# import salt libs
import salt.utils
import salt.utils.mac_utils
from salt.exceptions import CommandExecutionError, SaltInvocationError
__virtualname__ = 'softwareupdate'
def __virtual__():
'''
Only for MacOS
'''
if not salt.utils.is_darwin():
return (False, 'The softwareupdate module could not be loaded: '
'module only works on MacOS systems.')
return __virtualname__
def _get_available(recommended=False, restart=False):
'''
Utility function to get all available update packages.
    Sample return data:
{ 'updatename': '1.2.3-45', ... }
'''
cmd = ['softwareupdate', '--list']
out = salt.utils.mac_utils.execute_return_result(cmd)
# rexp parses lines that look like the following:
# * Safari6.1.2MountainLion-6.1.2
# Safari (6.1.2), 51679K [recommended]
# - iCal-1.0.2
# iCal, 1.0.2, 6520K
rexp = re.compile('(?m)^ [*|-] '
r'([^ ].*)[\r\n].*\(([^\)]+)')
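    # Illustrative note (based only on the sample lines in the comment above): for the
    # Safari entry, rexp.findall(out) would include ('Safari6.1.2MountainLion-6.1.2', '6.1.2').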
if salt.utils.is_true(recommended):
# rexp parses lines that look like the following:
# * Safari6.1.2MountainLion-6.1.2
# Safari (6.1.2), 51679K [recommended]
rexp = re.compile('(?m)^ [*] '
r'([^ ].*)[\r\n].*\(([^\)]+)')
keys = ['name', 'version']
_get = lambda l, k: l[keys.index(k)]
updates = rexp.findall(out)
ret = {}
for line in updates:
name = _get(line, 'name')
version_num = _get(line, 'version')
ret[name] = version_num
if not salt.utils.is_true(restart):
return ret
# rexp parses lines that look like the following:
# * Safari6.1.2MountainLion-6.1.2
# Safari (6.1.2), 51679K [recommended] [restart]
rexp1 = re.compile('(?m)^ [*|-] '
r'([^ ].*)[\r\n].*restart*')
restart_updates = rexp1.findall(out)
ret_restart = {}
for update in ret:
if update in restart_updates:
ret_restart[update] = ret[update]
return ret_restart
def list_available(recommended=False, restart=False):
'''
List all available updates.
:param bool recommended: Show only recommended updates.
:param bool restart: Show only updates that require a restart.
:return: Returns a dictionary containing the updates
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' softwareupdate.list_available
'''
return _get_available(recommended, restart)
def ignore(name):
'''
    Ignore a specific program update. When an update is ignored, the '-' and
    version number at the end are omitted, so "SecUpd2014-001-1.0" becomes
    "SecUpd2014-001"; the suffix is stripped automatically if present. An update
    is successfully ignored when it no longer shows up in the output of
    list_available.
:param name: The name of the update to add to the ignore list.
:ptype: str
:return: True if successful, False if not
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' softwareupdate.ignore <update-name>
'''
# remove everything after and including the '-' in the updates name.
to_ignore = name.rsplit('-', 1)[0]
cmd = ['softwareupdate', '--ignore', to_ignore]
salt.utils.mac_utils.execute_return_success(cmd)
return to_ignore in list_ignored()
def list_ignored():
'''
List all updates that have been ignored. Ignored updates are shown
without the '-' and version number at the end, this is how the
softwareupdate command works.
:return: The list of ignored updates
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' softwareupdate.list_ignored
'''
cmd = ['softwareupdate', '--list', '--ignore']
out = salt.utils.mac_utils.execute_return_result(cmd)
# rep parses lines that look like the following:
# "Safari6.1.2MountainLion-6.1.2",
# or:
# Safari6.1.2MountainLion-6.1.2
rexp = re.compile('(?m)^ ["]?'
r'([^,|\s].*[^"|\n|,])[,|"]?')
return rexp.findall(out)
def reset_ignored():
'''
    Clear the list of ignored updates so that previously ignored updates are
    no longer ignored.
    :return: True if the list was reset, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' softwareupdate.reset_ignored
'''
cmd = ['softwareupdate', '--reset-ignored']
salt.utils.mac_utils.execute_return_success(cmd)
return list_ignored() == []
def schedule_enabled():
'''
Check the status of automatic update scheduling.
:return: True if scheduling is enabled, False if disabled
- ``True``: Automatic checking is on,
- ``False``: Automatic checking is off,
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' softwareupdate.schedule_enabled
'''
cmd = ['softwareupdate', '--schedule']
ret = salt.utils.mac_utils.execute_return_result(cmd)
enabled = ret.split()[-1]
return salt.utils.mac_utils.validate_enabled(enabled) == 'on'
def schedule_enable(enable):
'''
Enable/disable automatic update scheduling.
    :param enable: True/On/Yes/1 to turn on automatic updates. False/No/Off/0 to
    turn off automatic updates.
:type: bool str
:return: True if scheduling is enabled, False if disabled
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' softwareupdate.schedule_enable on|off
'''
status = salt.utils.mac_utils.validate_enabled(enable)
cmd = ['softwareupdate',
'--schedule',
salt.utils.mac_utils.validate_enabled(status)]
salt.utils.mac_utils.execute_return_success(cmd)
return salt.utils.mac_utils.validate_enabled(schedule_enabled()) == status
def update_all(recommended=False, restart=True):
'''
Install all available updates. Returns a dictionary containing the name
of the update and the status of its installation.
:param bool recommended: If set to True, only install the recommended
updates. If set to False (default) all updates are installed.
:param bool restart: Set this to False if you do not want to install updates
that require a restart. Default is True
:return: A dictionary containing the updates that were installed and the
status of its installation. If no updates were installed an empty dictionary
is returned.
:rtype: dict
- ``True``: The update was installed.
- ``False``: The update was not installed.
CLI Example:
.. code-block:: bash
salt '*' softwareupdate.update_all
'''
to_update = _get_available(recommended, restart)
if not to_update:
return {}
for _update in to_update:
cmd = ['softwareupdate', '--install', _update]
salt.utils.mac_utils.execute_return_success(cmd)
ret = {}
updates_left = _get_available()
for _update in to_update:
ret[_update] = True if _update not in updates_left else False
return ret
def update(name):
'''
Install a named update.
:param str name: The name of the of the update to install.
:return: True if successfully updated, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' softwareupdate.update <update-name>
'''
if not update_available(name):
raise SaltInvocationError('Update not available: {0}'.format(name))
cmd = ['softwareupdate', '--install', name]
salt.utils.mac_utils.execute_return_success(cmd)
return not update_available(name)
def update_available(name):
'''
Check whether or not an update is available with a given name.
:param str name: The name of the update to look for
:return: True if available, False if not
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' softwareupdate.update_available <update-name>
salt '*' softwareupdate.update_available "<update with whitespace>"
'''
return name in _get_available()
def list_downloads():
'''
Return a list of all updates that have been downloaded locally.
:return: A list of updates that have been downloaded
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' softwareupdate.list_downloads
'''
outfiles = []
for root, subFolder, files in os.walk('/Library/Updates'):
for f in files:
outfiles.append(os.path.join(root, f))
dist_files = []
for f in outfiles:
if f.endswith('.dist'):
dist_files.append(f)
ret = []
for update in _get_available():
for f in dist_files:
with salt.utils.fopen(f) as fhr:
if update.rsplit('-', 1)[0] in fhr.read():
ret.append(update)
return ret
def download(name):
'''
Download a named update so that it can be installed later with the
``update`` or ``update_all`` functions
:param str name: The update to download.
:return: True if successful, otherwise False
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' softwareupdate.download <update name>
'''
if not update_available(name):
raise SaltInvocationError('Update not available: {0}'.format(name))
if name in list_downloads():
return True
cmd = ['softwareupdate', '--download', name]
salt.utils.mac_utils.execute_return_success(cmd)
return name in list_downloads()
def download_all(recommended=False, restart=True):
'''
Download all available updates so that they can be installed later with the
``update`` or ``update_all`` functions. It returns a list of updates that
are now downloaded.
:param bool recommended: If set to True, only install the recommended
updates. If set to False (default) all updates are installed.
:param bool restart: Set this to False if you do not want to install updates
that require a restart. Default is True
:return: A list containing all downloaded updates on the system.
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' softwareupdate.download_all
'''
to_download = _get_available(recommended, restart)
for name in to_download:
download(name)
return list_downloads()
def get_catalog():
'''
.. versionadded:: 2016.3.0
Get the current catalog being used for update lookups. Will return a url if
a custom catalog has been specified. Otherwise the word 'Default' will be
returned
:return: The catalog being used for update lookups
:rtype: str
CLI Example:
.. code-block:: bash
salt '*' softwareupdates.get_catalog
'''
cmd = ['defaults',
'read',
'/Library/Preferences/com.apple.SoftwareUpdate.plist']
out = salt.utils.mac_utils.execute_return_result(cmd)
if 'AppleCatalogURL' in out:
cmd.append('AppleCatalogURL')
out = salt.utils.mac_utils.execute_return_result(cmd)
return out
elif 'CatalogURL' in out:
cmd.append('CatalogURL')
out = salt.utils.mac_utils.execute_return_result(cmd)
return out
else:
return 'Default'
def set_catalog(url):
'''
.. versionadded:: 2016.3.0
Set the Software Update Catalog to the URL specified
:param str url: The url to the update catalog
:return: True if successful, False if not
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' softwareupdates.set_catalog http://swupd.local:8888/index.sucatalog
'''
# This command always returns an error code, though it completes
# successfully. Success will be determined by making sure get_catalog
# returns the passed url
cmd = ['softwareupdate', '--set-catalog', url]
try:
salt.utils.mac_utils.execute_return_success(cmd)
except CommandExecutionError as exc:
pass
return get_catalog() == url
def reset_catalog():
'''
.. versionadded:: 2016.3.0
Reset the Software Update Catalog to the default.
:return: True if successful, False if not
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' softwareupdates.reset_catalog
'''
# This command always returns an error code, though it completes
# successfully. Success will be determined by making sure get_catalog
# returns 'Default'
cmd = ['softwareupdate', '--clear-catalog']
try:
salt.utils.mac_utils.execute_return_success(cmd)
except CommandExecutionError as exc:
pass
return get_catalog() == 'Default'
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 13:01:46 2020
@author: dnb3k
"""
#use the unique Full Tokens code to get the dataframe
import ossPyFuncs
import pandas as pd
import wordcloud
import re
import matplotlib.pyplot as plt
import os
import numpy as np
#perform sql query to get company column
postgreSql_selectQuery="SELECT company FROM gh.ctrs_raw ;"
inputRaw=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)
#obtain the erase list
currentDir=os.path.dirname('ossPyFuncs.py')
eraseList=pd.read_csv(os.path.join(currentDir,'keyFiles/eraseStrings_v6.csv'),quotechar="'")
#apply the erase list
semiCleanedOutput=pd.DataFrame(ossPyFuncs.eraseFromColumn(inputRaw['company'],eraseList))
#get the counts for the unique values
tableUniqueFullNameCounts=semiCleanedOutput.iloc[:,0].value_counts()
#convert that output to a proper table
tableUniqueFullNameCounts=tableUniqueFullNameCounts.reset_index()
#rename the columns
tableUniqueFullNameCounts.rename(columns={"company":"count","index":"company"},inplace=True)
selfEmployedKeys=re.compile('(?i)self|Me, myself and I|personal|^home$|private|individual|myself|^me$|\\bindependent\\b|independent contractor|consultant|freelancer|freelance|self-employed| my ')
dataTest2=tableUniqueFullNameCounts[tableUniqueFullNameCounts['company'].str.contains(selfEmployedKeys)]
#exploratory alternative filter (would overwrite dataTest2 if re-enabled):
#dataTest2=tableUniqueFullNameCounts[tableUniqueFullNameCounts['company'].str.contains('(?i)S\.R\.L\.')]
freelanceSum=np.sum(dataTest2['count'])
allSum=np.sum(tableUniqueFullNameCounts['count'])
def addBooleanColumnFromCriteria(inputDataToAssess,assessItems,newColumnName):
    """iteratively determine if input column contains member of other column
    Keyword arguments:
    inputDataToAssess -- a single-column pandas dataframe, this will be the set of
    target words/entries that the criteria will be checked against
    assessItems -- a series or dataframe containing strings
    (regex expressions) which will be searched for (as substrings)
    in the first column of inputDataToAssess. This will be done in an iterative fashion, and
    a boolean vector will be created and appended to the output, indicating
    which entries in inputDataToAssess contained a substring from assessItems.
    newColumnName -- name of the new column (i.e. 'government', 'academic', etc.)
    """
    import pandas as pd
    import re
    inputDataToAssess[newColumnName]=False
    #necessary, due to escape nonsense
    inputColumn=inputDataToAssess.iloc[:,0].replace(regex=True, to_replace='\\\\',value='/')
    for index, row in assessItems.iterrows():
        curReplaceVal=row[0]
        currentRegexExpression=re.compile(curReplaceVal)
        CurrentBoolVec=inputColumn.str.contains(currentRegexExpression,na=False)
        inputDataToAssess.loc[CurrentBoolVec,newColumnName]=True
    return inputDataToAssess
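# Hypothetical usage sketch (the keyword list below is illustrative, not part of
# this script): flag rows of tableUniqueFullNameCounts whose company string
# matches an academic-sounding regex.
#   academicKeys = pd.DataFrame(['(?i)university', '(?i)institute', '(?i)college'])
#   flagged = addBooleanColumnFromCriteria(tableUniqueFullNameCounts, academicKeys, 'academic')
#   print(flagged['academic'].sum())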
|
python
|
# Write the benchmarking functions here.
# See "Writing benchmarks" in the asv docs for more information.
from math import radians
import numpy as np
from passpredict import _rotations
class Rotations:
"""
Example from Vallado, Eg 11-6, p. 912
"""
def setup(self, *args):
self.lat = radians(42.38)
self.lon = radians(-71.13)
self.location_ecef = np.array([1526.122, -4465.064, 4276.894])
self.satellite_ecef = np.array([885.7296, -4389.3856, 5070.1765])
def time_ecef_to_razel(self):
_rotations.razel(self.lat, self.lon, self.location_ecef, self.satellite_ecef)
def time_elevation_at(self):
_rotations.elevation_at(self.lat, self.lon, self.location_ecef, self.satellite_ecef)
def time_range_at(self):
_rotations.range_at(self.lat, self.lon, self.location_ecef, self.satellite_ecef)
def time_ecef_to_llh(self):
_rotations.ecef_to_llh(self.satellite_ecef)
class SolarRotations:
"""
Example from Vallado, Eg.5-1, p.280, April 2, 2006, 00:00 UTC
"""
def setup(self, *args):
self.jd = 2453827.5
self.rmod = np.array([146186212.0, 28788976.0, 12481064.0])
self.rpef = np.empty(3, dtype=np.double)
def time_mod2ecef(self):
_rotations.mod2ecef(self.jd, self.rmod, self.rpef)
|
python
|
"""
Main file!
"""
import argparse
import logging
import pickle
import string
import sys
from typing import Optional, TextIO, BinaryIO
import ujson
from vk_dumper import utils, vk
log = logging.getLogger('vk-dumper')
def main() -> None:
log.info('Starting vk-dumper!')
args = parse_args()
# Output variants:
json_file: Optional[TextIO] = args.json_file[0] if args.json_file else None
pickle_file: Optional[BinaryIO] = args.pickle_file[0] if args.pickle_file else None
text_file: Optional[TextIO] = args.text_file[0] if args.text_file else None
if not (json_file or pickle_file or text_file):
log.critical('No dump save method selected, use -j/-p/-x parameters')
sys.exit(1)
if args.verbose:
utils.init_logging(debug=True)
log.debug('Logger reinitialized, debug logs enabled')
result = vk.dump_messages(
args.login, args.password,
args.message_count[0] if args.message_count else None,
args.vk_ids[0] if args.vk_ids else None
)
log.info('Got %d messages', len(result))
if json_file:
log.info('Saving results into JSON file "%s"...', json_file.name)
ujson.dump(result, json_file, ensure_ascii=False, escape_forward_slashes=False)
log.info('...done')
if pickle_file:
log.info('Saving results into Pickle file "%s"...', pickle_file.name)
pickle.dump(
result, pickle_file,
protocol=pickle.HIGHEST_PROTOCOL, fix_imports=False
)
log.info('...done')
if text_file:
log.info('Saving results into plain text file (with sanitizing) "%s"...', text_file.name)
sanitized = ''
        bad_chars = string.punctuation + string.ascii_lowercase + string.digits
        translator = str.maketrans(bad_chars, ' ' * len(bad_chars))
        for entry in result:
            words = entry['text'] \
                .lower() \
                .split()
            stripped = [word.translate(translator) for word in words]
sanitized += ' '.join(stripped) + ' '
if args.markovify:
sanitized += '\n'
if args.markovify:
text_file.write(sanitized)
else:
text_file.write(
' '.join(sanitized.strip().split())
)
log.info('...done')
def parse_args() -> argparse.Namespace:
arg_parser = argparse.ArgumentParser(
prog='python -m vk_dumper',
description='Utility for dumping VK messages.',
epilog='desu~'
)
arg_parser.add_argument(
'--verbose', '-v',
action='store_true',
help='show additional debug logs',
dest='verbose'
)
arg_parser.add_argument(
'--message-count', '-c',
action='store',
nargs=1,
type=int,
help='maximum messages count to extract',
metavar='count',
dest='message_count'
)
arg_parser.add_argument(
'--vk-id', '-d',
action='append',
nargs=1,
type=str,
help='select usernames to extract, without @',
metavar='username',
dest='vk_ids'
)
arg_parser.add_argument(
'--out-json', '-j',
action='store',
nargs=1,
type=argparse.FileType(
'xt', encoding='utf-8', errors='surrogateescape'
),
help='choose a path to dump results as JSON',
metavar='/path/to/dump.json',
dest='json_file'
)
arg_parser.add_argument(
'--out-pickle', '-p',
action='store',
nargs=1,
type=argparse.FileType('xb'),
help='choose a path to dump results as Pickle',
metavar='/path/to/dump.pkl',
dest='pickle_file'
)
arg_parser.add_argument(
'--out-txt', '-x',
action='store',
nargs=1,
type=argparse.FileType(
'xt', encoding='utf-8', errors='surrogateescape'
),
help='choose a path to dump results as plain text',
metavar='/path/to/dump.txt',
dest='text_file'
)
arg_parser.add_argument(
'--markov', '-m',
action='store_true',
help='store plaintext with line break after each message for markovify',
dest='markovify'
)
arg_parser.add_argument(
'login',
action='store',
type=str,
help='VK.com phone number/email',
metavar='login',
)
arg_parser.add_argument(
'password',
action='store',
type=str,
help='VK.com password',
metavar='password',
)
return arg_parser.parse_args()
if __name__ == '__main__':
utils.init_logging(debug=False)
main()
|
python
|
from .iostream import cprint, cin, cout, cerr, endl
from .cmath import *
from . import cmath, iostream
|
python
|
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from tracpro.test import factories
from tracpro.test.cases import TracProTest
from .. import charts
from .. import models
class PollChartTest(TracProTest):
def setUp(self):
super(PollChartTest, self).setUp()
self.org = factories.Org()
self.poll = factories.Poll(org=self.org)
self.region1 = factories.Region(org=self.org, name="Beta")
self.region2 = factories.Region(org=self.org, name="Acme")
self.question1 = factories.Question(
poll=self.poll, question_type=models.Question.TYPE_MULTIPLE_CHOICE)
self.question2 = factories.Question(
poll=self.poll, question_type=models.Question.TYPE_OPEN)
self.question3 = factories.Question(
poll=self.poll, question_type=models.Question.TYPE_NUMERIC)
self.pollrun = factories.UniversalPollRun(poll=self.poll)
self.contact1 = factories.Contact(org=self.org, region=self.region1)
self.response1 = factories.Response(
contact=self.contact1, pollrun=self.pollrun,
status=models.Response.STATUS_COMPLETE)
factories.Answer(
response=self.response1, question=self.question1,
value="4.00000", category="1 - 5")
factories.Answer(
response=self.response1, question=self.question2,
value="It's very rainy", category="All Responses")
factories.Answer(
response=self.response1, question=self.question3,
value="4.00000", category="1 - 5")
self.contact2 = factories.Contact(org=self.org, region=self.region1)
self.response2 = factories.Response(
contact=self.contact2, pollrun=self.pollrun,
status=models.Response.STATUS_COMPLETE)
factories.Answer(
response=self.response2, question=self.question1,
value="3.00000", category="1 - 5")
factories.Answer(
response=self.response2, question=self.question2,
value="rainy and rainy", category="All Responses")
factories.Answer(
response=self.response2, question=self.question3,
value="3.00000", category="1 - 5")
self.contact3 = factories.Contact(org=self.org, region=self.region2)
self.response3 = factories.Response(
contact=self.contact3, pollrun=self.pollrun,
status=models.Response.STATUS_COMPLETE)
factories.Answer(
response=self.response3, question=self.question1,
value="8.00000", category="6 - 10")
factories.Answer(
response=self.response3, question=self.question2,
value="Sunny sunny", category="All Responses")
factories.Answer(
response=self.response3, question=self.question3,
value="8.00000", category="6 - 10")
self.pollruns = models.PollRun.objects.filter(pk=self.pollrun.pk)
self.responses = models.Response.objects.filter(pollrun=self.pollrun)
def test_multiple_pollruns_multiple_choice(self):
answers = models.Answer.objects.filter(question=self.question1)
data, summary_table = charts.multiple_pollruns_multiple_choice(
self.pollruns, answers, self.responses, contact_filters={})
self.assertEqual(
data['dates'],
[self.pollrun.conducted_on.strftime('%Y-%m-%d')])
self.assertEqual(data['series'], [
{'name': '1 - 5',
'data': [{'y': 2, 'url': reverse('polls.pollrun_read', args=[self.pollrun.pk])}]},
{'name': '6 - 10',
'data': [{'y': 1, 'url': reverse('polls.pollrun_read', args=[self.pollrun.pk])}]},
])
def test_word_cloud_data(self):
answers = models.Answer.objects.filter(question=self.question2)
data = charts.word_cloud_data(answers)
self.assertEqual(data, [
{"text": "rainy", "weight": 3},
{"text": "sunny", "weight": 2},
])
def test_multiple_pollruns_numeric(self):
chart_type, data, summary_table = charts.multiple_pollruns(
self.pollruns, self.responses, self.question3, split_regions=False,
contact_filters={})
summary_data = dict(summary_table)
self.assertEqual(chart_type, "numeric")
self.assertEqual(data['pollrun-urls'], [
reverse('polls.pollrun_read', args=[self.pollrun.pk]),
])
self.assertEqual(data['participation-urls'], [
reverse('polls.pollrun_participation', args=[self.pollrun.pk]),
])
# Answers are 4, 3 and 8 for a single date
# Single item for single date: sum = 4 + 3 + 8 = 15
# URL points to pollrun detail page for this date
self.assertEqual(data['sum'], [{
'name': self.question3.name,
'data': [15.0],
}])
# Single item for single date: average = (4 + 3 + 8)/3 = 5
# URL points to pollrun detail page for this date
self.assertEqual(data['average'], [{
'name': self.question3.name,
'data': [5.0],
}])
# Set all responses to complete in setUp()
# Response rate = 100%
# URL points to participation tab
self.assertEqual(data['response-rate'], [{
'name': self.question3.name,
'data': [100.0],
}])
# Today's date
self.assertEqual(
data['dates'],
[self.pollrun.conducted_on.strftime('%Y-%m-%d')])
# Mean, Standard Dev, response rate avg, pollrun list
self.assertEqual(summary_data['Mean'], 5.0)
self.assertEqual(summary_data['Standard deviation'], 0.0)
self.assertEqual(summary_data['Response rate average (%)'], 100.0)
# Remove an answer, thus changing the response rate.
self.response1.answers.get(question=self.question3).delete()
chart_type, data, summary_table = charts.multiple_pollruns(
self.pollruns, self.responses, self.question3, split_regions=False,
contact_filters={})
summary_data = dict(summary_table)
self.assertEqual(chart_type, "numeric")
# 2 answers of 3 expected - response rate should be 66.7%
self.assertEqual(data['response-rate'], [{
'name': self.question3.name,
'data': [66.7],
}])
self.assertEqual(summary_data['Response rate average (%)'], 66.7)
def test_multiple_pollruns_numeric_split(self):
chart_type, data, summary_table = charts.multiple_pollruns(
self.pollruns, self.responses, self.question3, split_regions=True,
contact_filters={})
summary_data = dict(summary_table)
self.assertEqual(chart_type, "numeric")
self.assertEqual(data['pollrun-urls'], [
reverse('polls.pollrun_read', args=[self.pollrun.pk]),
])
self.assertEqual(data['participation-urls'], [
reverse('polls.pollrun_participation', args=[self.pollrun.pk]),
])
self.assertEqual(data['sum'], [
{
'name': "Acme",
'data': [8.0],
},
{
'name': "Beta",
'data': [7.0],
},
])
# Single item for single date: average = (4 + 3 + 8)/3 = 5
# URL points to pollrun detail page for this date
self.assertEqual(data['average'], [
{
'name': "Acme",
'data': [8.0],
},
{
'name': "Beta",
'data': [3.5],
},
])
# Set all responses to complete in setUp()
# Response rate = 100%
# URL points to participation tab
self.assertEqual(data['response-rate'], [
{
'name': "Acme",
'data': [100.0],
},
{
'name': "Beta",
'data': [100.0],
},
])
# Today's date
self.assertEqual(
data['dates'],
[self.pollrun.conducted_on.strftime('%Y-%m-%d')])
self.assertEqual(summary_data['Mean'], 5.0)
self.assertEqual(summary_data['Standard deviation'], 0.0)
self.assertEqual(summary_data['Response rate average (%)'], 100.0)
def test_single_pollrun_multiple_choice(self):
answers = models.Answer.objects.filter(question=self.question1)
data = charts.single_pollrun_multiple_choice(answers, self.pollrun)
self.assertEqual(
data['data'],
[2, 1])
self.assertEqual(
data['categories'],
['1 - 5', '6 - 10'])
def test_single_pollrun_open(self):
chart_type, chart_data, summary_table = charts.single_pollrun(
self.pollrun, self.responses, self.question2)
self.assertEqual(chart_type, 'open-ended')
self.assertEqual(chart_data[0], {'text': 'rainy', 'weight': 3})
self.assertEqual(len(chart_data), 2)
self.assertEqual(summary_table, None)
def test_single_pollrun_numeric(self):
# Make answers numeric
self.question3.question_type = models.Question.TYPE_NUMERIC
self.question3.save()
# Answers for question 3 = 8, 3 and 4
# Average = 5, Response Rate = 100%, STDEV = 2.2
chart_type, chart_data, summary_table = charts.single_pollrun(
self.pollrun, self.responses, self.question3)
summary_data = dict(summary_table)
self.assertEqual(chart_type, 'bar')
self.assertEqual(summary_data['Mean'], 5)
self.assertEqual(summary_data['Response rate average (%)'], 100)
self.assertEqual(summary_data['Standard deviation'], 2.2)
# Results are autocategorized
self.assertEqual([2, 1], chart_data['data'])
self.assertEqual(2, len(chart_data['categories']))
|
python
|
# Copyright 2021 The Distla Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Function to compute the square root and its inverse for positive definite
matrix. This file covers the distributed case. This function is the interface,
with the work functions defined in `distla_core.linalg.invsqrt_utils`.
"""
from jax import lax
from distla_core.linalg.backends import distributed_backend
from distla_core.linalg.invsqrt import invsqrt_utils
def invsqrt(A,
eps=None,
maxiter=200,
s_min=None,
s_thresh=0.1,
p_sz=128,
precision=lax.Precision.HIGHEST):
"""
Computes the square root and inverse square root of the positive definite
matrix `A`.
  The method is an iterative one. As explained in Higham's "Stable iterations
for the matrix square root", 1997, the matrix sign function of the block
matrix `[[0, A], [I, 0]]` is `[[0, sqrt(A)], [inv(sqrt(A)), 0]]`, and hence
the same Newton-Schultz iteration that is used for computing the matrix sign
function (see `polar.py`) can be applied to simultaneously compute `sqrt(A)`,
`inv(sqrt(A))`.
The iteration proceeds in two stages. First we repeatedly apply the so called
"rogue" polynomial
```
Y_{k+1} = a_m * Y_k - 4 * (a_m/3)**3 * Y_k @ Z_k @ Y_k
Z_{k+1} = a_m * Z_k - 4 * (a_m/3)**3 * Z_k @ Y_k @ Z_k
```
where `a_m = (3 / 2) * sqrt(3) - s_thresh`, and `Y_0 = A` and `Z_0 = I`, to
bring the eigenvalues of `[[0, Y], [Z, 0]]` to within the range `[s_thresh,
1]`. Then we switch to the Newton-Schultz iteration
```
Y_{k+1} = (3 / 2) * Y_k - (1 / 2) * Y_k @ Z_k @ Y_k
Z_{k+1} = (3 / 2) * Z_k - (1 / 2) * Z_k @ Y_k @ Z_k
```
until convergence.
Args:
`A`: The input matrix. Assumed to be positive definite.
`eps`: The final result will satisfy `|I - Y @ Z| <= eps * |I|`, where `Y`
and `Z` are the returned approximations to `sqrt(A)` and
`inv(sqrt(A))` respectively. Machine epsilon by default.
`maxiter`: Iterations will terminate after this many steps even if the
above is unsatisfied. 200 by default.
    `s_min`: An underestimate of the smallest eigenvalue of
`[[0, A], [I, 0]]`. Machine epsilon by default.
`s_thresh`: The iteration switches from the `rogue` polynomial to the
Newton-Schultz iterations after `s_min` is estimated to have
reached this value. 0.1 by default.
`p_sz`: Panel size for the SUMMA matmuls. 128 by default.
`precision`: Precision of the matrix multiplications.
Returns:
`Y`: approximation to `sqrt(A)`.
`Z`: approximation to `inv(sqrt(A))`.
`jr`: The number of 'rogue' iterations.
`jt`: The total number of iterations.
"""
# TODO The above description for `s_min` isn't very helpful. How do we
# understand the connection between eigenvalues of the block matrix, and
# eigenvalues of A?
backend = distributed_backend.DistributedBackend(p_sz, precision=precision)
return invsqrt_utils._invsqrt(A, eps, maxiter, s_min, s_thresh, backend)
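

# A minimal single-device NumPy sketch of the coupled Newton-Schulz iteration
# described in the docstring above (illustrative only -- the distributed
# implementation lives in `distla_core.linalg.invsqrt.invsqrt_utils`). It
# assumes `A` has been pre-scaled so the iteration converges (roughly, the
# eigenvalues of `A` lie in (0, 1]) and omits the "rogue" acceleration stage.
def _invsqrt_newton_schulz_sketch(A, maxiter=100, tol=1e-12):
  import numpy as np
  n = A.shape[0]
  Y = np.array(A, dtype=float)  # Y_0 = A
  Z = np.eye(n)                 # Z_0 = I
  for _ in range(maxiter):
    ZY = Z @ Y
    # Y_{k+1} = (3/2) Y_k - (1/2) Y_k @ Z_k @ Y_k, and similarly for Z.
    Y, Z = 1.5 * Y - 0.5 * Y @ ZY, 1.5 * Z - 0.5 * ZY @ Z
    if np.linalg.norm(np.eye(n) - Z @ Y) <= tol * n:
      break
  return Y, Z  # approximations to sqrt(A) and inv(sqrt(A))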
|
python
|
"""
Train a DeeProtein-model.
"""
import argparse
import json
from DeeProtein import DeeProtein
import helpers
import os
def main():
with open(FLAGS.config_json) as config_fobj:
config_dict = json.load(config_fobj)
# set the gpu context
if not FLAGS.gpu:
if config_dict["gpu"] == 'True':
config_dict["gpu"] = "False"
optionhandler = helpers.OptionHandler(config_dict)
model = DeeProtein(optionhandler)
model.train(restore_whole=FLAGS.restore_whole, binary=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--config_json',
type=str,
required=True,
help='Path to the config.JSON')
parser.add_argument(
'--restore_whole',
type=str,
default=True,
        help='Whether to restore the whole model including the outlayer '
'(optional). Defaults to True.')
parser.add_argument(
'--gpu',
type=str,
default=True,
        help='Whether to train in gpu context or not '
'(optional). Defaults to True.')
FLAGS, unparsed = parser.parse_known_args()
if unparsed:
print('Error, unrecognized flags:', unparsed)
exit(-1)
main()
|
python
|
# -*- coding: utf-8 -*-
__author__ = 'Lara Olmos Camarena'
import re
import json
"""
utils
"""
def preprocess_text(text_str):
regular_expr = re.compile('\n|\r|\t|\(|\)|\[|\]|:|\,|\;|"|\?|\-|\%')
text_str = re.sub(regular_expr, ' ', text_str)
token_list = text_str.split(' ')
token_list = [element for element in token_list if element]
return ' '.join(token_list)
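# For example: preprocess_text('Hello,\tworld (test)') returns 'Hello world test'
# (the punctuation in the regex is replaced by spaces and runs of spaces collapse).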
def sublist(lst1, lst2):
if len(lst1) > 1:
res1 = set(lst1)
else:
res1 = set(list(lst1))
res2 = set(lst2)
return res1 <= res2
"""
NER
"""
NER_TYPES = ['ORGANIZATION',
'PERSON', 'TITLE', 'IDEOLOGY',
'CITY', 'COUNTRY', 'LOCATION', 'NATIONALITY', 'STATE_OR_PROVINCE',
'DATE', 'DURATION', 'TIME',
'PERCENT', 'NUMBER', 'ORDINAL', 'MONEY',
'CAUSE_OF_DEATH', 'CRIMINAL_CHARGE', 'RELIGION']
def load_data(string_element):
data = {}
if string_element == '{}' or string_element == '[]':
return data
try:
raw_data = str(string_element).replace("',", '",').replace("['", '["').replace("']", '"]').replace("':", '":').replace("{'", '{"').replace(", '", ', "')
data = json.loads(raw_data)
except Exception as e:
print(e)
print(raw_data)
return data
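# Illustrative example: a python-repr style string is rewritten into valid JSON
# before parsing, e.g.
#   load_data("{'PERSON': ['Alice']}") -> {'PERSON': ['Alice']}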
def get_ner_values(ner_dict, specific_ner):
if ner_dict and specific_ner in ner_dict.keys():
return list(ner_dict[specific_ner])
return []
def get_ner_tags(ner_dict):
if ner_dict:
return list(ner_dict.keys())
return []
def get_ner_count(ner_dict, specific_ner):
if ner_dict and specific_ner in ner_dict.keys():
return len(ner_dict[specific_ner])
return 0
def ner_type_answer(element):
if element in ['CAUSE_OF_DEATH', 'CITY', 'COUNTRY', 'CRIMINAL_CHARGE', 'DATE',
'DURATION', 'IDEOLOGY', 'LOCATION', 'MISC', 'MONEY', 'MULTI',
'NATIONALITY', 'NONE', 'NUMBER', 'ORDINAL', 'ORGANIZATION',
'PERCENT', 'PERSON', 'RELIGION', 'SET', 'STATE_OR_PROVINCE',
'TIME', 'TITLE']:
return element
return 'MISC'
"""
POS TAGGING
"""
# ALL TYPES: ['CC', 'CD', 'DT', 'EX', 'FW', 'IN', 'IN', 'JJ', 'JJR', 'JJS', 'MD', 'NN', 'NNS', 'NP', 'NPS', 'PDT', 'POS', 'PP', 'PP', 'RB', 'RBR', 'RBS', 'RP', 'SENT', 'SYM', 'TO', 'UH', 'VB', 'VBD', 'VBG', 'VBN', 'VBZ', 'VBP', 'VD', 'VDD', 'VDG', 'VDN', 'VDZ', 'VDP', 'VH', 'VHD', 'VHG', 'VHN', 'VHZ', 'VHP', 'VV', 'VVD', 'VVG', 'VVN', 'VVP', 'VVZ', 'WDT', 'WP', 'WP', 'WRB', ':', '$']
pattern_pos = r"pos=['\"]('?\w+|``)?[\?,.`\[\]]?['\"]"
reg_expr_pos = re.compile(pattern_pos)
def get_pos(text):
return [item.replace('pos=','').replace("'",'') for item in re.findall(reg_expr_pos, text)]
pattern_word = r"word=['\"]([\"'`]?(\w|\.|,|\-)+)?[\?,.\[\]]?['\"]"
reg_expr_word = re.compile(pattern_word)
def get_word_pos(text):
return [item[0].replace('word=','').replace("'",'') for item in re.findall(reg_expr_word, text)]
def get_pos_count(pos_list, specific_pos):
if pos_list and specific_pos in pos_list:
return pos_list.count(specific_pos)
return 0
def pral_pos(pos_str):
if 'NP' in pos_str:
return 'NP'
if 'JJ' in pos_str:
return 'JJ'
if 'V' in pos_str:
return 'V'
if 'R' in pos_str:
return 'R'
if 'CD' in pos_str:
return 'CD'
if 'NN' in pos_str:
return 'NN'
return ''
import treetaggerwrapper
tagger = treetaggerwrapper.TreeTagger(TAGLANG='en', TAGDIR='C:\\Users\\larao_000\\Documents\\nlp\\tree-tagger-windows-3.2.3\\TreeTagger\\')
def pos_tagging(text, max_length=1000):
results = []
for i in range(0, len(text), max_length):
partial_text = text[i:i+max_length]
tags = tagger.tag_text(partial_text)
results += treetaggerwrapper.make_tags(tags)
return results
"""
Primer sustantivo de la pregunta, o en caso de partรญcula Wh, es automรกtico.
Where -> place, Who -> person. No tiene por quรฉ aparecer en el texto explรญcito.
WDT wh-determiner which;
WP wh-pronoun who, what;
WP$ possessive wh-pronoun whose;
WRB wh-abverb where, when
"""
def wh_query(foco, common_wh=['what', 'who', 'where', 'when', 'why', 'which', 'how', 'in', 'the']):
if foco in common_wh:
return foco
return 'other'
def obtener_foco(query, query_pos):
candidate_focus = []
minor_index = []
if len(query) > 0:
if (query[0].lower() == 'who') and 'WP' in query_pos:
candidate_focus.append('person')
if 'WP$' in query_pos[0]:
candidate_focus.append('person')
if (query[0].lower() == 'where') and 'WRB' in query_pos:
candidate_focus.append('place')
if (query[0].lower() == 'when') and 'WRB' in query_pos:
candidate_focus.append('time')
if sublist(['NN'],query_pos):
minor_index.append(query_pos.index('NN'))
if sublist(['NNS'],query_pos):
minor_index.append(query_pos.index('NNS'))
if sublist(['NPS'],query_pos):
minor_index.append(query_pos.index('NPS'))
if sublist(['NP'],query_pos):
minor_index.append(query_pos.index('NP'))
if sublist(['WP'], query_pos) and not sublist(['NN'], query_pos) and not sublist(['NNS'], query_pos) and not sublist(['NP'], query_pos) and not sublist(['NPS'], query_pos):
if sublist(['VVG'], query_pos):
minor_index.append(query_pos.index('VVG'))
if sublist(['JJ'], query_pos):
minor_index.append(query_pos.index('JJ'))
if sublist(['VVN'], query_pos):
minor_index.append(query_pos.index('VVN'))
if sublist(['VVD'], query_pos):
minor_index.append(query_pos.index('VVD'))
if sublist(['RB'], query_pos):
minor_index.append(query_pos.index('RB'))
if len(minor_index) > 0 and min(minor_index) < len(query) and min(minor_index) >= 0:
candidate_focus.append(query[min(minor_index)])
if ('how much' in ' '.join(query).lower()) or ('how many' in ' '.join(query).lower()):
candidate_focus.append('quantity')
if candidate_focus:
return candidate_focus[0]
else:
return ''
def transform_foco(foco, common_focos=['type', 'kind', 'percentage', 'term', 'group',
'language', 'part', 'date', 'word', 'example', 'period', 'event', 'product',
'title', 'ideology', 'religion', 'money', 'percentage']):
# who, what
if foco in ['person','name','people', 'names','nationalities', 'nationality']:
return 'person'
if foco in ['organization','company','companies','organizations']:
return 'organization'
# where
if foco in ['place', 'country', 'city', 'state', 'province', 'location', 'area', 'region',
'areas','locations','states', 'cities', 'countries']:
return 'location'
# when
if foco in ['time', 'duration', 'age', 'year', 'month', 'day', 'week', 'hour', 'decade', 'century',
'days', 'years', 'hours', 'ages', 'weeks', 'decades', 'months', 'centuries']:
return 'time'
if foco in ['number','numbers','quantity']:
return 'number'
if foco in common_focos:
return foco
    if foco[-1]=='s' and len(foco) > 2 and foco[:-1] in common_focos:
        return foco[:-1]
if foco == 'nan' or foco == '':
return 'other'
# NN, what, ...
foco_pos = pos_tagging(str(foco))
if foco_pos[0].pos == 'NN' or foco_pos[0].pos == 'NNS':
return 'NN'
if foco_pos[0].pos == 'NP' or foco_pos[0].pos == 'NPS':
return 'NP'
return 'other'
def validate_foco_ner(foco, ner_query, answer):
result = 'KO'
foco_pos = get_pos(str(pos_tagging(foco)))
if foco_pos:
foco_pos = foco_pos[0]
if not isinstance(ner_query, list):
ner_query = str(ner_query).replace('[','').replace(']','').replace("'", '').split(', ')
if ner_query == '[]':
result = 'NA'
elif ner_query == []:
result = 'NA'
elif not foco or foco == 'NaN':
result = 'NA'
elif str(answer)!='' and str(answer)!='NaN' and str(answer)!='[NaN]':
if (foco.lower() in ['person','name','people', 'names','nationalities', 'nationality'] or foco_pos in ['NP','NPS']) and sublist(ner_query,['PERSON', 'ORGANIZATION', 'TITLE', 'NATIONALITY']):
result = 'OK-PERSON-ORG'
if (foco.lower() in ['place', 'country', 'city', 'state', 'province', 'location', 'area', 'region',
'areas','locations','states', 'cities', 'countries'] or foco_pos in ['NP','NPS']) and sublist(ner_query,['CITY', 'COUNTRY', 'LOCATION', 'STATE_OR_PROVINCE']):
result = 'OK-LOC'
if (foco.lower() in ['time', 'duration', 'age', 'year', 'month', 'day', 'week', 'hour', 'decade', 'century',
'days', 'years', 'hours', 'ages', 'weeks', 'decades', 'months', 'centuries']) and sublist(ner_query,['DATE', 'TIME', 'DURATION', 'NUMBER']):
result = 'OK-TIME'
if (foco.lower() in ['titles','title','role','roles']) and sublist(ner_query,['TITLE']):
result = 'OK-TITLE'
if (foco.lower() in ['percentage']) and sublist(ner_query,['PERCENT']):
result = 'OK-PERCENT'
if (foco.lower() in ['number','numbers','quantity', 'money', 'age', 'percentage'] or foco_pos in ['CD','LS', 'NNS']) and sublist(ner_query,['NUMBER', 'PERCENT', 'MONEY', 'ORDINAL', 'CARDINAL']):
result = 'OK-NUMBER'
if foco and sublist([foco.upper()], ner_query):
result = 'OK-' + foco.upper()
elif foco and foco[-1]=='s' and len(foco) > 2 and sublist([foco[:-1].upper()], ner_query):
result = 'OK-' + foco[:-1].upper()
else:
result='NA'
return result
"""
QA
"""
def load_answer_data(string_element):
data = {}
if string_element == '{}' or string_element == '[]':
return data
try:
raw_data = str(string_element).replace("',", '",').replace("['", '["').replace("']", '"]').replace("':", '":').replace("{'", '{"').replace(", '", ', "').replace(": '", ': "').replace("'}", '"}').replace(': \\"', ': "').replace('\\"}', '"}')
raw_data = raw_data.replace('\\""','"').replace("\\'","'").replace('""', '"\"').replace('""','"')
answer_data = re.search(r'"answer": ".*"}', raw_data).group(0).replace('"answer": ', '').replace('"}', '').replace('"', '').replace("'", '').replace("\\",'')
raw_data = raw_data[:raw_data.index('answer": ')+len('answer": ')] + '"'+ answer_data + '"}'
data = json.loads(raw_data)
except Exception as e:
print(e)
print(raw_data)
return data
def correct(answer, model_answer, plausible):
answer = str(answer).replace("'", '').replace('"', '').replace(',','')
model_answer = str(model_answer).replace("'", '').replace('"', '').replace(',','')
plausible = str(plausible).replace("'", '').replace('"', '').replace(',','').replace('.','')
if answer and model_answer:
if answer == model_answer:
return True
if str(answer).lower().replace('the ', '') == str(model_answer).lower().replace('the ', ''):
return True
if str(answer).lower() in str(model_answer).lower() or str(model_answer).lower() in str(answer).lower():
return True
elif plausible and model_answer:
if plausible == model_answer:
return True
if str(plausible).lower().replace('the ', '') == model_answer.lower().replace('the ', ''):
return True
if str(plausible).lower() in str(model_answer).lower() or str(model_answer).lower() in str(plausible).lower():
return True
elif answer == '' and model_answer == '':
return True
return False
def correct_medium(answer, model_answer, plausible):
answer = answer.replace("'", '').replace('"', '').replace(',','')
model_answer = model_answer.replace("'", '').replace('"', '').replace(',','')
plausible = plausible.replace("'", '').replace('"', '').replace(',','')
if answer and model_answer:
if answer == model_answer:
return 'FACIL'
if str(answer).lower().replace('the ', '') == str(model_answer).lower().replace('the ', ''):
return 'FACIL'
if str(answer).lower() in str(model_answer).lower() or str(model_answer).lower() in str(answer).lower():
return 'MEDIA'
elif plausible and model_answer:
if plausible == model_answer:
return 'FACIL'
if str(plausible).lower().replace('the ', '') == model_answer.lower().replace('the ', ''):
return 'FACIL'
if str(plausible).lower() in str(model_answer).lower() or str(model_answer).lower() in str(plausible).lower():
return 'MEDIA'
return 'DIFICIL'
|
python
|
import importlib
from time import perf_counter  # time.clock() was removed in Python 3.8

# Import the module(s) that define iterativefact() and recursivefactorial(),
# pulling their names into the global namespace so they can be called below.
for i in range(2):
    module = input("Enter module to import: ")
    globals().update(vars(importlib.import_module(module)))

start = perf_counter()
print(iterativefact(27))
end = perf_counter()
elapsed = end - start
print(elapsed)

start = perf_counter()
print(recursivefactorial(27))
end = perf_counter()
elapsed = end - start
print(elapsed)
|
python
|
# -*- encoding: utf-8 -*-
"""
Boolean type comparator used to match Boolean
Comparators are used by Audit module to compare module output
with the expected result
In FDG-connector, comparators might also be used with FDG
Boolean comparator exposes various commands:
- "match"
Use Cases:
- To check a boolean value against boolean true/false
- To check whether we got anything (boolean_typecast)
i.e. True for anything, False for None or empty string
example:
comparator:
type: boolean
match: True
boolean_cast: False # Optional param
"""
import logging
log = logging.getLogger(__name__)
def match(audit_id, result_to_compare, args):
"""
Match against a boolean
match: True
:param result_to_compare:
The value to compare.
:param args:
Comparator boolean as mentioned in the check.
"""
log.debug('Running boolean::match for check: {0}'.format(audit_id))
# if result_to_compare is not of boolean type, but we want a type-cast
boolean_cast = args.get('boolean_cast', False)
value_to_compare = result_to_compare
if boolean_cast:
value_to_compare = bool(value_to_compare)
if value_to_compare == args['match']:
return True, "Check Passed"
return False, "boolean::match failure. Expected={0} Got={1}".format(str(args['match']), result_to_compare)
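# Illustrative calls (audit ids are hypothetical):
#   match('check-1', True, {'match': True})
#       -> (True, "Check Passed")
#   match('check-2', '', {'match': False, 'boolean_cast': True})
#       -> (True, "Check Passed")   # empty string casts to False, which matches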
|
python
|
'''MobileNet in PyTorch.
See the paper "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications"
for more details.
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import os, sys
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)
from butterfly import Butterfly
from cnn.models.low_rank_conv import LowRankConv2d
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
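# For example: _make_divisible(37, 8) == 40 (nearest multiple of 8, no 10% drop),
# and _make_divisible(7, 8) == 8 (clamped up to the implicit min_value of 8).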
class Butterfly1x1Conv(Butterfly):
"""Product of log N butterfly factors, each is a block 2x2 of diagonal matrices.
"""
def forward(self, input):
"""
Parameters:
input: (batch, c, h, w) if real or (batch, c, h, w, 2) if complex
Return:
output: (batch, nstack * c, h, w) if real or (batch, nstack * c, h, w, 2) if complex
"""
batch, c, h, w = input.shape
input_reshape = input.view(batch, c, h * w).transpose(1, 2).reshape(-1, c)
output = super().forward(input_reshape)
return output.view(batch, h * w, self.nstack * c).transpose(1, 2).view(batch, self.nstack * c, h, w)
class Block(nn.Module):
'''Depthwise conv + Pointwise conv'''
def __init__(self, in_planes, out_planes, stride=1, structure='D'):
super(Block, self).__init__()
self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=in_planes, bias=False)
self.conv1.weight._no_wd = True
self.bn1 = nn.BatchNorm2d(in_planes)
self.bn1.weight._no_wd = True
self.bn1.bias._no_wd = True
if structure == 'D':
self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
elif structure.startswith('LR'):
odo_nblocks = int(structure.split('_')[1])
rank = int(odo_nblocks * math.log2(out_planes) / 2)
self.conv2 = LowRankConv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False, rank=rank)
else:
param = structure.split('_')[0]
nblocks = 0 if len(structure.split('_')) <= 1 else int(structure.split('_')[1])
self.residual = False if len(structure.split('_')) <= 2 else (structure.split('_')[2] == 'res')
# self.residual = self.residual and in_planes == out_planes
self.conv2 = Butterfly1x1Conv(in_planes, out_planes, bias=False, tied_weight=False, ortho_init=True, param=param, nblocks=nblocks)
self.bn2 = nn.BatchNorm2d(out_planes)
self.bn2.weight._no_wd = True
self.bn2.bias._no_wd = True
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)), inplace=True)
if not getattr(self, 'residual', False):
out = F.relu(self.bn2(self.conv2(out)), inplace=True)
else:
prev = out
out = self.conv2(out)
if out.shape[1] == 2 * prev.shape[1]:
b, c, h, w = prev.shape
out = (out.reshape(b, 2, c, h, w) + prev.reshape(b, 1, c, h, w)).reshape(b, 2 * c, h, w)
else:
out = out + prev
out = F.relu(self.bn2(out), inplace=True)
return out
class MobileNet(nn.Module):
# (128,2) means conv planes=128, conv stride=2, by default conv stride=1
cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024]
def __init__(self, num_classes=1000, width_mult=1.0, round_nearest=8, structure=None, softmax_structure='D', sm_pooling=1):
"""
structure: list of string
"""
super(MobileNet, self).__init__()
self.width_mult = width_mult
self.round_nearest = round_nearest
self.structure = [] if structure is None else structure
self.n_structure_layer = len(self.structure)
self.structure = ['D'] * (len(self.cfg) - self.n_structure_layer) + self.structure
self.sm_pooling = sm_pooling
input_channel = _make_divisible(32 * width_mult, round_nearest)
self.conv1 = nn.Conv2d(3, input_channel, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(input_channel)
self.bn1.weight._no_wd = True
self.bn1.bias._no_wd = True
self.layers = self._make_layers(in_planes=input_channel)
self.last_channel = _make_divisible(1024 * width_mult // sm_pooling, round_nearest)
if softmax_structure == 'D':
self.linear = nn.Linear(self.last_channel, num_classes)
else:
param = softmax_structure.split('_')[0]
nblocks = 0 if len(softmax_structure.split('_')) <= 1 else int(softmax_structure.split('_')[1])
self.linear = Butterfly(self.last_channel, num_classes, tied_weight=False, ortho_init=True, param=param, nblocks=nblocks)
def _make_layers(self, in_planes):
layers = []
for x, struct in zip(self.cfg, self.structure):
out_planes = _make_divisible((x if isinstance(x, int) else x[0]) * self.width_mult, self.round_nearest)
stride = 1 if isinstance(x, int) else x[1]
layers.append(Block(in_planes, out_planes, stride, structure=struct))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)), inplace=True)
out = self.layers(out)
out = out.mean([2, 3])
if self.sm_pooling != 1:
b, n = out.shape
out = out.reshape(b, self.sm_pooling, n // self.sm_pooling).mean(1)
out = self.linear(out)
return out
def mixed_model_state_dict(self, full_model_path, distilled_param_path):
current_state_dict_keys = self.state_dict().keys()
full_model_state_dict = torch.load(full_model_path, map_location='cpu')['state_dict']
full_model_state_dict = {name.replace('module.', ''): param for name, param in full_model_state_dict.items()}
distilled_params = torch.load(distilled_param_path, map_location='cpu')
state_dict = {name: param for name, param in full_model_state_dict.items() if name in current_state_dict_keys}
for i, struct in enumerate(self.structure):
# Only support butterfly for now
if struct.startswith('odo') or struct.startswith('regular'):
layer = f'layers.{i}.conv2'
nblocks = int(struct.split('_')[1])
structured_param = distilled_params[layer, nblocks]
state_dict.update({layer + '.' + name: param for name, param in structured_param.items()})
return state_dict
def test():
net = MobileNet()
x = torch.randn(1,3,32,32)
y = net(x)
print(y.size())
# test()
|
python
|
import numpy as np
import tensorflow as tf
import scipy.signal
def add_histogram(writer, tag, values, step, bins=1000):
"""
Logs the histogram of a list/vector of values.
From: https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
"""
# Create histogram using numpy
counts, bin_edges = np.histogram(values, bins=bins)
# Fill fields of histogram proto
hist = tf.HistogramProto()
hist.min = float(np.min(values))
hist.max = float(np.max(values))
hist.num = int(np.prod(values.shape))
hist.sum = float(np.sum(values))
hist.sum_squares = float(np.sum(values ** 2))
# Requires equal number as bins, where the first goes from -DBL_MAX to bin_edges[1]
# See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/summary.proto#L30
# Therefore we drop the start of the first bin
bin_edges = bin_edges[1:]
# Add bin edges and counts
for edge in bin_edges:
hist.bucket_limit.append(edge)
for c in counts:
hist.bucket.append(c)
# Create and write Summary
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
writer.add_summary(summary, step)
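# Illustrative usage with the TF1-style summary writer this helper targets
# (the path, tag, and `rewards`/`epoch` names are hypothetical):
#   writer = tf.summary.FileWriter('/tmp/ppo_logs')
#   add_histogram(writer, 'episode_reward', np.asarray(rewards), step=epoch)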
def discount(x, gamma, terminal_array=None):
if terminal_array is None:
return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
else:
y, adv = 0, []
terminals_reversed = terminal_array[1:][::-1]
for step, dt in enumerate(reversed(x)):
y = dt + gamma * y * (1 - terminals_reversed[step])
adv.append(y)
return np.array(adv)[::-1]
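# Example (terminal_array=None branch): discount(np.array([1., 1., 1.]), 0.5)
# returns [1.75, 1.5, 1.0], i.e. y_t = x_t + gamma * y_{t+1} computed backwards.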
class RunningStats(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
# https://github.com/openai/baselines/blob/master/baselines/common/running_mean_std.py
def __init__(self, epsilon=1e-4, shape=()):
self.mean = np.zeros(shape, 'float64')
self.var = np.ones(shape, 'float64')
self.std = np.ones(shape, 'float64')
self.count = epsilon
def update(self, x):
batch_mean = np.mean(x, axis=0)
batch_var = np.var(x, axis=0)
batch_count = x.shape[0]
self.update_from_moments(batch_mean, batch_var, batch_count)
def update_from_moments(self, batch_mean, batch_var, batch_count):
delta = batch_mean - self.mean
new_mean = self.mean + delta * batch_count / (self.count + batch_count)
m_a = self.var * self.count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
new_var = M2 / (self.count + batch_count)
self.mean = new_mean
self.var = new_var
self.std = np.maximum(np.sqrt(self.var), 1e-6)
self.count = batch_count + self.count
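# Illustrative check of the streaming update (values are approximate because of
# the epsilon prior count):
#   rs = RunningStats(shape=(1,))
#   rs.update(np.array([[1.0], [2.0]]))
#   rs.update(np.array([[3.0], [4.0]]))
#   rs.mean  -> ~[2.5]   # matches the mean of the concatenated batches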
def lstm_state_combine(state):
return np.reshape([s[0] for s in state], (len(state), -1)), \
np.reshape([s[1] for s in state], (len(state), -1))
|
python
|
# See documentation in:
# http://doc.scrapy.org/topics/items.html
from scrapy.item import Item, Field
class Author(Item):
name = Field()
profile_url = Field()
avatar_url = Field()
class BlogAuthor(Author):
pass
class CommentAuthor(Author):
pass
class Post(Item):
author = Field()
title = Field(default="")
content = Field()
posted = Field()
origin_url = Field()
class BlogPost(Post):
tags = Field(default=[])
comments = Field(default=[])
class CommentPost(Post):
pass
|
python
|
import xml.etree.ElementTree as ET
import fnmatch
import matplotlib.pyplot as plt
#rootDir = '/Users/yanzhexu/Desktop/Research/GBM/aCGH_whole_tumor_maps_for_Neuro-Onc_dataset/CEFSL_slices_only/slice22/ROI for +C_3D_AXIAL_IRSPGR_Fast_IM-0005-0022.xml'
# draw ROI from coordinates in XML file
def ParseXMLDrawROI(rootDir):
tree = ET.parse(rootDir)
root = tree.getroot()
childnum = 0
xcoordlist = list()
ycoordlist = list()
xycoordlist = list()
for child in root.iter('string'):
if not fnmatch.fnmatch(child.text,'*{*}*'):
continue
childnum+=1
#print child.text
#xycoord = list()
xcoords = str(child.text).split(',')[0]
ycoords = str(child.text).split(',')[1]
xc = float(xcoords.split('{')[1])
yc = float(ycoords.split('}')[0].replace(' ',''))
# xycoord.append(xc)
# xycoord.append(yc)
# xycoordlist.append(xycoord)
xcoordlist.append(xc)
ycoordlist.append(yc)
xcoordlist.append(xcoordlist[0])
ycoordlist.append(ycoordlist[0])
# print childnum
# print xcoordlist
# print ycoordlist
plt.plot(xcoordlist,ycoordlist,'b')
#plt.show()
# print xycoordlist
|
python
|
word = 'Bye'
phrase = word * 3 + '!'
print(phrase)
name = input()
print('I love', name)
|
python
|
# <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
from PoPs import IDs as IDsPoPsModule
from PoPs.groups import misc as chemicalElementMiscPoPsModule
from fudge.channelData import Q as QModule
from brownies.legacy.endl import misc as miscENDLModule
endfMATBases = { 0 : 1,
1 : 1, 2 : 3, 3 : 6, 4 : 9, 5 : 10, 6 : 12, 7 : 14, 8 : 16, 9 : 19, 10 : 20,
11 : 23, 12 : 24, 13 : 27, 14 : 28, 15 : 31, 16 : 32, 17 : 35, 18 : 36, 19 : 39, 20 : 40,
21 : 45, 22 : 46, 23 : 50, 24 : 50, 25 : 55, 26 : 54, 27 : 59, 28 : 58, 29 : 63, 30 : 64,
31 : 69, 32 : 70, 33 : 75, 34 : 74, 35 : 79, 36 : 78, 37 : 85, 38 : 84, 39 : 89, 40 : 90,
41 : 93, 42 : 92, 43 : 99, 44 : 96, 45 : 103, 46 : 102, 47 : 107, 48 : 106, 49 : 113, 50 : 112,
51 : 121, 52 : 120, 53 : 127, 54 : 124, 55 : 133, 56 : 130, 57 : 138, 58 : 136, 59 : 141, 60 : 142,
61 : 139, 62 : 144, 63 : 151, 64 : 152, 65 : 159, 66 : 156, 67 : 165, 68 : 162, 69 : 169, 70 : 168,
71 : 175, 72 : 174, 73 : 180, 74 : 180, 75 : 185, 76 : 184, 77 : 191, 78 : 190, 79 : 197, 80 : 196,
81 : 203, 82 : 204, 83 : 209, 84 : 206, 85 : -1, 86 : -1, 87 : -1, 88 : 223, 89 : 225, 90 : 227,
91 : 229, 92 : 234, 93 : 230, 94 : 235, 95 : 235, 96 : 240, 97 : 240, 98 : 240, 99 : 265, 100 : 244 }
def endfZAPFromMT( MT ) :
"""This function identifies the outgoing particle (i.e., ZAP) from the MT number. The outgoing
particle is almost always a neutron, except in a few cases."""
if ( ( ( MT >= 600 ) and ( MT <= 649 ) ) or ( MT == 103 ) ) : # proton
return( "H1" )
elif( ( ( MT >= 650 ) and ( MT <= 699 ) ) or ( MT == 104 ) ) : # deuteron
return( "H2" )
elif( ( ( MT >= 700 ) and ( MT <= 749 ) ) or ( MT == 105 ) ) : # triton
return( "H3" )
elif( ( ( MT >= 750 ) and ( MT <= 799 ) ) or ( MT == 106 ) ) : # helium-3
return( "He3" )
elif( ( ( MT >= 800 ) and ( MT <= 849 ) ) or ( MT == 107 ) ) : # helium-4
return( "He4" )
return( "n" ) # neutron
def ZAAndMATFromParticleName( particleName ) :
Z, A, suffix, ZA = miscENDLModule.getZ_A_suffix_andZAFromName( particleName )
m = 0
if( len( suffix ) ) :
if( suffix[0] == 'm' ) : m = int( suffix[1:] )
    if( m > 2 ) : raise Exception( 'Unsupported ENDF MAT for particle = %s' % particleName )
if( A == 0 ) :
MAT = 100 * Z
if( Z == 100 ) : MAT = 9920 # Special case for 100_Fm_000.
else :
Zp, AJumps = Z, 3
if( Z >= 99 ) : Zp, AJumps = 99, 1
MATBases = endfMATBases[Z]
if( MATBases < 0 ) : MATBases = { 85 : 210, 86 : 211, 87 : 212 }[Z]
MAT = 100 * Zp + 25 + AJumps * ( A - MATBases ) + m
# Kludge for Es254_m1 (MAT logic doesn't allow for isomers above Z=98, so Es254_m1 takes what should
# be the Es255 MAT):
if Z==99 and A>=255: MAT += 1
return( ZA, MAT )
def getParticleNameFromMAT( MAT ):
Z, MATstuff = divmod( MAT, 100 )
nStable, nIsomer = divmod( (MATstuff-25), 3 )
A = endfMATBases[Z] + nStable
name = chemicalElementMiscPoPsModule.idFromZAndA( Z, A )
if( nIsomer ) : name += '_m%d' % nIsomer
return( name )
class endfMTtoC_ProductList :
def __init__( self, C, reactionLabel, isFission = 0, ns = 0, H1s = 0, H2s = 0, H3s = 0, He3s = 0, He4s = 0, gammas = 0, residualLevel = None ) :
self.C = C
self.residualLevel = residualLevel
self.reactionLabel = reactionLabel
self.isFission = isFission
self.productCounts = { 'n' : ns, 'H1' : H1s, 'H2' : H2s, 'H3' : H3s, 'He3' : He3s, 'He4' : He4s, IDsPoPsModule.photon : gammas }
def __getitem__( self, product ) :
return( self.productCounts[product] )
def __repr__( self ) :
s = ''
for p in [ 'n', 'H1', 'H2', 'H3', 'He3', 'He4', IDsPoPsModule.photon ] :
if( self.productCounts[p] != 0 ) : s += " %5s = %d:" % ( p, self.productCounts[p] )
s = "C = %s: isFission = %5s:%s --- %s" % ( self.C, self.isFission != 0, s, self.reactionLabel )
return( s )
def endfMTtoC_ProductList_excitedStateInitializer( list, MTGround, MTContinuum, C, label, ns = 0, H1s = 0, H2s = 0, H3s = 0, He3s = 0, He4s = 0, gammas = 0 ) :
levelSuffixes = [ "", "st", "nd", "rd" ]
list[MTGround] = endfMTtoC_ProductList( C, "(z,%s[0]) -- to ground state" % label, 0, ns, H1s, H2s, H3s, He3s, He4s, gammas, 0 )
for idx in range( MTGround + 1, MTContinuum ) :
level = idx - MTGround
try :
levelSuffix = levelSuffixes[level]
except :
levelSuffix = "th"
list[idx] = endfMTtoC_ProductList( C, "(z,%s[%d]) -- to %d%s excited state" % ( label, level, level, levelSuffix ),
0, ns, H1s, H2s, H3s, He3s, He4s, gammas, level )
list[MTContinuum] = endfMTtoC_ProductList( C, "(z,%s[c]) -- excitation to continuum" % label, 0, ns, H1s, H2s, H3s, He3s, He4s, gammas, 'c' )
endfMTtoC_ProductLists = {}
endfMTtoC_ProductLists[1] = endfMTtoC_ProductList( 1, "(n,total)" )
endfMTtoC_ProductLists[2] = endfMTtoC_ProductList( 10, "(z,elastic)" )
endfMTtoC_ProductLists[3] = endfMTtoC_ProductList( 55, "(z,non-elastic)" )
endfMTtoC_ProductLists[4] = endfMTtoC_ProductList( 11, "(z,n)", 0, 1, 0, 0, 0, 0, 0, -1 )
endfMTtoC_ProductLists[5] = endfMTtoC_ProductList( 5, "(z,anything)" )
endfMTtoC_ProductLists[10] = endfMTtoC_ProductList( -1, "(z,continuum)" )
endfMTtoC_ProductLists[11] = endfMTtoC_ProductList( 32, "(n,2nd)", 0, 2, 0, 1, 0, 0, 0, 0 )
endfMTtoC_ProductLists[16] = endfMTtoC_ProductList( 12, "(z,2n)", 0, 2, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[17] = endfMTtoC_ProductList( 13, "(z,3n)", 0, 3, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[18] = endfMTtoC_ProductList( 15, "(z,f)", -1, 0, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[19] = endfMTtoC_ProductList( -1, "(n,f) -- 1st chance fission.", -1, 0, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[20] = endfMTtoC_ProductList( -1, "(n,nf) -- 2nd chance fission.", -1, 1, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[21] = endfMTtoC_ProductList( -1, "(n,2nf) -- 3rd chance fission.", -1, 2, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[22] = endfMTtoC_ProductList( 26, "(z,na)", 0, 1, 0, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[23] = endfMTtoC_ProductList( 36, "(z,n3a)", 0, 1, 0, 0, 0, 0, 3, 0 )
endfMTtoC_ProductLists[24] = endfMTtoC_ProductList( 33, "(z,2na)", 0, 2, 0, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[25] = endfMTtoC_ProductList( 16, "(z,3na)", 0, 3, 0, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[27] = endfMTtoC_ProductList( -1, "(n,abs)" )
endfMTtoC_ProductLists[28] = endfMTtoC_ProductList( 20, "(z,np)", 0, 1, 1, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[29] = endfMTtoC_ProductList( 27, "(z,n2a)", 0, 1, 0, 0, 0, 0, 2, 0 )
endfMTtoC_ProductLists[30] = endfMTtoC_ProductList( -1, "(z,2n2a)", 0, 2, 0, 0, 0, 0, 2, 0 )
endfMTtoC_ProductLists[32] = endfMTtoC_ProductList( 22, "(z,nd)", 0, 1, 0, 1, 0, 0, 0, 0 )
endfMTtoC_ProductLists[33] = endfMTtoC_ProductList( 24, "(z,nt)", 0, 1, 0, 0, 1, 0, 0, 0 )
endfMTtoC_ProductLists[34] = endfMTtoC_ProductList( 25, "(z,nH)", 0, 1, 0, 0, 0, 1, 0, 0 )
endfMTtoC_ProductLists[35] = endfMTtoC_ProductList( -1, "(z,nd2a)", 0, 1, 0, 1, 0, 0, 2, 0 )
endfMTtoC_ProductLists[36] = endfMTtoC_ProductList( -1, "(z,nt2a)", 0, 1, 0, 0, 1, 0, 2, 0 )
endfMTtoC_ProductLists[37] = endfMTtoC_ProductList( 14, "(z,4n)", 0, 4, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[38] = endfMTtoC_ProductList( -1, "(n,3nf) -- 4th chance fission.", -1, 3, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[41] = endfMTtoC_ProductList( 29, "(z,2np)", 0, 2, 1, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[42] = endfMTtoC_ProductList( 16, "(z,3np)", 0, 3, 1, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[44] = endfMTtoC_ProductList( 17, "(n,n2p)", 0, 1, 2, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[45] = endfMTtoC_ProductList( 34, "(n,npa)", 0, 1, 1, 0, 0, 0, 1, 0 )
endfMTtoC_ProductList_excitedStateInitializer( endfMTtoC_ProductLists, 50, 91, 11, "n", ns = 1 )
endfMTtoC_ProductLists[101] = endfMTtoC_ProductList( -1, "(n,disappearance)" )
endfMTtoC_ProductLists[102] = endfMTtoC_ProductList( 46, "(z,g)", 0, 0, 0, 0, 0, 0, 0, 1 )
endfMTtoC_ProductLists[103] = endfMTtoC_ProductList( 40, "(z,p)", 0, 0, 1, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[104] = endfMTtoC_ProductList( 41, "(z,d)", 0, 0, 0, 1, 0, 0, 0, 0 )
endfMTtoC_ProductLists[105] = endfMTtoC_ProductList( 42, "(z,t)", 0, 0, 0, 0, 1, 0, 0, 0 )
endfMTtoC_ProductLists[106] = endfMTtoC_ProductList( 44, "(z,H)", 0, 0, 0, 0, 0, 1, 0, 0 )
endfMTtoC_ProductLists[107] = endfMTtoC_ProductList( 45, "(z,a)", 0, 0, 0, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[108] = endfMTtoC_ProductList( 37, "(z,2a)", 0, 0, 0, 0, 0, 0, 2, 0 )
endfMTtoC_ProductLists[109] = endfMTtoC_ProductList( -1, "(z,3a)", 0, 0, 0, 0, 0, 0, 3, 0 )
endfMTtoC_ProductLists[111] = endfMTtoC_ProductList( 18, "(z,2p)", 0, 0, 2, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[112] = endfMTtoC_ProductList( 48, "(z,pa)", 0, 0, 1, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[113] = endfMTtoC_ProductList( 42, "(z,t2a)", 0, 0, 0, 0, 1, 0, 2, 0 )
endfMTtoC_ProductLists[114] = endfMTtoC_ProductList( -1, "(z,d2a)", 0, 0, 0, 1, 0, 0, 2, 0 )
endfMTtoC_ProductLists[115] = endfMTtoC_ProductList( 19, "(z,pd)", 0, 0, 1, 1, 0, 0, 0, 0 )
endfMTtoC_ProductLists[116] = endfMTtoC_ProductList( 39, "(z,pt)", 0, 0, 1, 0, 1, 0, 0, 0 )
endfMTtoC_ProductLists[117] = endfMTtoC_ProductList( 47, "(z,da)", 0, 0, 0, 1, 0, 0, 1, 0 )
endfMTtoC_ProductLists[151] = endfMTtoC_ProductList( -1, "(n,resonance)" )
# cmattoon September 2011, additional MT #s defined by CSEWG in 2010:
endfMTtoC_ProductLists[152] = endfMTtoC_ProductList( -1, "(z,5n)", 0, 5, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[153] = endfMTtoC_ProductList( -1, "(z,6n)", 0, 6, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[154] = endfMTtoC_ProductList( -1, "(z,2nt)", 0, 2, 0, 0, 1, 0, 0, 0 )
endfMTtoC_ProductLists[155] = endfMTtoC_ProductList( 43, "(z,ta)", 0, 0, 0, 0, 1, 0, 1, 0 )
endfMTtoC_ProductLists[156] = endfMTtoC_ProductList( -1, "(z,4np)", 0, 4, 1, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[157] = endfMTtoC_ProductList( -1, "(z,3nd)", 0, 3, 0, 1, 0, 0, 0, 0 )
endfMTtoC_ProductLists[158] = endfMTtoC_ProductList( 23, "(z,nda)", 0, 1, 0, 1, 0, 0, 1, 0 )
endfMTtoC_ProductLists[159] = endfMTtoC_ProductList( 31, "(z,2npa)", 0, 2, 1, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[160] = endfMTtoC_ProductList( -1, "(z,7n)", 0, 7, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[161] = endfMTtoC_ProductList( -1, "(z,8n)", 0, 8, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[162] = endfMTtoC_ProductList( -1, "(z,5np)", 0, 5, 1, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[163] = endfMTtoC_ProductList( -1, "(z,6np)", 0, 6, 1, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[164] = endfMTtoC_ProductList( -1, "(z,7np)", 0, 7, 1, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[165] = endfMTtoC_ProductList( -1, "(z,4na)", 0, 4, 0, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[166] = endfMTtoC_ProductList( -1, "(z,5na)", 0, 5, 0, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[167] = endfMTtoC_ProductList( -1, "(z,6na)", 0, 6, 0, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[168] = endfMTtoC_ProductList( -1, "(z,7na)", 0, 7, 0, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[169] = endfMTtoC_ProductList( -1, "(z,4nd)", 0, 4, 0, 1, 0, 0, 0, 0 )
endfMTtoC_ProductLists[170] = endfMTtoC_ProductList( -1, "(z,5nd)", 0, 5, 0, 1, 0, 0, 0, 0 )
endfMTtoC_ProductLists[171] = endfMTtoC_ProductList( -1, "(z,6nd)", 0, 6, 0, 1, 0, 0, 0, 0 )
endfMTtoC_ProductLists[172] = endfMTtoC_ProductList( -1, "(z,3nt)", 0, 3, 0, 0, 1, 0, 0, 0 )
endfMTtoC_ProductLists[173] = endfMTtoC_ProductList( -1, "(z,4nt)", 0, 4, 0, 0, 1, 0, 0, 0 )
endfMTtoC_ProductLists[174] = endfMTtoC_ProductList( -1, "(z,5nt)", 0, 5, 0, 0, 1, 0, 0, 0 )
endfMTtoC_ProductLists[175] = endfMTtoC_ProductList( -1, "(z,6nt)", 0, 6, 0, 0, 1, 0, 0, 0 )
endfMTtoC_ProductLists[176] = endfMTtoC_ProductList( -1, "(z,2nH)", 0, 2, 0, 0, 0, 1, 0, 0 )
endfMTtoC_ProductLists[177] = endfMTtoC_ProductList( -1, "(z,3nH)", 0, 3, 0, 0, 0, 1, 0, 0 )
endfMTtoC_ProductLists[178] = endfMTtoC_ProductList( -1, "(z,4nH)", 0, 4, 0, 0, 0, 1, 0, 0 )
endfMTtoC_ProductLists[179] = endfMTtoC_ProductList( -1, "(z,3n2p)", 0, 3, 2, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[180] = endfMTtoC_ProductList( -1, "(z,3n2a)", 0, 3, 0, 0, 0, 0, 2, 0 )
endfMTtoC_ProductLists[181] = endfMTtoC_ProductList( -1, "(z,3npa)", 0, 3, 1, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[182] = endfMTtoC_ProductList( -1, "(z,dt)", 0, 0, 0, 1, 1, 0, 0, 0 )
endfMTtoC_ProductLists[183] = endfMTtoC_ProductList( -1, "(z,npd)", 0, 1, 1, 1, 0, 0, 0, 0 )
endfMTtoC_ProductLists[184] = endfMTtoC_ProductList( -1, "(z,npt)", 0, 1, 1, 0, 1, 0, 0, 0 )
endfMTtoC_ProductLists[185] = endfMTtoC_ProductList( -1, "(z,ndt)", 0, 1, 0, 1, 1, 0, 0, 0 )
endfMTtoC_ProductLists[186] = endfMTtoC_ProductList( -1, "(z,npH)", 0, 1, 1, 0, 0, 1, 0, 0 )
endfMTtoC_ProductLists[187] = endfMTtoC_ProductList( -1, "(z,ndH)", 0, 1, 0, 1, 0, 1, 0, 0 )
endfMTtoC_ProductLists[188] = endfMTtoC_ProductList( -1, "(z,ntH)", 0, 1, 0, 0, 1, 1, 0, 0 )
endfMTtoC_ProductLists[189] = endfMTtoC_ProductList( 28, "(z,nta)", 0, 1, 0, 0, 1, 0, 1, 0 )
endfMTtoC_ProductLists[190] = endfMTtoC_ProductList( -1, "(z,2n2p)", 0, 2, 2, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[191] = endfMTtoC_ProductList( -1, "(z,pH)", 0, 0, 1, 0, 0, 1, 0, 0 )
endfMTtoC_ProductLists[192] = endfMTtoC_ProductList( -1, "(z,dH)", 0, 0, 0, 1, 0, 1, 0, 0 )
endfMTtoC_ProductLists[193] = endfMTtoC_ProductList( 38, "(z,Ha)", 0, 0, 0, 0, 0, 1, 1, 0 )
endfMTtoC_ProductLists[194] = endfMTtoC_ProductList( -1, "(z,4n2p)", 0, 4, 2, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[195] = endfMTtoC_ProductList( -1, "(z,4n2a)", 0, 4, 0, 0, 0, 0, 2, 0 )
endfMTtoC_ProductLists[196] = endfMTtoC_ProductList( -1, "(z,4npa)", 0, 4, 1, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[197] = endfMTtoC_ProductList( -1, "(z,3p)", 0, 0, 3, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[198] = endfMTtoC_ProductList( -1, "(z,n3p)", 0, 1, 3, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[199] = endfMTtoC_ProductList( -1, "(z,3n2pa)", 0, 3, 2, 0, 0, 0, 1, 0 )
endfMTtoC_ProductLists[200] = endfMTtoC_ProductList( -1, "(z,5n2p)", 0, 5, 2, 0, 0, 0, 0, 0 )
# end of new MT #s
endfMTtoC_ProductLists[201] = endfMTtoC_ProductList( -1, "(z,Xn)", 0, -1, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[202] = endfMTtoC_ProductList( -1, "(z,Xg)", 0, 0, 0, 0, 0, 0, 0, -1 )
endfMTtoC_ProductLists[203] = endfMTtoC_ProductList( 50, "(z,Xp)", 0, 0, -1, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[204] = endfMTtoC_ProductList( 51, "(z,Xd)", 0, 0, 0, -1, 0, 0, 0, 0 )
endfMTtoC_ProductLists[205] = endfMTtoC_ProductList( 52, "(z,Xt)", 0, 0, 0, 0, -1, 0, 0, 0 )
endfMTtoC_ProductLists[206] = endfMTtoC_ProductList( 53, "(z,XH)", 0, 0, 0, 0, 0, -1, 0, 0 )
endfMTtoC_ProductLists[207] = endfMTtoC_ProductList( 54, "(z,Xa)", 0, 0, 0, 0, 0, 0, -1, 0 )
endfMTtoC_ProductLists[208] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[209] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[210] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[211] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[212] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[213] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[214] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[215] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[216] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[217] = endfMTtoC_ProductList( -1, "Various meson and antiparticle production $sigma$'s" )
endfMTtoC_ProductLists[251] = endfMTtoC_ProductList( -1, "Various elastic neutrons scattering parameters." )
endfMTtoC_ProductLists[252] = endfMTtoC_ProductList( -1, "Various elastic neutrons scattering parameters." )
endfMTtoC_ProductLists[253] = endfMTtoC_ProductList( -1, "Various elastic neutrons scattering parameters." )
endfMTtoC_ProductLists[301] = endfMTtoC_ProductList( -1, "Energy release for total and partial $sigma$'s." )
endfMTtoC_ProductLists[451] = endfMTtoC_ProductList( -1, "Heading or title information, MF=1 only." )
endfMTtoC_ProductLists[452] = endfMTtoC_ProductList( 15, "(z,f) $bar{nu}$ total, i.e. prompt plus delayed, fission.", -1, 0, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[454] = endfMTtoC_ProductList( 15, "(z,f) Independent fission product yields.", -1, 0, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[455] = endfMTtoC_ProductList( 15, "(z,f) $bar{nu}$ for delayed fission.", -1, 0, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[456] = endfMTtoC_ProductList( 15, "(z,f) $bar{nu}$ for prompt fission.", -1, 0, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[457] = endfMTtoC_ProductList( -1, "(z,f) Radioactive decay data.", -1, 0, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[458] = endfMTtoC_ProductList( 15, "(z,f) Energy release in fission for incident $n$'s.", -1, 0, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[459] = endfMTtoC_ProductList( 15, "(z,f) Cumulative fission product yields.", -1, 0, 0, 0, 0, 0, 0, 0 )
endfMTtoC_ProductLists[500] = endfMTtoC_ProductList( -1, "Total charged particle stopping power." )
endfMTtoC_ProductLists[501] = endfMTtoC_ProductList( 70, "Total photon interaction $sigma$." )
endfMTtoC_ProductLists[502] = endfMTtoC_ProductList( 71, "Photon coherent scattering." )
endfMTtoC_ProductLists[504] = endfMTtoC_ProductList( 72, "Photon incoherent scattering." )
endfMTtoC_ProductLists[505] = endfMTtoC_ProductList( -1, "Imaginary scattering factor." )
endfMTtoC_ProductLists[506] = endfMTtoC_ProductList( -1, "Real scattering factor." )
endfMTtoC_ProductLists[515] = endfMTtoC_ProductList( -1, "Pair production, electron field." )
endfMTtoC_ProductLists[516] = endfMTtoC_ProductList( 74, "Pair production." )
endfMTtoC_ProductLists[517] = endfMTtoC_ProductList( -1, "Pair production, nuclear field." )
endfMTtoC_ProductLists[522] = endfMTtoC_ProductList( 73, "Photoelectric absorption." )
endfMTtoC_ProductLists[534] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[535] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[536] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[537] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[538] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[539] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[540] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[541] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[542] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[543] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[544] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[545] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[546] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[547] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[548] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[549] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[550] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[551] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[552] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[553] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[554] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[555] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[556] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[557] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[558] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[559] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[560] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[561] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[562] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[563] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[564] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[565] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[566] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[567] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[568] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[569] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[570] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductLists[571] = endfMTtoC_ProductList( -1, "Various subshell photoelectric $sigma$'s." )
endfMTtoC_ProductList_excitedStateInitializer( endfMTtoC_ProductLists, 600, 649, 40, "p", H1s = 1 )
endfMTtoC_ProductList_excitedStateInitializer( endfMTtoC_ProductLists, 650, 699, 41, "d", H2s = 1 )
endfMTtoC_ProductList_excitedStateInitializer( endfMTtoC_ProductLists, 700, 749, 42, "t", H3s = 1 )
endfMTtoC_ProductList_excitedStateInitializer( endfMTtoC_ProductLists, 750, 799, 44, "He3", He3s = 1 )
endfMTtoC_ProductList_excitedStateInitializer( endfMTtoC_ProductLists, 800, 849, 45, "a", He4s = 1 )
endfMTtoC_ProductLists[851] = endfMTtoC_ProductList( -1, "Lumped reaction covariances." )
endfMTtoC_ProductList_excitedStateInitializer( endfMTtoC_ProductLists, 875, 891, 12, "2n", ns = 2 )
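# Illustrative lookups into the table built above (a sketch; labels follow the constructor calls):
#     endfMTtoC_ProductLists[16].reactionLabel    # '(z,2n)' with C = 12 and two outgoing neutrons
#     endfMTtoC_ProductLists[600].reactionLabel   # '(z,p[0]) -- to ground state' with C = 40
#     endfMTtoC_ProductLists[649].reactionLabel   # '(z,p[c]) -- excitation to continuum'
#     endfMTtoC_ProductLists[875]['n']            # 2, i.e. (z,2n) leaving the residual in its ground state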
def getCSFromMT( MT ) :
if( MT == 1 ) : return( 1, 0 ) # (z,total)
if( MT == 2 ) : return( 10, 0 ) # (z,elas)
if( MT in [ 3, 4, 5, 10, 25, 27, 30, 35, 36, 101, 109, 113, 114,
152, 153, 154, 156, 157, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171,
172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188,
190, 191, 192, 194, 195, 196, 197, 198, 199, 200] ) : return( -MT, 0 )
if( MT == 11 ) : return( 32, 0 )
if( MT == 16 ) : return( 12, 0 )
if( MT == 17 ) : return( 13, 0 )
if( MT == 18 ) : return( 15, 0 )
if( MT == 19 ) : return( 15, 0 )
if( MT == 20 ) : return( 15, 0 )
if( MT == 21 ) : return( 15, 0 )
if( MT == 22 ) : return( 26, 0 )
if( MT == 23 ) : return( 36, 0 )
if( MT == 24 ) : return( 33, 0 )
if( MT == 28 ) : return( 20, 0 )
if( MT == 29 ) : return( 27, 0 )
if( MT == 32 ) : return( 22, 0 )
if( MT == 33 ) : return( 24, 0 )
if( MT == 34 ) : return( 25, 0 )
if( MT == 37 ) : return( 14, 0 )
if( MT == 38 ) : return( 15, 0 )
if( MT == 41 ) : return( 29, 0 )
if( MT == 42 ) : return( 16, 0 )
if( MT == 44 ) : return( 17, 0 )
if( MT == 45 ) : return( 34, 0 )
if( 50 <= MT < 91 ) : return( 11, 1 )
if( MT == 91 ) : return( 11, 0 )
if( MT == 102 ) : return( 46, 0 )
if( MT == 103 ) : return( 40, 0 )
if( MT == 104 ) : return( 41, 0 )
if( MT == 105 ) : return( 42, 0 )
if( MT == 106 ) : return( 44, 0 )
if( MT == 107 ) : return( 45, 0 )
if( MT == 108 ) : return( 37, 0 )
if( MT == 111 ) : return( 18, 0 )
if( MT == 112 ) : return( 48, 0 )
if( MT == 115 ) : return( 19, 0 )
if( MT == 116 ) : return( 39, 0 )
if( MT == 117 ) : return( 47, 0 )
if( MT == 155 ) : return( 43, 0 )
if( MT == 158 ) : return( 23, 0 )
if( MT == 159 ) : return( 31, 0 )
if( MT == 189 ) : return( 28, 0 )
if( MT == 193 ) : return( 38, 0 )
if( MT == 452 ) : return( 15, 0 ) # prompt plus delayed fission neutrons
if( MT == 455 ) : return( 15, 7 ) # delayed fission neutrons
if( MT == 456 ) : return( 15, 0 ) # prompt fission neutrons
if( MT == 458 ) : return( 15, 0 ) # prompt fission neutron energy
if( 600 <= MT < 649 ) : return( 40, 1 )
if( MT == 649 ) : return( 40, 0 )
if( 650 <= MT < 699 ) : return( 41, 1 )
if( MT == 699 ) : return( 41, 0 )
if( 700 <= MT < 749 ) : return( 42, 1 )
if( MT == 749 ) : return( 42, 0 )
if( 750 <= MT < 799 ) : return( 44, 1 )
if( MT == 799 ) : return( 44, 0 )
if( 800 <= MT < 849 ) : return( 45, 1 )
if( MT == 849 ) : return( 45, 0 )
if( 875 <= MT < 891 ) : return( 12, 1 )
if( MT == 891 ) : return( 12, 0 )
if( MT == 502 ) : return( 71, 0 ) # photo-atomic coherent
if( MT == 504 ) : return( 72, 0 ) # photo-atomic incoherent
if( MT == 516 ) : return( 74, 0 ) # photo-atomic pair production
if( MT == 522 ) : return( 73, 0 ) # photo-atomic photo-electric
raise Exception( 'MT = %d is not supported for conversion to C, S' % MT )
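# Usage sketch for getCSFromMT (returns the ENDL ( C, S ) pair per the branches above):
#     getCSFromMT( 2 )     # -> ( 10, 0 )   elastic
#     getCSFromMT( 16 )    # -> ( 12, 0 )   (z,2n)
#     getCSFromMT( 51 )    # -> ( 11, 1 )   (z,n') to a discrete level
#     getCSFromMT( 455 )   # -> ( 15, 7 )   delayed fission neutrons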
def getMTFromC( C ) :
if( C == 1 ) : return( 1 ) # (z,total)
if( C == 5 ) : return( -5 ) # (z,prod)
if( C == 8 ) : return( -8 ) # (z,lacs)
if( C == 9 ) : return( -9 ) # (z,n+i)
if( C == 10 ) : return( 2 ) # (z,elas)
if( C == 11 ) : return( 50 ) # (z,n)
if( C == 12 ) : return( 16 ) # (z,2n)
if( C == 13 ) : return( 17 ) # (z,3n)
if( C == 14 ) : return( 37 ) # (z,4n)
if( C == 15 ) : return( 18 ) # (z,f)
if( C == 16 ) : return( 42 ) # (z,3np)
if( C == 17 ) : return( 44 ) # (z,n2p)
if( C == 18 ) : return( 111 ) # (z,2p)
if( C == 19 ) : return( 115 ) # (z,pd)
if( C == 20 ) : return( 28 ) # (z,np)
if( C == 21 ) : return( -20 ) # (z,pn)
if( C == 22 ) : return( 32 ) # (z,nd)
if( C == 23 ) : return( 158 ) # (z,nda)
if( C == 24 ) : return( 33 ) # (z,nt)
if( C == 25 ) : return( 34 ) # (z,nHe3)
if( C == 26 ) : return( 22 ) # (z,na)
if( C == 27 ) : return( 29 ) # (z,n2a)
if( C == 28 ) : return( 189 ) # (z,nta)
if( C == 29 ) : return( 41 ) # (z,2np)
if( C == 30 ) : return( -30 ) # (z,gna)
if( C == 31 ) : return( 159 ) # (z,2npa)
if( C == 32 ) : return( 11 ) # (z,2nd)
if( C == 33 ) : return( 24 ) # (z,2na)
if( C == 34 ) : return( 45 ) # (z,npa)
if( C == 35 ) : return( 32 ) # (z,dn), ENDF does not have an (z,dn) reaction only an (z,nd) reaction and ENDL's only (z,dn) reaction is not two-body so order does not matter.
if( C == 36 ) : return( 23 ) # (z,n3a)
if( C == 37 ) : return( 108 ) # (z,2a)
if( C == 38 ) : return( 193 ) # (z,He3 a)
if( C == 39 ) : return( 116 ) # (z,pt)
if( C == 40 ) : return( 600 ) # (z,p)
if( C == 41 ) : return( 650 ) # (z,d)
if( C == 42 ) : return( 700 ) # (z,t)
if( C == 43 ) : return( 155 ) # (z,ta)
if( C == 44 ) : return( 750 ) # (z,He3)
if( C == 45 ) : return( 800 ) # (z,a)
if( C == 46 ) : return( 102 ) # (z,g)
if( C == 47 ) : return( 117 ) # (z,da)
if( C == 48 ) : return( 112 ) # (z,pa)
if( C == 49 ) : return( -49 ) # (z,2pa)
if( C == 50 ) : return( -50 ) # (z,Xp)
if( C == 51 ) : return( -51 ) # (z,Xd)
if( C == 52 ) : return( -52 ) # (z,Xt)
if( C == 53 ) : return( -53 ) # (z,XHe3)
if( C == 54 ) : return( -54 ) # (z,Xa)
if( C == 55 ) : return( -55 ) # (z,Xg)
if( C == 56 ) : return( -56 ) # (z,Xn)
if( C == 57 ) : return( -57 ) # (z,Xe)
if( C == 70 ) : return( 501 ) # (z,totp)
if( C == 71 ) : return( 502 ) # (z,coh)
if( C == 72 ) : return( 504 ) # (z,incoh)
if( C == 73 ) : return( 522 ) # (z,photo)
if( C == 74 ) : return( 516 ) # (z,pair)
if( C == 75 ) : return( -75 ) # (z,triplet)
if( C == 78 ) : return( -78 ) # (z,ic)
if( C == 81 ) : return( -81 ) # (z,ion)
if( C == 82 ) : return( -82 ) # (z,brem)
if( C == 83 ) : return( -83 ) # (z,excit)
if( C == 84 ) : return( -84 ) # (z,coll)
if( C == 91 ) : return( -91 ) # (z,shell)
if( C == 92 ) : return( -92 ) # (z,trans)
if( C == 93 ) : return( -93 ) # (z,whole)
raise Exception( 'C = %d is not supported for conversion to MT' % C )
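# Usage sketch for getMTFromC (ENDL C number -> representative ENDF MT):
#     getMTFromC( 46 )   # -> 102   (z,g)
#     getMTFromC( 15 )   # -> 18    (z,f)
#     getMTFromC( 40 )   # -> 600   (z,p); discrete-level MT assignment is handled by ENDLCS_To_ENDFMT below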
class ENDLCS_To_ENDFMT :
def __init__( self, projectile ) :
self.projectile = projectile
self.MTPrimes = {}
def getMTFromCS( self, C, S, CCounts = 0 ) :
def MTPrimes( self, MT, S, projectile ) :
if( S == 0 ) :
if( MT == 50 ) :
MT = 91
else :
MT += 49
else :
if( MT not in self.MTPrimes ) :
self.MTPrimes[MT] = -1
if( self.projectile == projectile ) : self.MTPrimes[MT] += 1
self.MTPrimes[MT] += 1
MT += self.MTPrimes[MT]
return( MT )
MT = getMTFromC( C )
if( MT == 50 ) :
MT = MTPrimes( self, MT, S, 'n' )
elif( MT == 600 ) :
MT = MTPrimes( self, MT, S, 'H1' )
if( ( S == 0 ) and ( CCounts == 1 ) ) : MT = 103
elif( MT == 650 ) :
MT = MTPrimes( self, MT, S, 'H2' )
if( ( S == 0 ) and ( CCounts == 1 ) ) : MT = 104
elif( MT == 700 ) :
MT = MTPrimes( self, MT, S, 'H3' )
if( ( S == 0 ) and ( CCounts == 1 ) ) : MT = 105
elif( MT == 750 ) :
MT = MTPrimes( self, MT, S, 'He3' )
if( ( S == 0 ) and ( CCounts == 1 ) ) : MT = 106
elif( MT == 800 ) :
MT = MTPrimes( self, MT, S, 'He4' )
if( ( S == 0 ) and ( CCounts == 1 ) ) : MT = 107
return( MT )
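# Usage sketch for ENDLCS_To_ENDFMT (assuming the S = 1 discrete levels for a given C arrive in order,
# so successive calls are assigned successive level MT numbers):
#     converter = ENDLCS_To_ENDFMT( 'n' )
#     converter.getMTFromCS( 11, 1 )               # -> 51   first (z,n') discrete level
#     converter.getMTFromCS( 11, 1 )               # -> 52   second discrete level
#     converter.getMTFromCS( 11, 0 )               # -> 91   (z,n') continuum
#     converter.getMTFromCS( 40, 0, CCounts = 1 )  # -> 103  a lone (z,p) reaction collapses to MT 103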
def ENDF_MTZAEquation( projectileZA, targetZA, MT ) :
"""
This function returns a python list of length 2. The first element is a list of all outgoing particle ZA's
(including the residual) for the reaction of projectileZA + targetZA with ENDF's reaction identifier MT.
The second element is a reaction equation for this projectileZA, targetZA and MT. For example
ENDF_MTZAEquation( 1, 95242, 22 ) returns
([1, 2004, 93238], 'n + Am242 -> n + He4 + Np238')
That is, for a neutron ( projectileZA = 1 ) hitting Am242 ( targetZA = 95242 ) with MT = 22 - ENDF (z,na) reaction -
the outgoing particle ZA's are [1, 2004, 93238] and the reaction equation is 'n + Am242 -> n + He4 + Np238'.
"""
if( ( MT < 0 ) or ( MT > 999 ) or ( MT in [ 1, 3, 5, 10, 18, 19, 20, 21, 27, 38, 101, 151 ] or ( 200 < MT < 600 ) or ( 850 < MT < 875 ) ) ) :
raise Exception( 'MT = %s is not supported' % MT )
elif( MT == 2 ) :
productCounts = { chemicalElementMiscPoPsModule.idFromZA( projectileZA ) : 1 }
level = None
elif( MT == 4 ) :
productCounts = { chemicalElementMiscPoPsModule.idFromZA( projectileZA ) : 1 }
level = None
else :
productCounts = endfMTtoC_ProductLists[MT].productCounts
level = endfMTtoC_ProductLists[MT].residualLevel
compoundZA = projectileZA + targetZA
residualZA = compoundZA
productCountList = []
adder, equationZA, equation = '', [], '%s + %s ->' % \
( chemicalElementMiscPoPsModule.idFromZA( projectileZA ), chemicalElementMiscPoPsModule.idFromZA( targetZA ) )
for product in productCounts :
if( product == IDsPoPsModule.photon ) :
productZA = 0
else :
productZA = miscENDLModule.getZ_A_suffix_andZAFromName( product )[-1]
if( productCounts[product] > 0 ) : productCountList.append( [ productZA, product, productCounts[product] ] )
productCountList.sort( )
for productZA, token, count in productCountList :
residualZA -= count * productZA
for idx in range( count ) :
equation += ' %s%s' % ( adder, token )
equationZA.append( productZA )
adder = '+ '
levelStr = ''
if( not( level is None ) ) :
if( isinstance( level, int ) ) :
if( level < 0 ) : raise ValueError( 'level = %s must be >= 0' % level )
if( level > 0 ) : levelStr = "_e%s" % level
else :
raise Exception( 'Unknown level specifier = %s' % level )
equation += ' %s%s%s' % ( adder, chemicalElementMiscPoPsModule.idFromZA( residualZA ), levelStr )
equationZA.append( residualZA )
return( equationZA, equation )
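# One more illustrative call, following the same conventions as the docstring example above:
#     ENDF_MTZAEquation( 1, 26056, 107 )   # -> ( [2004, 24053], 'n + Fe56 -> He4 + Cr53' )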
def setReactionsOutputChannelFromOutputChannel( info, reaction, outputChannel ) :
for conversion in info.ENDFconversionFlags.flags :
if( isinstance( conversion.link, QModule.component ) ) :
if( conversion.link == outputChannel.Q ) : conversion.link = reaction.outputChannel.Q
reaction.outputChannel.process = outputChannel.process
reaction.outputChannel.genre = outputChannel.genre
for Q in outputChannel.Q : reaction.outputChannel.Q.add( Q )
for product in outputChannel.products : reaction.outputChannel.products.add( product )
for delayedNeutron in outputChannel.fissionFragmentData.delayedNeutrons :
reaction.outputChannel.fissionFragmentData.delayedNeutrons.add( delayedNeutron )
for fissionEnergyRelease in outputChannel.fissionFragmentData.fissionEnergyReleases :
reaction.outputChannel.fissionFragmentData.fissionEnergyReleases.add( fissionEnergyRelease )
|
python
|
import numpy as np
from scipy.linalg import expm
from tqdm import tqdm
import time
from utils import visualize_trajectory_2d,load_data
############## All Utility Functions ###############
def round_dot(vec):
'''
:param vec: A point in homogeneous co-ordinates (4x1)
:return: The 4x6 circle-dot operator [[I, -hat(s)], [0, 0]] for the point, used in the observation Jacobian
'''
assert vec.shape == (4,1)
vec_homog = to_homog(vec)
vec_hat = hat(vec_homog[0:3,0])
vec_round_dot = np.hstack((np.eye(3),-vec_hat))
vec_round_dot = np.vstack((vec_round_dot,np.zeros((1,6))))
return vec_round_dot
def to_homog(vec):
'''
:param vec: A vector
:return: A vector scaled to make sure the last component is one
'''
assert vec.shape == (4,1)
return vec / vec[3,0]
def pi(point):
'''
:param point: A point in homogeneous camera coordinates (4-vector)
:return: The point divided by its z component, i.e. the canonical projection pi(q) = q / q[2]
'''
point = point.reshape(4)
return point / point[2]
def inv_pi(point):
'''
:param point: In 2D
:return: in 3D - Gain depth info
'''
assert point.shape == (4,1)
return point * point[3]
def deri_pi(point):
'''
:param point: The homogeneous point at which the projection derivative is evaluated
:return: The 4x4 Jacobian d(pi)/dq of the projection
'''
point = point.reshape(4)
return np.array([[1,0,-point[0]/point[2],0],
[0,1,-point[1]/point[2],0],
[0,0,0,0],
[0,0,-point[3]/point[2],1]]) / point[2]
def hat(vec):
'''
Computes the 3x3 skew-symmetric (hat) matrix of a 3-vector, so that hat(vec) @ w == np.cross(vec, w)
'''
assert vec.ndim == 1
return np.array([[0,-vec[2],vec[1]],
[vec[2],0,-vec[0]],
[-vec[1],vec[0],0]])
def curly_hat(omega_hat,v):
'''
Computes the 6x6 curly-hat matrix [[omega_hat, hat(v)], [0, omega_hat]] used for covariance propagation
'''
v_hat = hat(v)
curly_u = np.hstack((omega_hat,v_hat))
curly_u = np.vstack((curly_u,np.hstack((np.zeros((3,3)),omega_hat))))
return curly_u
############## All Utility Functions Ends ###############
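# Quick sanity checks for the utilities above (illustrative only, not part of the pipeline):
#     v, w = np.array([1., 2., 3.]), np.array([4., 5., 6.])
#     np.allclose(hat(v) @ w, np.cross(v, w))   # True: hat(v) @ w is the cross product v x w
#     curly_hat(hat(v), w).shape                # (6, 6): the block matrix used for covariance propagation
#     round_dot(np.array([[1.], [2.], [3.], [1.]])).shape   # (4, 6)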
def imu_ekf(data_set):
'''
This function performs the EKF prediction (dead-reckoning) step for the IMU pose only;
the binocular-camera features are loaded but not used here.
:param data_set-> The data set which is to be read for processing
:return -> The mean poses for the IMU are returned
'''
time_stamp,features,v,omega,K,b,cam_T_imu = load_data(data_set)
start_time = time.time()
z = features
opt_T_imu = cam_T_imu
#### Initializations ####
prev_pose = np.eye(4)
prev_cov = np.eye(6)
pose_mean = np.zeros((4,4,time_stamp.shape[1]))
for t in tqdm(range(time_stamp.shape[1]-1)):
tau = time_stamp[0,t+1] - time_stamp[0,t]
omega_hat = hat(omega[:,t])
u_hat = np.hstack((omega_hat,v[:,t].reshape(3,1)))
u_hat = np.vstack((u_hat,np.zeros((1,4))))
#### Predict IMU Pose ####
#### Mean ####
pose_mean[:,:,t] = expm(-tau * u_hat) @ prev_pose
prev_pose = pose_mean[:,:,t]
#### Co-variance ####
W = np.diag(np.random.randn(6))
pose_cov = expm(-tau * curly_hat(omega_hat,v[:,t])) @ prev_cov \
@ expm(-tau * curly_hat(omega_hat,v[:,t])).T + W
prev_cov = pose_cov # carry the propagated covariance forward (not otherwise used in this function)
#visualize_trajectory_2d(pose_mean)
print("Done IMU Predict and time taken is ", time.time()-start_time)
return pose_mean
def slam_imu_predict(time_stamp,features,v,omega,K,b,cam_T_imu,t,prev_pose,prev_cov):
'''
This function performs the predict step for the SLAM problem; it is called once per time stamp.
Inputs: the measurement data along with the previous pose and covariance matrix
Output: Predicted covariance and mean for the IMU pose
'''
start_time = time.time()
z = features
opt_T_imu = cam_T_imu
tau = time_stamp[0,t+1] - time_stamp[0,t]
omega_hat = hat(omega[:,t])
u_hat = np.hstack((omega_hat,v[:,t].reshape(3,1)))
u_hat = np.vstack((u_hat,np.zeros((1,4))))
#### Predict IMU Pose ####
#### Mean ####
pose_mean = expm(-tau * u_hat) @ prev_pose
#### Co-variance ####
W = np.diag(np.random.randn(6))
pose_cov = expm(-tau * curly_hat(omega_hat,v[:,t])) @ prev_cov \
@ expm(-tau * curly_hat(omega_hat,v[:,t])).T + W
#visualize_trajectory_2d(pose_mean)
#print("Done IMU Predict and time taken is ", time.time()-start_time)
return pose_mean, pose_cov
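# The prediction above implements the discrete SE(3) kinematics used throughout this file:
#     T_{t+1}     = expm( -tau * u_hat ) @ T_t,        u_hat = [[omega_hat, v], [0, 0]]
#     Sigma_{t+1} = expm( -tau * ad(u) ) @ Sigma_t @ expm( -tau * ad(u) ).T + W
# where ad(u) is curly_hat(omega_hat, v) and W is the (randomly drawn) process-noise term.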
def slam(data_set):
'''
This performs slam for the visual odometry data
Step 1: Performs predict for IMU pose
Step 2: Performs update for IMU pose and landmark points
Substep: Compute Jacobian for H_l and H_u
Substep: Concatenate both of them
Substep: Perform overall update for Covariance and Kalman Gain
Substep: Perform individual update for the means of IMU pose and Landmark Locations
:return: Plot of the localization and Mapping for the Particle
'''
time_stamp,z,v,omega,k,b,cam_T_imu = load_data(data_set)
#Choosing Points in Map
chose_landmark_option = 1
if(chose_landmark_option == 1):
chosen_landmarks = [i for i in range(z.shape[1]) if i%10 == 0]
elif(chose_landmark_option == 2):
chosen_landmarks = np.random.randint(0,z.shape[1],500)
last_landmark = max(chosen_landmarks)
#Temporary variables
landmark_mean_cam = np.zeros(3)
first_observation = np.zeros(3)
#Projection Constants
P_T = np.hstack((np.eye(3),np.zeros((3,1)))).T
M = np.hstack((k[0:2,0:3],np.zeros((2,1))))
M = np.vstack((M,M))
M[2,3] = -k[0,0] * b #Disparity term (stereo baseline), as set in visual_ekf below
landmark_mean = np.zeros((3 * len(chosen_landmarks))) # Total LandMarks are 3M
state_cov = 2 * np.eye(3*len(chosen_landmarks)+6) #New State Variable with Size 3M+6
imu_prev_pose, imu_prev_cov = np.eye(4), np.eye(6) # To predict module Initialization
pose_mean = np.zeros((4,4,z.shape[2])) #For plotting purposes; size is 4x4xT
for t in tqdm(range(z.shape[2]-1)):
#### IMU Predict pos and covariance ####
imu_pred_pos,imu_pred_cov = slam_imu_predict(time_stamp,z,v,omega,k,b,cam_T_imu,t,imu_prev_pose,imu_prev_cov)
z_tik = np.zeros((4 * len(chosen_landmarks),1)) #Observation Model Readings
z_observed = np.zeros((4 * len(chosen_landmarks),1)) #Sensor readings
### Find the valid readings and choose the ones among the points of interest ###
z_sum = np.sum(z[:,0:last_landmark,t],axis=0)
valid_scans = np.where(z_sum != -4)
valid_and_relevant_scans = [scan for scan in valid_scans[0] if scan in chosen_landmarks]
H_l = np.zeros((4*len(chosen_landmarks),3*len(chosen_landmarks)))
H_u = np.zeros((4*len(chosen_landmarks),6))
for scan in valid_and_relevant_scans:
###### Jacobian for Mapping Calculation #####
scan_loc = chosen_landmarks.index(scan) # The location of the current scan in the original array
str_4x,end_4x = scan_loc*4, scan_loc*4+4
str_3x,end_3x = scan_loc*3, scan_loc*3+3
##### Initialization for scans seen for the first time ######
if (np.all(landmark_mean[str_3x:end_3x] == first_observation)):
## Convert Z into Camera Cordinates
landmark_mean_cam[2] = -M[2, 3] / (z[0, scan, t] - z[2, scan, t])
landmark_mean_cam[1] = (z[1, scan, t] - M[1, 2]) * landmark_mean_cam[2] / M[1, 1]
landmark_mean_cam[0] = (z[0, scan, t] - M[0, 2]) * landmark_mean_cam[2] / M[0, 0]
landmark_mean_cam_homog = np.vstack((landmark_mean_cam.reshape(3, 1), 1))
landmark_mean_homog = np.linalg.inv(cam_T_imu @ imu_pred_pos) @ landmark_mean_cam_homog
landmark_mean[str_3x:end_3x] = landmark_mean_homog[0:3, 0]
##### Perform Update related Operations ######
else:
landmark_mean_homo = np.vstack((landmark_mean[str_3x:end_3x].reshape(3, 1), 1))
landmark_camera = cam_T_imu @ imu_pred_pos @ landmark_mean_homo
dpi_dq = deri_pi(landmark_camera)
H_l[str_4x:end_4x,str_3x:end_3x] = M @ dpi_dq @ cam_T_imu @ imu_pred_pos @ P_T
###### Jacobian for IMU Calculation #####
H_u[str_4x:end_4x,:] = M @ dpi_dq @ cam_T_imu @ round_dot(to_homog(imu_pred_pos @ landmark_mean_homo))
###### Observed vs Expected ######
z_observed[str_4x:end_4x,0] = z[:,scan,t]
z_tik[str_4x:end_4x,0] = M @ pi(landmark_camera)
#### Update Combined Covariance####
H = np.hstack((H_l,H_u)) #Main Jacobian
N = np.diag(5 * np.random.rand(H.shape[0]))
###### If the inverse leads to Singularity Compute Another Noise ######
try:
Kalman_gain = state_cov @ H.T @ np.linalg.inv(H @ state_cov @ H.T + N)
except:
N = np.diag(6 * np.random.rand(H.shape[0]))
Kalman_gain = state_cov @ H.T @ np.linalg.inv(H @ state_cov @ H.T + N)
#### Update the State Covariance Matrix ####
state_cov = (np.eye(3*len(chosen_landmarks)+6) - Kalman_gain @ H) @ state_cov
##IMU Mean Update##
perturb_pos = Kalman_gain[-6:,:] @ (z_observed-z_tik) #Pick last few rows to get IMU details
perturb_pos_hat = np.hstack((hat(perturb_pos[3:6,0]),perturb_pos[0:3,0].reshape(3,1)))
perturb_pos_hat = np.vstack((perturb_pos_hat,np.zeros((1,4))))
imu_update_pose = expm(perturb_pos_hat) @ imu_pred_pos
pose_mean[:,:,t] = imu_update_pose
##LandMark Mean Update ##
perturb_landmark = Kalman_gain[0:-6,:] @ (z_observed - z_tik) #Pick first 3M rows
landmark_mean = landmark_mean + perturb_landmark.reshape(-1)
#update imu pose with the updated values of these variables
imu_prev_pose = imu_update_pose
visualize_trajectory_2d(pose_mean, landmark_mean.reshape(-1, 3).T)
def visual_ekf(pose_mean,z,k,b,cam_T_imu):
'''
:param pose_mean: The estimated pose for the IMU Data set along with the Estimated pose of IMU
Computes the Landmark update based on the assumption of IMU poses being golden
Uses the Stereo Camera Model to get the output
:return: Plot of the localization of the body along with the map of the surroundings
'''
print("Starting Mapping Update")
start_time = time.time()
num_landmark = z.shape[1]
landmark_mean = np.zeros((3*num_landmark)) # 3M
landmark_cov = np.diag(1e-2*np.random.randn(3*num_landmark))
landmark_mean_cam = np.zeros(3)
landmark_mean_cam_homog = np.zeros((4,1))
P_T = np.hstack((np.eye(3),np.zeros((3,1)))).T
M = np.hstack((k[0:2,0:3],np.zeros((2,1))))
M = np.vstack((M,M))
M[2,3] = -k[0,0] * b #Disparity
total_time = z.shape[2]
no_observation = np.array([-1,-1,-1,-1])
first_observation = np.zeros(3)
for t in tqdm(range(total_time)):
jacobian = np.zeros((4*num_landmark, 3*num_landmark))
z_tik = np.zeros((4 * num_landmark))
z_sum = np.sum(z[:,0:num_landmark,t],axis=0)
valid_scans = np.where(z_sum != -4)
#for landmark in range(num_landmark-1):
for landmark in valid_scans[0]:
lnd_mrk_strt, lnd_mrk_end = landmark * 3, landmark * 3 + 3
if(np.all(landmark_mean[lnd_mrk_strt:lnd_mrk_end] == first_observation)):
landmark_mean_cam[2] = -M[2,3] / (z[0,landmark,t] - z[2,landmark,t])
landmark_mean_cam[1] = (z[1,landmark,t] - M[1,2]) * landmark_mean_cam[2] / M[1,1]
landmark_mean_cam[0] = (z[0,landmark,t] - M[0,2]) * landmark_mean_cam[2] / M[0,0]
landmark_mean_cam_homog = np.vstack((landmark_mean_cam.reshape(3,1),1))
landmark_mean_homog = np.linalg.inv(cam_T_imu @ pose_mean[:,:,t]) @ landmark_mean_cam_homog
landmark_mean[lnd_mrk_strt:lnd_mrk_end] = landmark_mean_homog[0:3,0]
#initialize
else:
landmark_mean_homo = np.vstack((landmark_mean[lnd_mrk_strt:lnd_mrk_end].reshape(3,1),1))
landmark_camera = cam_T_imu @ pose_mean[:, :, t] @ landmark_mean_homo
dpi_dq = deri_pi(landmark_camera)
strt,end = landmark*3,landmark*3 + 3 #Address
z_tik = (M @ pi(landmark_camera)).flatten()
jacobian = M @ dpi_dq @ cam_T_imu @ pose_mean[:,:,t] @ P_T
k_gain = landmark_cov[strt:end,strt:end] @ jacobian.T @ \
np.linalg.inv(jacobian @ landmark_cov[strt:end,strt:end] @ jacobian.T \
+ np.diag(30 * np.random.randn(4))) #np.diag(1e2) also worked
landmark_mean[strt:end] = landmark_mean[strt:end] + k_gain @ (z[:,landmark,t] - z_tik)
landmark_cov[strt:end,strt:end] = (np.eye(3) - k_gain @ jacobian) @ landmark_cov[strt:end,strt:end]
print("Done Mapping update and time taken is ", time.time()-start_time)
visualize_trajectory_2d(pose_mean,landmark_mean.reshape(-1,3).T)
if __name__ == '__main__':
dataset_list = ['data/0022.npz','data/0027.npz','data/0034.npz']
for data_set in dataset_list:
t,features,linear_velocity,rotational_velocity,K,b,cam_T_imu = load_data(data_set)
### Run Part a and Part b ###
visual_ekf(imu_ekf(data_set),features,K,b,cam_T_imu)
### Run Part c ###
slam(data_set)
|
python
|
from rule import *
from operator import methodcaller
def chi_toi_checker(cards):
return 0
def ko_ku_shi_checker(cards):
return 0
def ron_dfs(handcards):
if (len(handcards) == 0):
return True
return False
# True if the hand can win (ron) at all, regardless of whether it has a yaku
def can_ron(cards):
if (ron_dfs(cards.handcards)):
return True
return (chi_toi_checker(cards) > 0) or (ko_ku_shi_checker(cards) > 0)
def chin_i_so_checker(cards):
ok = True
return 1
def ri_chi_checker(cards):
return 1
yaku_list = ["chin_i_so","ri_chi"]
def get_all_yaku(cards):
ret = []
for pattern in yaku_list:
check_ret = eval(pattern + "_checker")(cards)
if (check_ret > 0):
ret.append((check_ret, pattern))
return ret
print(get_all_yaku('fuck'))
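# Note: get_all_yaku resolves each checker with eval(pattern + "_checker"). A globals() lookup gives the
# same dynamic dispatch without eval (a sketch, assuming every checker lives in this module):
#     def get_all_yaku(cards):
#         ret = []
#         for pattern in yaku_list:
#             check_ret = globals()[pattern + "_checker"](cards)
#             if check_ret > 0:
#                 ret.append((check_ret, pattern))
#         return ret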
|
python
|
import pytest
import torch
import time
from ynot.datasets import FPADataset
from ynot.echelle import Echellogram
from torch.utils.data import DataLoader
import torch.optim as optim
import torch.nn as nn
@pytest.mark.parametrize(
"device", ["cuda", "cpu"],
)
def test_forward_backward(device):
"""Do the scene models have the right shape"""
echellogram = Echellogram(device=device)
t0 = time.time()
scene_model = echellogram.forward(1)
t1 = time.time()
scalar = scene_model.sum()
t2 = time.time()
scalar.backward()
t3 = time.time()
net_time = t1 - t0
net_time2 = t3 - t2
print(f"\n\t{echellogram.device}: forward {net_time:0.5f} seconds", end="\t")
print(f"\n\t{echellogram.device}: backward {net_time2:0.5f} seconds", end="\t")
assert scene_model.shape == echellogram.xx.shape
assert scene_model.dtype == echellogram.xx.dtype
@pytest.mark.parametrize(
"device", ["cuda", "cpu"]
)
@pytest.mark.slow
def test_training_loop(device):
"""The end-to-end training should operate"""
model = Echellogram(device=device)
dataset = FPADataset()
n_frames_per_batch=1
train_loader = DataLoader(dataset=dataset, batch_size=n_frames_per_batch, pin_memory=True,
shuffle=True)
loss_fn = nn.MSELoss(reduction='mean')
optimizer = optim.Adam(model.parameters(), 0.01)
n_epochs = 10
losses = []
initial_params = model.parameters()
t0 = time.time()
for epoch in range(n_epochs):
for data in train_loader:
ind, y_batch = data[0].to(device, non_blocking=True), data[1].to(device, non_blocking=True)
model.train()
yhat = model.forward(ind).unsqueeze(0)
loss = loss_fn(yhat, y_batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
losses.append(loss.item())
t1 = time.time()
net_time = t1 - t0
print(f"\n\t {n_epochs} epochs on {device}: {net_time:0.1f} seconds", end="\t")
for loss in losses:
assert loss == loss # NaN is the only float not equal to itself, so this rejects NaN losses
for parameter in model.parameters():
assert parameter.isfinite().all()
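# Both tests are parametrized over "cuda" and "cpu", and the training-loop test carries the `slow`
# marker, so a quick pass can skip it (assuming the marker is registered in the pytest config):
#     pytest -m "not slow"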
|
python
|