import chardet
import codecs
def WriteFile(filePath, lines, encoding="utf-8"):
with codecs.open(filePath, "w", encoding) as f:
actionR = '' # marker flagging that the [Events] section has been reached
for sline in lines:
if '[Events]' in sline:
actionR = 'ok'
f.write(sline)
continue
if actionR == 'ok':
f.write(sline.replace(
'Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding',\
'Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text'))
actionR = ''
print("├ 已为Emby改善字幕兼容性")
else:
f.write(sline)
def CONV_UTF8(src, dst):
# Detect the file encoding; chardet may fail to detect it and raise an exception
f = open(src, "rb")
coding = chardet.detect(f.read())["encoding"]
f.close()
# if coding != "utf-8":
with codecs.open(src, "r", coding) as f:
try:
WriteFile(dst, f.readlines(), encoding="utf-8")
except Exception:
print(src + " " + coding + " read error")
if __name__ == "__main__":
filename="the.walking.dead.s10e05.1080p.web.h264-xlf.zh.ass"
CONV_UTF8(filename,filename)
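# Hedged sketch (added for illustration, not part of the original script): as the
# comment in CONV_UTF8 notes, chardet.detect() can return {"encoding": None} for
# very short or binary files, which would make codecs.open() raise. A defensive
# variant of the detection step could look like this:
def detect_encoding(path, fallback="utf-8"):
    """Return the encoding chardet detects for `path`, or `fallback` if detection fails."""
    with open(path, "rb") as f:
        result = chardet.detect(f.read())
    return result.get("encoding") or fallback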
|
python
|
from screenplay import Action, Actor
from screenplay.actions import fail_with_message
class _if_nothing_is_found_fail_with_message(Action):
def __init__(self, action: Action, fail_actions: list, message: str):
super().__init__()
self.action = action
self.fail_actions = fail_actions
self.message = message
def perform_as(self, actor: Actor):
value = actor.attempts_to(
self.action
)
if value is None:
actor.attempts_to(
*self.fail_actions,
fail_with_message(self.message)
)
return value
def _create_empty_additional_actions():
return []
class find_base_action(Action):
create_fail_actions_callback = _create_empty_additional_actions
def if_nothing_is_found_fail_with_message(self, message: str):
return _if_nothing_is_found_fail_with_message(self, find_base_action.create_fail_actions_callback(), message)
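# Hedged illustration (this subclass is hypothetical and only added to show the
# intended chaining; it is not part of the library code above): a concrete finder
# would inherit from find_base_action and return its result from perform_as.
class _example_find_nothing(find_base_action):
    """Toy finder that never finds anything, used only to demonstrate chaining."""
    def perform_as(self, actor: Actor):
        return None
# An actor could then run, assuming a screenplay Actor instance named `actor`:
#     actor.attempts_to(
#         _example_find_nothing().if_nothing_is_found_fail_with_message("nothing found")
#     )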
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_serialization import jsonutils
from senlin.objects.requests import policies
from senlin.tests.unit.common import base as test_base
class TestPolicyList(test_base.SenlinTestCase):
def test_policy_list_request_body_full(self):
params = {
'name': ['policy1'],
'type': ['senlin.policy.scaling-1.0'],
'limit': 2,
'marker': 'd6901ce0-1403-4b9c-abf5-25c59cf79823',
'sort': 'name:asc',
'project_safe': False
}
sot = policies.PolicyListRequest(**params)
self.assertEqual(['policy1'], sot.name)
self.assertEqual(['senlin.policy.scaling-1.0'], sot.type)
self.assertEqual(2, sot.limit)
self.assertEqual('d6901ce0-1403-4b9c-abf5-25c59cf79823', sot.marker)
self.assertEqual('name:asc', sot.sort)
self.assertFalse(sot.project_safe)
class TestPolicyCreate(test_base.SenlinTestCase):
spec = {
"properties": {
"adjustment": {
"min_step": 1,
"number": 1,
"type": "CHANGE_IN_CAPACITY"
},
"event": "CLUSTER_SCALE_IN"
},
"type": "senlin.policy.scaling",
"version": "1.0"
}
def test_policy_create_body(self):
spec = copy.deepcopy(self.spec)
sot = policies.PolicyCreateRequestBody(name='foo', spec=spec)
self.assertEqual('foo', sot.name)
self.assertEqual('senlin.policy.scaling', sot.spec['type'])
self.assertEqual('1.0', sot.spec['version'])
def test_policy_create_request(self):
spec = copy.deepcopy(self.spec)
policy = policies.PolicyCreateRequestBody(name='foo', spec=spec)
sot = policies.PolicyCreateRequest(policy=policy)
self.assertIsInstance(sot.policy, policies.PolicyCreateRequestBody)
def test_request_body_to_primitive(self):
spec = copy.deepcopy(self.spec)
sot = policies.PolicyCreateRequestBody(name='foo', spec=spec)
self.assertEqual('foo', sot.name)
res = sot.obj_to_primitive()
# request body
self.assertEqual('PolicyCreateRequestBody', res['senlin_object.name'])
self.assertEqual('1.0', res['senlin_object.version'])
self.assertEqual('senlin', res['senlin_object.namespace'])
self.assertIn('name', res['senlin_object.changes'])
self.assertIn('spec', res['senlin_object.changes'])
# spec
data = res['senlin_object.data']
self.assertEqual(u'foo', data['name'])
spec_data = jsonutils.loads(data['spec'])
self.assertEqual('senlin.policy.scaling', spec_data['type'])
self.assertEqual('1.0', spec_data['version'])
def test_request_to_primitive(self):
spec = copy.deepcopy(self.spec)
body = policies.PolicyCreateRequestBody(name='foo', spec=spec)
sot = policies.PolicyCreateRequest(policy=body)
self.assertIsInstance(sot.policy, policies.PolicyCreateRequestBody)
self.assertEqual('foo', sot.policy.name)
res = sot.obj_to_primitive()
self.assertIn('policy', res['senlin_object.changes'])
self.assertEqual('PolicyCreateRequest', res['senlin_object.name'])
self.assertEqual('senlin', res['senlin_object.namespace'])
self.assertEqual('1.0', res['senlin_object.version'])
data = res['senlin_object.data']['policy']
self.assertEqual('PolicyCreateRequestBody', data['senlin_object.name'])
self.assertEqual('senlin', data['senlin_object.namespace'])
self.assertEqual('1.0', data['senlin_object.version'])
self.assertIn('name', data['senlin_object.changes'])
self.assertIn('spec', data['senlin_object.changes'])
pd = data['senlin_object.data']
self.assertEqual(u'foo', pd['name'])
spec_data = jsonutils.loads(pd['spec'])
self.assertEqual('senlin.policy.scaling', spec_data['type'])
self.assertEqual('1.0', spec_data['version'])
class TestPolicyGet(test_base.SenlinTestCase):
def test_policy_get(self):
sot = policies.PolicyGetRequest(identity='foo')
self.assertEqual('foo', sot.identity)
class TestPolicyUpdate(test_base.SenlinTestCase):
def test_policy_update_body(self):
data = {'name': 'foo'}
sot = policies.PolicyUpdateRequestBody(**data)
self.assertEqual('foo', sot.name)
def test_policy_update(self):
data = {'name': 'foo'}
body = policies.PolicyUpdateRequestBody(**data)
request = {
'identity': 'pid',
'policy': body
}
sot = policies.PolicyUpdateRequest(**request)
self.assertEqual('pid', sot.identity)
self.assertIsInstance(sot.policy, policies.PolicyUpdateRequestBody)
def test_policy_data_to_primitive(self):
data = {'name': 'foo'}
sot = policies.PolicyUpdateRequestBody(**data)
res = sot.obj_to_primitive()
self.assertIn('name', res['senlin_object.changes'])
self.assertEqual(u'foo', res['senlin_object.data']['name'])
self.assertEqual('PolicyUpdateRequestBody', res['senlin_object.name'])
self.assertEqual('senlin', res['senlin_object.namespace'])
self.assertEqual('1.0', res['senlin_object.version'])
def test_request_to_primitive(self):
data = {'name': 'foo'}
name = policies.PolicyUpdateRequestBody(**data)
request = {
'identity': 'pid',
'name': name
}
sot = policies.PolicyUpdateRequest(**request)
res = sot.obj_to_primitive()
self.assertIn('identity', res['senlin_object.changes'])
self.assertEqual(u'pid', res['senlin_object.data']['identity'])
self.assertEqual('PolicyUpdateRequest', res['senlin_object.name'])
self.assertEqual('senlin', res['senlin_object.namespace'])
self.assertEqual('1.0', res['senlin_object.version'])
class TestPolicyValidate(test_base.SenlinTestCase):
spec = {
"properties": {
"adjustment": {
"min_step": 1,
"number": 1,
"type": "CHANGE_IN_CAPACITY"
},
"event": "CLUSTER_SCALE_IN"
},
"type": "senlin.policy.scaling",
"version": "1.0"
}
def test_validate_request_body(self):
spec = copy.deepcopy(self.spec)
body = policies.PolicyValidateRequestBody(spec=spec)
self.assertEqual(spec['type'], body.spec['type'])
self.assertEqual(spec['version'], body.spec['version'])
def test_validate_request(self):
spec = copy.deepcopy(self.spec)
body = policies.PolicyValidateRequestBody(spec=spec)
policy = policies.PolicyValidateRequest(policy=body)
self.assertIsInstance(
policy.policy, policies.PolicyValidateRequestBody)
def test_request_body_to_primitive(self):
spec = copy.deepcopy(self.spec)
sot = policies.PolicyValidateRequestBody(spec=spec)
res = sot.obj_to_primitive()
self.assertIn('spec', res['senlin_object.changes'])
self.assertEqual(
'PolicyValidateRequestBody', res['senlin_object.name'])
self.assertEqual('senlin', res['senlin_object.namespace'])
self.assertEqual('1.0', res['senlin_object.version'])
pd = res['senlin_object.data']['spec']
data = jsonutils.loads(pd)
self.assertEqual('senlin.policy.scaling', data['type'])
self.assertEqual('1.0', data['version'])
def test_request_to_primitive(self):
spec = copy.deepcopy(self.spec)
body = policies.PolicyValidateRequestBody(spec=spec)
policy = policies.PolicyValidateRequest(policy=body)
res = policy.obj_to_primitive()
self.assertIn('policy', res['senlin_object.changes'])
self.assertEqual('PolicyValidateRequest', res['senlin_object.name'])
self.assertEqual('senlin', res['senlin_object.namespace'])
self.assertEqual('1.0', res['senlin_object.version'])
body = res['senlin_object.data']['policy']
self.assertIn('spec', body['senlin_object.changes'])
self.assertEqual(
'PolicyValidateRequestBody', body['senlin_object.name'])
self.assertEqual('senlin', body['senlin_object.namespace'])
self.assertEqual('1.0', body['senlin_object.version'])
pd = body['senlin_object.data']['spec']
data = jsonutils.loads(pd)
self.assertEqual('senlin.policy.scaling', data['type'])
self.assertEqual('1.0', data['version'])
class TestPolicyDelete(test_base.SenlinTestCase):
def test_policy_delete(self):
sot = policies.PolicyDeleteRequest(identity='foo')
self.assertEqual('foo', sot.identity)
|
python
|
from .base import *
BOOST_PER_SECOND = 80 * 1 / .93 # boost used per second out of 255
REPLICATED_PICKUP_KEY = 'TAGame.VehiclePickup_TA:ReplicatedPickupData'
REPLICATED_PICKUP_KEY_168 = 'TAGame.VehiclePickup_TA:NewReplicatedPickupData'
def get_boost_actor_data(actor: dict):
if REPLICATED_PICKUP_KEY in actor:
actor = actor[REPLICATED_PICKUP_KEY]
if actor is not None and actor != -1:
actor = actor['pickup']
if actor is not None and 'instigator_id' in actor and actor["instigator_id"] != -1:
return actor
elif REPLICATED_PICKUP_KEY_168 in actor:
actor = actor[REPLICATED_PICKUP_KEY_168]
if actor is not None and actor != -1:
actor = actor['pickup_new']
if actor is not None and 'instigator_id' in actor and actor["instigator_id"] != -1:
return actor
return None
class BoostHandler(BaseActorHandler):
type_name = 'Archetypes.CarComponents.CarComponent_Boost'
def update(self, actor: dict, frame_number: int, time: float, delta: float) -> None:
car_actor_id = actor.get('TAGame.CarComponent_TA:Vehicle', None)
if car_actor_id is None or car_actor_id not in self.parser.current_car_ids_to_collect:
return
player_actor_id = self.parser.car_player_ids[car_actor_id]
boost_is_active_random_int = actor.get(
COMPONENT_ACTIVE_KEY,
actor.get(COMPONENT_REPLICATED_ACTIVE_KEY, False))
# boost_is_active when the replicated value is odd (it appears to be a toggle counter rather than a plain flag)
boost_is_active = (boost_is_active_random_int % 2 == 1)
if boost_is_active:
# Manually decrease the car's boost amount (the decrease is not stored in the replay);
# the game presumably computes the decrease the same way.
boost_amount = max(0, actor.get('TAGame.CarComponent_Boost_TA:ReplicatedBoostAmount',
0) - delta * BOOST_PER_SECOND)
actor['TAGame.CarComponent_Boost_TA:ReplicatedBoostAmount'] = boost_amount
else:
boost_amount = actor.get('TAGame.CarComponent_Boost_TA:ReplicatedBoostAmount', None)
self.parser.player_data[player_actor_id][frame_number]['boost'] = boost_amount
self.parser.player_data[player_actor_id][frame_number]['boost_active'] = boost_is_active
class BoostPickupHandler(BaseActorHandler):
@classmethod
def can_handle(cls, actor: dict) -> bool:
return actor['ClassName'] == 'TAGame.VehiclePickup_Boost_TA'
def update(self, actor: dict, frame_number: int, time: float, delta: float) -> None:
boost_actor = get_boost_actor_data(actor)
if boost_actor is not None:
car_actor_id = boost_actor['instigator_id']
if car_actor_id in self.parser.car_player_ids:
player_actor_id = self.parser.car_player_ids[car_actor_id]
if frame_number in self.parser.player_data[player_actor_id]:
actor = self.parser.player_data[player_actor_id]
frame_number_look_back = frame_number - 1
previous_boost_data = None
while frame_number_look_back >= 0:
try:
previous_boost_data = actor[frame_number_look_back]['boost']
except KeyError:
previous_boost_data = None
if previous_boost_data is not None:
break
frame_number_look_back -= 1
try:
current_boost_data = actor[frame_number]['boost']
except KeyError:
current_boost_data = None
# Ignore any phantom boosts: only record a pickup when the previous boost value was
# below the 255 cap and strictly less than the current value (the chained comparison
# below means 255 > previous_boost_data and previous_boost_data < current_boost_data)
if (previous_boost_data is not None and current_boost_data is not None and
(255 > previous_boost_data < current_boost_data)):
actor[frame_number]['boost_collect'] = True
# Mark the pickup as consumed: the replay does not reset the instigator immediately
# even though the boost is only collected once, so we clear it ourselves by setting
# instigator_id back to -1
boost_actor["instigator_id"] = -1
|
python
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Unit-e Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import sha256
from test_framework.regtest_mnemonics import regtest_mnemonics
from test_framework.script import CScript, OP_2, hash160
from test_framework.test_framework import UnitETestFramework, STAKE_SPLIT_THRESHOLD
from test_framework.util import assert_equal, assert_greater_than, bytes_to_hex_str, hex_str_to_bytes, wait_until
def stake_p2wsh(node, staking_node, amount):
"""
Send funds to witness v2 remote staking output.
Args:
node: the node which will be able to spend funds
staking_node: the node which will be able to stake the funds
amount: the amount to send
"""
multisig = node.addmultisigaddress(2, [node.getnewaddress(), node.getnewaddress()])
bare = CScript(hex_str_to_bytes(multisig['redeemScript']))
spending_script_hash = sha256(bare)
addr_info = staking_node.validateaddress(staking_node.getnewaddress('', 'legacy'))
staking_key_hash = hash160(hex_str_to_bytes(addr_info['pubkey']))
rs_p2wsh = CScript([OP_2, staking_key_hash, spending_script_hash])
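# Descriptive note (added): rs_p2wsh is the witness v2 remote-staking program the
# docstring refers to; it commits to the staking node's public key hash and to the
# SHA256 of the spending (2-of-2 multisig) script built above.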
outputs = [{'address': 'script', 'amount': amount, 'script': bytes_to_hex_str(rs_p2wsh)}]
node.sendtypeto('unite', 'unite', outputs)
class RemoteStakingTest(UnitETestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
self.extra_args=[
[],
['-minimumchainwork=0', '-maxtipage=1000000000']
]
def run_test(self):
alice, bob = self.nodes
alice.importmasterkey(regtest_mnemonics[0]['mnemonics'])
alice.generate(1)
assert_equal(len(alice.listunspent()), regtest_mnemonics[0]['balance'] / STAKE_SPLIT_THRESHOLD)
alices_addr = alice.getnewaddress()
# 'legacy': we need the PK hash, not a script hash
bobs_addr = bob.getnewaddress('', 'legacy')
# Estimate staking fee
recipient = {"address": bobs_addr, "amount": 1}
result = alice.stakeat(recipient, True)
assert_greater_than(0.001, result['fee'])
ps = bob.proposerstatus()
assert_equal(ps['wallets'][0]['stakeable_balance'], 0)
# Stake the funds
result = alice.stakeat(recipient)
stake_p2wsh(alice, staking_node=bob, amount=1)
alice.generatetoaddress(1, alices_addr)
self.sync_all()
wi = alice.getwalletinfo()
assert_equal(wi['remote_staking_balance'], 2)
def bob_is_staking_the_new_coin():
ps = bob.proposerstatus()
return ps['wallets'][0]['stakeable_balance'] == 2
wait_until(bob_is_staking_the_new_coin, timeout=10)
# Change outputs for both staked coins, and the balance staked remotely
assert_equal(len(alice.listunspent()), 2 + (regtest_mnemonics[0]['balance'] // STAKE_SPLIT_THRESHOLD))
if __name__ == '__main__':
RemoteStakingTest().main()
|
python
|
from sanic import Sanic
from sanic.blueprints import Blueprint
from sanic.response import stream, text
from sanic.views import HTTPMethodView
from sanic.views import stream as stream_decorator
bp = Blueprint("bp_example")
app = Sanic("Example")
class SimpleView(HTTPMethodView):
@stream_decorator
async def post(self, request):
result = ""
while True:
body = await request.stream.get()
if body is None:
break
result += body.decode("utf-8")
return text(result)
@app.post("/stream", stream=True)
async def handler(request):
async def streaming(response):
while True:
body = await request.stream.get()
if body is None:
break
body = body.decode("utf-8").replace("1", "A")
await response.write(body)
return stream(streaming)
@bp.put("/bp_stream", stream=True)
async def bp_handler(request):
result = ""
while True:
body = await request.stream.get()
if body is None:
break
result += body.decode("utf-8").replace("1", "A")
return text(result)
async def post_handler(request):
result = ""
while True:
body = await request.stream.get()
if body is None:
break
result += body.decode("utf-8")
return text(result)
app.blueprint(bp)
app.add_route(SimpleView.as_view(), "/method_view")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000)
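# Hedged usage sketch (not part of the app above; endpoint names and the `requests`
# client are illustrative): the /stream route echoes the request body back with "1"
# replaced by "A", so a chunked POST could be exercised like this:
#
#     import requests
#     resp = requests.post("http://localhost:8000/stream", data=iter([b"11", b"22"]))
#     print(resp.text)  # expected: "AA22"
#
# Note that post_handler above is defined but never registered; in the Sanic version
# this example targets, something like
#     app.add_route(post_handler, "/post", methods=["POST"], stream=True)
# would be needed before it could serve requests.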
|
python
|
import argparse
class ArgumentParser(argparse.ArgumentParser):
def __init__(self):
self.parser = argparse.ArgumentParser(description="Robyn, a fast async web framework with a rust runtime.")
self.parser.add_argument('--processes', type=int, default=1, required=False)
self.parser.add_argument('--workers', type=int, default=1, required=False)
self.parser.add_argument('--dev', default=False, type=lambda x: (str(x).lower() == 'true'))
self.args = self.parser.parse_args()
def num_processes(self):
return self.args.processes
def workers(self):
return self.args.workers
def is_dev(self):
_is_dev = self.args.dev
if _is_dev and ( self.num_processes() != 1 or self.workers() != 1 ):
raise Exception("--processes and --workers shouldn't be used with --dev")
return _is_dev
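# Hedged usage sketch: parsing happens in __init__ from sys.argv, e.g. when the
# program is started as `python app.py --processes 2 --workers 4`.
if __name__ == "__main__":
    cli = ArgumentParser()
    print(cli.num_processes(), cli.workers(), cli.is_dev())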
|
python
|
r = 's'
while r == 's':
n1 = int(input('Digite o 1º valor: '))
n2 = int(input('Digite o 2º valor: '))
print(' [ 1 ] SOMAR')
print(' [ 2 ] Multiplicar')
print(' [ 3 ] Maior')
print(' [ 4 ] Novos Números')
print(' [ 5 ] Sair do Programa')
opcao = int(input('Escolha uma operação: '))
if (opcao == 1):
soma = n1 + n2
print('Resultado da SOMA entre {} e {} = {}'.format(n1, n2, soma))
r = 's'
elif (opcao == 2):
m = n1 * n2
print('Resultado da MULTIPLICAÇÃO entre {} e {} = {}'.format(n1, n2, m))
r = 's'
elif (opcao == 3):
if (n1 > n2):
maior = n1
print('Maior valor digitado entre {} e {} = {}'.format(n1, n2, maior))
r = 's'
elif (n2 > n1):
maior = n2
print('Maior valor digitado entre {} e {} = {}'.format(n1, n2, maior))
r = 's'
elif (opcao == 4):
r = 's'
print('Você escolheu digitar novos valores!')
elif (opcao == 5):
r = 'n'
print('Finalizando Programa')
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 28 10:00:06 2017
@author: ldn
"""
EndPointCoordinate=((-3.6,0.0,7.355),(123.6,0.0,7.355)) #west & east end point
rGirderRigidarmCoordinate=((10,8.13,0),(15,8.3675,0),(20,8.58,0),
(25,8.7675,0),(30,8.93,0),(35,9.0675,0),(40,9.18,0),(45,9.2675,0),(50,9.33,0),(55,9.3675,0),
(60,9.38,0),
(65,9.3675,0),(70,9.33,0),(75,9.2675,0),(80,9.18,0),(85,9.0675,0),(90,8.93,0),(95,8.7675,0),
(100,8.58,0),(105,8.3675,0),(110,8.13,0))
rRigidarmSuspenderCoordinate=(((10,7.73,-3.75),(15,7.9675,-3.75),(20,8.18,-3.75),
(30,8.53,-3.75),(35,8.6675,-3.75),(40,8.78,-3.75),(45,8.8675,-3.75),(50,8.93,-3.75),(55,8.9675,-3.75),
(60,8.98,-3.75),
(65,8.9675,-3.75),(70,8.93,-3.75),(75,8.8675,-3.75),(80,8.78,-3.75),(85,8.6675,-3.75),(90,8.53,-3.75),
(100,8.18,-3.75),(105,7.9675,-3.75),(110,7.73,-3.75)),
((10,7.73,3.75),(15,7.9675,3.75),(20,8.18,3.75),
(30,8.53,3.75),(35,8.6675,3.75),(40,8.78,3.75),(45,8.8675,3.75),(50,8.93,3.75),(55,8.9675,3.75),
(60,8.98,3.75),
(65,8.9675,3.75),(70,8.93,3.75),(75,8.8675,3.75),(80,8.78,3.75),(85,8.6675,3.75),(90,8.53,3.75),
(100,8.18,3.75),(105,7.9675,3.75),(110,7.73,3.75)))
lst=[]
lst.append(EndPointCoordinate[0])
for i in range(len(rGirderRigidarmCoordinate)):
lst.append(rGirderRigidarmCoordinate[i])
lst.append(EndPointCoordinate[1])
l=tuple(lst)
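# Descriptive note (added): `l` is the west end point, followed by the 21
# girder/rigid-arm points, followed by the east end point, so len(l) == 23.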
3432/3
|
python
|
#
# PySNMP MIB module SGTE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SGTE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:53:51 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Integer32, TimeTicks, IpAddress, enterprises, Bits, MibIdentifier, ObjectIdentity, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, iso, Gauge32, ModuleIdentity, NotificationType, Unsigned32, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "TimeTicks", "IpAddress", "enterprises", "Bits", "MibIdentifier", "ObjectIdentity", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "iso", "Gauge32", "ModuleIdentity", "NotificationType", "Unsigned32", "Counter64")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
sgte = MibIdentifier((1, 3, 6, 1, 4, 1, 13743))
sEci48VP = MibIdentifier((1, 3, 6, 1, 4, 1, 13743, 1))
cIDENTIFICATION = MibIdentifier((1, 3, 6, 1, 4, 1, 13743, 1, 1))
iNomEquipement = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: iNomEquipement.setStatus('optional')
iNomConstructeur = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: iNomConstructeur.setStatus('optional')
iMarqueCommerciale = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: iMarqueCommerciale.setStatus('optional')
iVersionLogiciel = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: iVersionLogiciel.setStatus('optional')
iCaracterisationFine = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: iCaracterisationFine.setStatus('optional')
cMESURES = MibIdentifier((1, 3, 6, 1, 4, 1, 13743, 1, 2))
mTensionUtilisation = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mTensionUtilisation.setStatus('optional')
mTensionBatterie = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mTensionBatterie.setStatus('optional')
mCourantUtilisation = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantUtilisation.setStatus('optional')
mCourantBatterie1A = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie1A.setStatus('optional')
mCourantBatterie2A = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie2A.setStatus('optional')
mCourantBatterie3A = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie3A.setStatus('optional')
mCourantBatterie1B = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie1B.setStatus('optional')
mCourantBatterie2B = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie2B.setStatus('optional')
mCourantBatterie3B = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie3B.setStatus('optional')
mCourantRedresseur = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantRedresseur.setStatus('optional')
mTauxCharge = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mTauxCharge.setStatus('optional')
mEtape = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mEtape.setStatus('optional')
mTensionDebutTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 13), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mTensionDebutTestBatt.setStatus('optional')
mTensionFinTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 14), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mTensionFinTestBatt.setStatus('optional')
mCourantBatterie1ADebutTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie1ADebutTestBatt.setStatus('optional')
mCourantBatterie1AFinTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie1AFinTestBatt.setStatus('optional')
mCourantBatterie2ADebutTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie2ADebutTestBatt.setStatus('optional')
mCourantBatterie2AFinTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie2AFinTestBatt.setStatus('optional')
mCourantBatterie3ADebutTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie3ADebutTestBatt.setStatus('optional')
mCourantBatterie3AFinTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie3AFinTestBatt.setStatus('optional')
mCourantBatterie1BDebutTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie1BDebutTestBatt.setStatus('optional')
mCourantBatterie1BFinTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 22), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie1BFinTestBatt.setStatus('optional')
mCourantBatterie2BDebutTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 23), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie2BDebutTestBatt.setStatus('optional')
mCourantBatterie2BFinTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie2BFinTestBatt.setStatus('optional')
mCourantBatterie3BDebutTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 25), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie3BDebutTestBatt.setStatus('optional')
mCourantBatterie3BFinTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 26), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: mCourantBatterie3BFinTestBatt.setStatus('optional')
mTemperature = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 2, 27), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(255, 255)).setFixedLength(255)).setMaxAccess("readonly")
if mibBuilder.loadTexts: mTemperature.setStatus('optional')
cETATS = MibIdentifier((1, 3, 6, 1, 4, 1, 13743, 1, 3))
eModifHeure = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eModifHeure.setStatus('optional')
eModifParam = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eModifParam.setStatus('optional')
eLiaisonJbus = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eLiaisonJbus.setStatus('optional')
eTestEnCours = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestEnCours.setStatus('optional')
eUBMin = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eUBMin.setStatus('optional')
eTestNonRealise = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestNonRealise.setStatus('optional')
eDefUnRed = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eDefUnRed.setStatus('optional')
eDefPlusRed = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eDefPlusRed.setStatus('optional')
eAlimSecteur = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eAlimSecteur.setStatus('optional')
eFuseBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eFuseBatt.setStatus('optional')
eFuseDep = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eFuseDep.setStatus('optional')
eFuseAux = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 12), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eFuseAux.setStatus('optional')
eUMin = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eUMin.setStatus('optional')
eUMax = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eUMax.setStatus('optional')
eTauxCharge = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 15), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTauxCharge.setStatus('optional')
eTemperature = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTemperature.setStatus('optional')
eIBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eIBatt.setStatus('optional')
eChargeI = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eChargeI.setStatus('optional')
eChargeU = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eChargeU.setStatus('optional')
eFloating = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eFloating.setStatus('optional')
eComptAH = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 21), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eComptAH.setStatus('optional')
eTestBattOK = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 22), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestBattOK.setStatus('optional')
eTestBattKO = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 23), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestBattKO.setStatus('optional')
eTestImpossible = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 24), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestImpossible.setStatus('optional')
eTestRepousse = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 25), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestRepousse.setStatus('optional')
eTestInterrompu = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 26), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestInterrompu.setStatus('optional')
eTestMiniKO = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 27), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eTestMiniKO.setStatus('optional')
ePuissTestBatt = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 28), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ePuissTestBatt.setStatus('optional')
eDefEprom = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 29), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eDefEprom.setStatus('optional')
eDetectionCSB = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 30), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eDetectionCSB.setStatus('optional')
eRAZ = MibScalar((1, 3, 6, 1, 4, 1, 13743, 1, 3, 31), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: eRAZ.setStatus('optional')
cALARMES = MibIdentifier((1, 3, 6, 1, 4, 1, 13743, 1, 4))
aModifHeure = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,1))
aModifParam = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,2))
aLiaisonJbus = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,3))
aTestEnCours = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,4))
aUBMin = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,5))
aTestNonRealise = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,6))
aDefUnRed = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,7))
aDefPlusRed = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,8))
aAlimSecteur = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,9))
aFuseBatt = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,10))
aFuseDep = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,11))
aFuseAux = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,12))
aUMin = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,13))
aUMax = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,14))
aTauxCharge = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,15))
aTemperature = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,16))
aIBatt = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,17))
aChargeI = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,18))
aChargeU = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,19))
aFloating = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,20))
aComptAH = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,21))
aTestBattOK = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,22))
aTestBattKO = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,23))
aTestImpossible = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,24))
aTestRepousse = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,25))
aTestInterrompu = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,26))
aTestMiniKO = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,27))
aPuissTestBatt = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,28))
aDefEprom = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,29))
aDetectionCSB = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,30))
aRAZ = NotificationType((1, 3, 6, 1, 4, 1, 13743, 1, 4) + (0,31))
mibBuilder.exportSymbols("SGTE-MIB", mTemperature=mTemperature, aIBatt=aIBatt, aChargeI=aChargeI, aChargeU=aChargeU, iMarqueCommerciale=iMarqueCommerciale, mCourantRedresseur=mCourantRedresseur, eChargeU=eChargeU, sEci48VP=sEci48VP, eTestBattKO=eTestBattKO, aTestInterrompu=aTestInterrompu, eIBatt=eIBatt, cMESURES=cMESURES, iVersionLogiciel=iVersionLogiciel, eModifHeure=eModifHeure, eLiaisonJbus=eLiaisonJbus, aTauxCharge=aTauxCharge, mCourantBatterie1BDebutTestBatt=mCourantBatterie1BDebutTestBatt, aTestBattKO=aTestBattKO, aRAZ=aRAZ, aUMin=aUMin, mCourantBatterie3ADebutTestBatt=mCourantBatterie3ADebutTestBatt, aTemperature=aTemperature, eTestNonRealise=eTestNonRealise, aFuseBatt=aFuseBatt, eTestImpossible=eTestImpossible, mCourantBatterie1BFinTestBatt=mCourantBatterie1BFinTestBatt, aFloating=aFloating, mCourantBatterie2AFinTestBatt=mCourantBatterie2AFinTestBatt, eFuseBatt=eFuseBatt, eRAZ=eRAZ, eModifParam=eModifParam, aModifParam=aModifParam, aUBMin=aUBMin, aTestNonRealise=aTestNonRealise, aDefPlusRed=aDefPlusRed, mCourantBatterie3AFinTestBatt=mCourantBatterie3AFinTestBatt, eComptAH=eComptAH, iNomEquipement=iNomEquipement, aTestMiniKO=aTestMiniKO, aAlimSecteur=aAlimSecteur, iNomConstructeur=iNomConstructeur, mCourantBatterie1AFinTestBatt=mCourantBatterie1AFinTestBatt, eTestBattOK=eTestBattOK, aTestRepousse=aTestRepousse, aUMax=aUMax, eChargeI=eChargeI, cALARMES=cALARMES, cETATS=cETATS, eDefEprom=eDefEprom, mCourantBatterie3A=mCourantBatterie3A, eAlimSecteur=eAlimSecteur, eUMin=eUMin, aTestEnCours=aTestEnCours, mCourantBatterie3B=mCourantBatterie3B, aFuseDep=aFuseDep, mCourantBatterie2B=mCourantBatterie2B, mEtape=mEtape, ePuissTestBatt=ePuissTestBatt, aDetectionCSB=aDetectionCSB, mCourantBatterie1B=mCourantBatterie1B, eDefPlusRed=eDefPlusRed, mCourantBatterie1A=mCourantBatterie1A, aDefUnRed=aDefUnRed, sgte=sgte, mTensionUtilisation=mTensionUtilisation, mCourantBatterie1ADebutTestBatt=mCourantBatterie1ADebutTestBatt, aModifHeure=aModifHeure, iCaracterisationFine=iCaracterisationFine, eFuseDep=eFuseDep, eTauxCharge=eTauxCharge, mCourantUtilisation=mCourantUtilisation, aDefEprom=aDefEprom, mCourantBatterie3BDebutTestBatt=mCourantBatterie3BDebutTestBatt, cIDENTIFICATION=cIDENTIFICATION, aFuseAux=aFuseAux, aComptAH=aComptAH, aTestBattOK=aTestBattOK, mCourantBatterie2BFinTestBatt=mCourantBatterie2BFinTestBatt, eTestRepousse=eTestRepousse, eTestMiniKO=eTestMiniKO, aPuissTestBatt=aPuissTestBatt, aTestImpossible=aTestImpossible, eTestInterrompu=eTestInterrompu, eFloating=eFloating, mCourantBatterie2ADebutTestBatt=mCourantBatterie2ADebutTestBatt, mTensionFinTestBatt=mTensionFinTestBatt, eUBMin=eUBMin, mCourantBatterie2BDebutTestBatt=mCourantBatterie2BDebutTestBatt, eTestEnCours=eTestEnCours, aLiaisonJbus=aLiaisonJbus, mCourantBatterie2A=mCourantBatterie2A, eTemperature=eTemperature, eDetectionCSB=eDetectionCSB, mTensionDebutTestBatt=mTensionDebutTestBatt, eDefUnRed=eDefUnRed, mTauxCharge=mTauxCharge, mTensionBatterie=mTensionBatterie, eFuseAux=eFuseAux, eUMax=eUMax, mCourantBatterie3BFinTestBatt=mCourantBatterie3BFinTestBatt)
|
python
|
from django import forms
from django.contrib.auth.models import User
from .models import Profile
class UserCreationForm(forms.ModelForm):
username = forms.CharField(label='اسم المستخدم', max_length=30,
help_text='اسم المستخدم يجب ألا يحتوي على مسافات.')
email = forms.EmailField(label='البريد الإلكتروني')
first_name = forms.CharField(label='الاسم الأول')
last_name = forms.CharField(label='الاسم الأخير')
password1 = forms.CharField(
label='كلمة المرور', widget=forms.PasswordInput(), min_length=8)
password2 = forms.CharField(
label='تأكيد كلمة المرور', widget=forms.PasswordInput(), min_length=8)
class Meta:
model = User
fields = ('username', 'email', 'first_name',
'last_name', 'password1', 'password2')
def clean_password2(self):
cd = self.cleaned_data
if cd['password1'] != cd['password2']:
raise forms.ValidationError('كلمة المرور غير متطابقة')
return cd['password2']
def clean_username(self):
cd = self.cleaned_data
if User.objects.filter(username=cd['username']).exists():
raise forms.ValidationError('يوجد مستخدم مسجل بهذا الاسم.')
return cd['username']
class LoginForm(forms.ModelForm):
username = forms.CharField(label='اسم المستخدم')
password = forms.CharField(
label='كلمة المرور', widget=forms.PasswordInput())
class Meta:
model = User
fields = ('username', 'password')
class UserUpdateForm(forms.ModelForm):
first_name = forms.CharField(label='الاسم الأول')
last_name = forms.CharField(label='الاسم الأخير')
email = forms.EmailField(label='البريد الإلكتروني')
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
class ProfileUpdateForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('image',)
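# Hedged usage sketch (the helper below is illustrative and not part of this
# module): password1/password2 are declared form fields rather than User model
# fields, so the password still has to be hashed explicitly before saving.
def _example_register(form: UserCreationForm):
    """Illustrative helper: persist a valid registration form and return the user."""
    if form.is_valid():
        user = form.save(commit=False)
        user.set_password(form.cleaned_data['password1'])
        user.save()
        return user
    return None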
|
python
|
class Seat:
"""Seat contains features of the seat"""
def __init__(self):
self.left_handed = False
self.special_needs = False
self.broken = False
# sid of the person sitting there (-1 means the seat is empty)
self.sid = -1
# Used for ChunkIncrease
# True to use the seat, False to keep empty
self.enable = True
class SeatGroups:
"""SeatGroups define a contiguous seats in a row.
This helps determine how to place empty seats in order to minimize student chunks
"""
def __init__(self, _chunk_begin, _chunk_end):
# Chunk ranges from (chunk_begin, chunk_end) inclusive
self.chunk_begin = _chunk_begin
self.chunk_end = _chunk_end
if self.chunk_begin[0] != self.chunk_end[0]:
raise Exception("Rows don't match, can't be a chunk.")
# Used for ConsecDivide (only stores cols)
self.empty = []
def size(self):
return self.chunk_end[1] - self.chunk_begin[1] + 1
def max_chunk_size(self):
max_chunk = 0
cur_chunk = 0
for col in range(self.chunk_begin[1], self.chunk_end[1] + 1):
if col in self.empty:
max_chunk = max(max_chunk, cur_chunk)
cur_chunk = 0
else:
cur_chunk += 1
return max(max_chunk, cur_chunk)
def avail_size(self):
return self.size() - len(self.empty)
def __str__(self):
return str(self.max_chunk_size())
def __repr__(self):
return str(self)
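# Hedged usage sketch (coordinates are illustrative): a group covering columns 0-5
# of row 0 with column 2 marked empty splits into seat runs of sizes 2 and 3.
if __name__ == "__main__":
    group = SeatGroups((0, 0), (0, 5))
    group.empty.append(2)
    print(group.size(), group.avail_size(), group.max_chunk_size())  # 6 5 3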
|
python
|
nJoints = 16
accIdxs = [0, 1, 2, 3, 4, 5, 10, 11, 14, 15]
shuffleRef = [[0, 5], [1, 4], [2, 3],
[10, 15], [11, 14], [12, 13]]
edges = [[0, 1], [1, 2], [2, 6], [6, 3], [3, 4], [4, 5],
[10, 11], [11, 12], [12, 8], [8, 13], [13, 14], [14, 15],
[6, 8], [8, 9]]
ntuImgSize = 224
h36mImgSize = 224
outputRes = 64
inputRes = 256
eps = 1e-6
momentum = 0.0
weightDecay = 0.0
alpha = 0.99
epsilon = 1e-8
scale = 0.25
rotate = 30
hmGauss = 1
hmGaussInp = 20
shiftPX = 50
disturb = 10
expDir = '../exp'
dataDir = '../data/'
ntuDataDir = dataDir + 'ntu'
h36mDataDir = dataDir + 'h36m'
mpiiDataDir = dataDir + 'mpii'
posetrackDataDir = dataDir + 'posetrack'
nThreads = 4
root = 7
|
python
|
# -*- coding: utf-8 -*-
from ConfigParser import NoOptionError
import calendar
import datetime
from taxi import remote
from taxi.exceptions import CancelException, UsageError
from taxi.projects import Project
from taxi.timesheet import (
NoActivityInProgressError, Timesheet, TimesheetCollection, TimesheetFile
)
from taxi.timesheet.entry import TimesheetEntry, EntriesCollection
from taxi.timesheet.parser import ParseError
from taxi.settings import Settings
from taxi.utils import file
from taxi.utils.structures import OrderedSet
class BaseCommand(object):
def __init__(self, app_container):
self.options = app_container.options
self.arguments = app_container.arguments
self.view = app_container.view
self.projects_db = app_container.projects_db
self.settings = app_container.settings
def setup(self):
pass
def validate(self):
pass
def run(self):
pass
class BaseTimesheetCommand(BaseCommand):
def get_timesheet_collection(self, skip_cache=False):
timesheet_collection = getattr(self, '_current_timesheet_collection',
None)
if timesheet_collection is not None and not skip_cache:
return timesheet_collection
timesheet_collection = TimesheetCollection()
timesheet_files = self.get_files(
self.options['unparsed_file'],
int(self.settings.get('nb_previous_files'))
)
self.alias_mappings = self.settings.get_aliases()
for file_path in timesheet_files:
timesheet_file = TimesheetFile(file_path)
try:
timesheet_contents = timesheet_file.read()
except IOError:
timesheet_contents = ''
t = Timesheet(
EntriesCollection(
timesheet_contents,
self.settings.get('date_format')
),
self.alias_mappings,
timesheet_file
)
# Force new entries direction if necessary
if (self.settings.get('auto_add') in [
Settings.AUTO_ADD_OPTIONS['TOP'],
Settings.AUTO_ADD_OPTIONS['BOTTOM']]):
t.entries.add_date_to_bottom = (
self.settings.get('auto_add') ==
Settings.AUTO_ADD_OPTIONS['BOTTOM']
)
timesheet_collection.timesheets.append(t)
# Fix `add_date_to_bottom` attribute of timesheet entries based on
# previous timesheets. When a new timesheet is started it won't have
# any direction defined, so we take the one from the previous
# timesheet, if any
previous_timesheet = None
for timesheet in reversed(timesheet_collection.timesheets):
if (timesheet.entries.add_date_to_bottom is None
and previous_timesheet
and previous_timesheet.entries.add_date_to_bottom
is not None):
timesheet.entries.add_date_to_bottom = (
previous_timesheet.entries.add_date_to_bottom
)
previous_timesheet = timesheet
setattr(self, '_current_timesheet_collection', timesheet_collection)
return timesheet_collection
def get_files(self, filename, nb_previous_files):
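# Hedged summary (added comment): expand the '%m'/'%Y' placeholders in `filename`
# for the current period and for each of the `nb_previous_files` previous months
# or years, returning the expanded paths as an OrderedSet (most recent first).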
date_units = ['m', 'Y']
smallest_unit = None
for date in date_units:
if '%%%s' % date in filename:
smallest_unit = date
break
if smallest_unit is None:
return OrderedSet([filename])
files = OrderedSet()
file_date = datetime.date.today()
for i in xrange(0, nb_previous_files + 1):
files.add(file.expand_filename(filename, file_date))
if smallest_unit == 'm':
if file_date.month == 1:
file_date = file_date.replace(day=1,
month=12,
year=file_date.year - 1)
else:
file_date = file_date.replace(day=1,
month=file_date.month - 1)
elif smallest_unit == 'Y':
file_date = file_date.replace(day=1, year=file_date.year - 1)
return files
class AddCommand(BaseCommand):
"""
Usage: add search_string
Searches and prompts for project, activity and alias and adds that as a new
entry to .tksrc.
"""
def validate(self):
if len(self.arguments) < 1:
raise UsageError()
def run(self):
search = self.arguments
projects = self.projects_db.search(search, active_only=True)
projects = sorted(projects, key=lambda project: project.name)
if len(projects) == 0:
self.view.msg(
u"No active project matches your search string '%s'" %
''.join(search)
)
return
self.view.projects_list(projects, True)
try:
number = self.view.select_project(projects)
except CancelException:
return
project = projects[number]
mappings = self.settings.get_reversed_aliases()
self.view.project_with_activities(project, mappings,
numbered_activities=True)
try:
number = self.view.select_activity(project.activities)
except CancelException:
return
retry = True
while retry:
try:
alias = self.view.select_alias()
except CancelException:
return
if self.settings.activity_exists(alias):
mapping = self.settings.get_aliases()[alias]
overwrite = self.view.overwrite_alias(alias, mapping)
if not overwrite:
return
elif overwrite:
retry = False
# User chose "retry"
else:
retry = True
else:
retry = False
activity = project.activities[number]
self.settings.add_alias(alias, project.id, activity.id)
self.settings.write_config()
self.view.alias_added(alias, (project.id, activity.id))
class AliasCommand(BaseCommand):
"""
Usage: alias [alias]
alias [project_id]
alias [project_id/activity_id]
alias [alias] [project_id/activity_id]
- The first form will display the mappings whose aliases start with the
search string you entered
- The second form will display the mapping(s) you've defined for this
project and all of its activities
- The third form will display the mapping you've defined for this exact
project/activity tuple
- The last form will add a new alias in your configuration file
You can also run this command without any argument to view all your
mappings.
"""
MODE_SHOW_MAPPING = 0
MODE_ADD_ALIAS = 1
MODE_LIST_ALIASES = 2
def validate(self):
if len(self.arguments) > 2:
raise UsageError()
def setup(self):
if len(self.arguments) == 2:
self.alias = self.arguments[0]
self.mapping = self.arguments[1]
self.mode = self.MODE_ADD_ALIAS
elif len(self.arguments) == 1:
self.alias = self.arguments[0]
self.mode = self.MODE_SHOW_MAPPING
else:
self.alias = None
self.mode = self.MODE_LIST_ALIASES
def run(self):
# 2 arguments, add a new alias
if self.mode == self.MODE_ADD_ALIAS:
self._add_alias(self.alias, self.mapping)
# 1 argument, display the alias or the project id/activity id tuple
elif self.mode == self.MODE_SHOW_MAPPING:
mapping = Project.str_to_tuple(self.alias)
if mapping is not None:
for m in self.settings.search_aliases(mapping):
self.view.mapping_detail(m, self.projects_db.get(m[1][0]))
else:
self.mode = self.MODE_LIST_ALIASES
# No argument, display the mappings
if self.mode == self.MODE_LIST_ALIASES:
for m in self.settings.search_mappings(self.alias):
self.view.alias_detail(
m,
self.projects_db.get(m[1][0]) if m[1] is not None else None
)
def _add_alias(self, alias_name, mapping):
project_activity = Project.str_to_tuple(mapping)
if project_activity is None:
raise UsageError("The mapping must be in the format xxxx/yyyy")
if self.settings.activity_exists(alias_name):
existing_mapping = self.settings.get_aliases()[alias_name]
confirm = self.view.overwrite_alias(alias_name, existing_mapping,
False)
if not confirm:
return
self.settings.add_alias(alias_name, project_activity[0],
project_activity[1])
self.settings.write_config()
self.view.alias_added(alias_name, project_activity)
class AutofillCommand(BaseTimesheetCommand):
"""
Usage: autofill
Fills your timesheet up to today, for the defined auto_fill_days.
"""
def run(self):
auto_fill_days = self.settings.get_auto_fill_days()
if auto_fill_days:
today = datetime.date.today()
last_day = calendar.monthrange(today.year, today.month)
last_date = datetime.date(today.year, today.month, last_day[1])
timesheet_collection = self.get_timesheet_collection()
t = timesheet_collection.timesheets[0]
t.prefill(auto_fill_days, last_date)
t.file.write(t.entries)
self.view.msg(u"Your entries file has been filled.")
else:
self.view.err(u"The parameter `auto_fill_days` must be set to "
"use this command.")
class KittyCommand(BaseCommand):
"""
|\ _,,,---,,_
/,`.-'`' -. ;-;;,_
|,4- ) )-,_..;\ ( `'-'
'---''(_/--' `-'\_)
Soft kitty, warm kitty
Little ball of fur
Happy kitty, sleepy kitty
Purr, purr, purr
"""
def run(self):
self.view.msg(self.__doc__)
class CleanAliasesCommand(BaseCommand):
"""
Usage: clean-aliases
Removes aliases from your config file that point to inactive projects.
"""
def run(self):
aliases = self.settings.get_aliases()
inactive_aliases = []
for (alias, mapping) in aliases.iteritems():
# Ignore local aliases
if mapping is None:
continue
project = self.projects_db.get(mapping[0])
if (project is None or not project.is_active() or
(mapping[1] is not None
and project.get_activity(mapping[1]) is None)):
inactive_aliases.append(((alias, mapping), project))
if not inactive_aliases:
self.view.msg(u"No inactive aliases found.")
return
if not self.options.get('force_yes'):
confirm = self.view.clean_inactive_aliases(inactive_aliases)
if self.options.get('force_yes') or confirm:
self.settings.remove_aliases(
[item[0][0] for item in inactive_aliases]
)
self.settings.write_config()
self.view.msg(u"%d inactive aliases have been successfully"
" cleaned." % len(inactive_aliases))
class CommitCommand(BaseTimesheetCommand):
"""
Usage: commit
Commits your work to the server.
"""
def run(self):
timesheet_collection = self.get_timesheet_collection()
if (self.options.get('date', None) is None
and not self.options.get('ignore_date_error', False)):
non_workday_entries = (
timesheet_collection.get_non_current_workday_entries()
)
if non_workday_entries:
self.view.non_working_dates_commit_error(
non_workday_entries.keys()
)
return
self.view.pushing_entries()
r = remote.ZebraRemote(self.settings.get('site'),
self.settings.get('username'),
self.settings.get('password'))
all_pushed_entries = []
all_failed_entries = []
for timesheet in timesheet_collection.timesheets:
entries_to_push = timesheet.get_entries(
self.options.get('date', None), exclude_ignored=True,
exclude_local=True, exclude_unmapped=True, regroup=True
)
(pushed_entries, failed_entries) = r.send_entries(
entries_to_push, self.alias_mappings, self._entry_pushed
)
local_entries = timesheet.get_local_entries(
self.options.get('date', None)
)
local_entries_list = []
for (date, entries) in local_entries.iteritems():
local_entries_list.extend(entries)
for entry in local_entries_list + pushed_entries:
entry.commented = True
for (entry, _) in failed_entries:
entry.fix_start_time()
# Also fix start time for ignored entries. Since they won't get
# pushed, there's a chance their previous sibling gets commented
for (date, entries) in timesheet.get_ignored_entries().items():
for entry in entries:
entry.fix_start_time()
timesheet.file.write(timesheet.entries)
all_pushed_entries.extend(pushed_entries)
all_failed_entries.extend(failed_entries)
ignored_entries = timesheet_collection.get_ignored_entries(
self.options.get('date', None)
)
ignored_entries_list = []
for (date, entries) in ignored_entries.iteritems():
ignored_entries_list.extend(entries)
self.view.pushed_entries_summary(all_pushed_entries,
all_failed_entries,
ignored_entries_list)
def _entry_pushed(self, entry, error):
self.view.pushed_entry(entry, error, self.alias_mappings)
class EditCommand(BaseTimesheetCommand):
"""
Usage: edit
Opens your zebra file in your favourite editor.
"""
def run(self):
timesheet_collection = None
try:
timesheet_collection = self.get_timesheet_collection()
except ParseError:
pass
if timesheet_collection:
t = timesheet_collection.timesheets[0]
if (self.settings.get('auto_add') !=
Settings.AUTO_ADD_OPTIONS['NO']
and not self.options.get('forced_file')):
auto_fill_days = self.settings.get_auto_fill_days()
if auto_fill_days:
t.prefill(auto_fill_days, limit=None)
t.file.write(t.entries)
try:
editor = self.settings.get('editor')
except NoOptionError:
editor = None
file.spawn_editor(self.options['file'], editor)
try:
timesheet_collection = self.get_timesheet_collection(True)
except ParseError as e:
self.view.err(e)
else:
self.view.show_status(
timesheet_collection.get_entries(regroup=True),
self.alias_mappings, self.settings
)
class HelpCommand(BaseCommand):
"""
YO DAWG you asked for help for the help command. Try to search Google in
Google instead.
"""
def __init__(self, application_container):
super(HelpCommand, self).__init__(application_container)
self.commands_mapping = application_container.commands_mapping
def setup(self):
if len(self.arguments) == 0:
raise UsageError()
else:
self.command = self.arguments[0]
def run(self):
if self.command == 'help':
self.view.command_usage(self)
else:
if self.command in self.commands_mapping:
self.view.command_usage(self.commands_mapping[self.command])
else:
self.view.err(u"Command %s doesn't exist." % self.command)
class SearchCommand(BaseCommand):
"""
Usage: search search_string
Searches for a project by its name. The letter in the first column
indicates the status of the project: [N]ot started, [A]ctive, [F]inished,
[C]ancelled.
"""
def validate(self):
if len(self.arguments) < 1:
raise UsageError()
def run(self):
projects = self.projects_db.search(self.arguments)
projects = sorted(projects, key=lambda project: project.name.lower())
self.view.search_results(projects)
class ShowCommand(BaseCommand):
"""
Usage: show project_id
Shows the details of the given project_id (you can find it with the search
command).
"""
def validate(self):
if len(self.arguments) < 1:
raise UsageError()
try:
int(self.arguments[0])
except ValueError:
raise UsageError("The project id must be a number")
def setup(self):
self.project_id = int(self.arguments[0])
def run(self):
try:
project = self.projects_db.get(self.project_id)
except IOError:
raise Exception("Error: the projects database file doesn't exist. "
"Please run `taxi update` to create it")
if project is None:
self.view.err(
u"The project `%s` doesn't exist" % (self.project_id)
)
else:
mappings = self.settings.get_reversed_aliases()
self.view.project_with_activities(project, mappings)
class StartCommand(BaseTimesheetCommand):
"""
Usage: start project_name
Use it when you start working on the project project_name. This will add
the project name and the current time to your entries file. When you're
finished, use the stop command.
"""
def validate(self):
if len(self.arguments) != 1:
raise UsageError()
def setup(self):
self.project_name = self.arguments[0]
def run(self):
today = datetime.date.today()
try:
timesheet_collection = self.get_timesheet_collection()
except ParseError as e:
self.view.err(e)
return
t = timesheet_collection.timesheets[0]
# If there's a previous entry on the same date, check if we can use its
# end time as a start time for the newly started entry
today_entries = t.get_entries(today)
        if (today in today_entries and today_entries[today]
and isinstance(today_entries[today][-1].duration, tuple)
and today_entries[today][-1].duration[1] is not None):
new_entry_start_time = today_entries[today][-1].duration[1]
else:
new_entry_start_time = datetime.datetime.now()
duration = (new_entry_start_time, None)
e = TimesheetEntry(self.project_name, duration, '?')
t.entries[today].append(e)
t.file.write(t.entries)
class StatusCommand(BaseTimesheetCommand):
"""
Usage: status
Shows the summary of what's going to be committed to the server.
"""
def setup(self):
self.date = self.options.get('date', None)
def run(self):
try:
timesheet_collection = self.get_timesheet_collection()
except ParseError as e:
self.view.err(e)
else:
self.view.show_status(
timesheet_collection.get_entries(self.date, regroup=True),
self.alias_mappings,
self.settings
)
class StopCommand(BaseTimesheetCommand):
"""
Usage: stop [description]
Use it when you stop working on the current task. You can add a description
to what you've done.
"""
def setup(self):
if len(self.arguments) == 0:
self.description = None
else:
self.description = ' '.join(self.arguments)
def run(self):
try:
timesheet_collection = self.get_timesheet_collection()
current_timesheet = timesheet_collection.timesheets[0]
current_timesheet.continue_entry(
datetime.date.today(),
datetime.datetime.now().time(),
self.description
)
except ParseError as e:
self.view.err(e)
except NoActivityInProgressError:
self.view.err(u"You don't have any activity in progress for today")
else:
current_timesheet.file.write(current_timesheet.entries)
class UpdateCommand(BaseCommand):
"""
Usage: update
Synchronizes your project database with the server and updates the shared
aliases.
"""
def setup(self):
self.site = self.settings.get('site')
self.username = self.settings.get('username')
self.password = self.settings.get('password')
def run(self):
self.view.updating_projects_database()
aliases_before_update = self.settings.get_aliases()
local_aliases = self.settings.get_aliases(include_shared=False)
r = remote.ZebraRemote(self.site, self.username, self.password)
projects = r.get_projects()
self.projects_db.update(projects)
# Put the shared aliases in the config file
shared_aliases = {}
for project in projects:
if project.is_active():
for alias, activity_id in project.aliases.iteritems():
self.settings.add_shared_alias(alias, project.id,
activity_id)
shared_aliases[alias] = (project.id, activity_id)
aliases_after_update = self.settings.get_aliases()
self.settings.write_config()
self.view.projects_database_update_success(aliases_before_update,
aliases_after_update,
local_aliases,
shared_aliases,
self.projects_db)
|
python
|
"""This module will contain everything needed to train a neural Network.
Authors:
- Johannes Cartus, QCIEP, TU Graz
"""
from os.path import join
from uuid import uuid4
import tensorflow as tf
import numpy as np
from SCFInitialGuess.utilities.usermessages import Messenger as msg
from SCFInitialGuess.nn.cost_functions import MSE, RegularizedMSE
def mse_with_l2_regularisation(
network,
expectation_tensor,
regularisation_parameter=0.001
):
with tf.name_scope("mse_with_l2_regularisation"):
error = tf.losses.mean_squared_error(
network.output_tensor,
expectation_tensor
)
regularisation = tf.contrib.layers.apply_regularization(
tf.contrib.layers.l2_regularizer(regularisation_parameter),
network.weights
)
cost = error + regularisation
tf.summary.scalar("weight_decay", regularisation)
tf.summary.scalar("error", error)
tf.summary.scalar("total_loss", cost)
return cost, error, regularisation
class Trainer(object):
def __init__(
self,
network,
optimizer=None,
error_function=None,
cost_function=None):
self.network = network
if optimizer is None:
self.optimizer = tf.train.AdamOptimizer(
learning_rate=0.001
)
else:
self.optimizer = optimizer
if cost_function is None:
self.cost_function = RegularizedMSE()
else:
self.cost_function = cost_function
if error_function is None:
self.error_function = MSE()
else:
self.error_function = error_function
self.training_step = None
self.test_error = None
def setup(self, target_graph=None):
if target_graph is None:
msg.info("No target graph specified for Trainer setup. " + \
"Creating new graph ...", 1)
self.graph = tf.Graph()
else:
msg.info("Appending to graph: " + str(target_graph))
self.graph = target_graph
with self.graph.as_default():
msg.info("Setting up the training in the target graph ...", 1)
# placeholder for dataset target-values
self.target_placeholder = tf.placeholder(
dtype="float32",
shape=[None, self.network.structure[-1]],
name="y"
)
msg.info("network ...", 1)
with tf.name_scope("network/"):
network_output = self.network.setup()
self.input_placeholder = self.network.input_tensor
msg.info("error function ...", 1)
with tf.name_scope("error_function/"):
self.error = self.error_function.function(
self.network,
self.target_placeholder
)
msg.info("cost function ...", 1)
with tf.name_scope("cost_function/"):
self.cost = self.cost_function.function(
self.network,
self.target_placeholder
)
msg.info("training step", 1)
with tf.name_scope("training/"):
self.training_step = self.optimizer.minimize(self.cost)
return self.graph, self.network, self.target_placeholder
def train(
self,
dataset,
max_steps=100000,
evaluation_period=200,
mini_batch_size=0.2,
convergence_threshold=1e-5,
summary_save_path=None
):
with self.graph.as_default():
sess = tf.Session(graph=self.graph)
if self.training_step is None:
self.setup()
#--- prep the writer ---
if not summary_save_path is None:
summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(summary_save_path)
writer.add_graph(sess.graph)
#---
#--- train the network ---
old_error = 1e10
sess.run(tf.global_variables_initializer())
msg.info("Starting network training ...", 1)
for step in range(max_steps):
mini_batch = dataset.sample_minibatch(mini_batch_size)
                if step % np.ceil(evaluation_period / 10) == 0:
if not summary_save_path is None:
writer.add_summary(
sess.run(
summary,
feed_dict={
self.input_placeholder: mini_batch[0],
self.target_placeholder: mini_batch[1]
}
),
step
)
if step % evaluation_period == 0:
error = sess.run(
self.error,
feed_dict={
self.input_placeholder: dataset.validation[0],
self.target_placeholder: dataset.validation[1]
}
)
cost = sess.run(
self.cost,
feed_dict={
self.input_placeholder: dataset.validation[0],
self.target_placeholder: dataset.validation[1]
}
)
# compare to previous error
diff = np.abs(error - old_error)
# convergence check
if diff < convergence_threshold:
msg.info(
"Convergence reached after " + str(step) + " steps.",
1
)
break
else:
msg.info(
"Val. Cost: " + \
"{:0.3E}. Error: {:0.3E}. Diff: {:0.1E}".format(
cost,
error,
diff
)
)
old_error = error
# do training step
sess.run(
self.training_step,
feed_dict={
self.input_placeholder: mini_batch[0],
self.target_placeholder: mini_batch[1]
}
)
#---
if not summary_save_path is None:
writer.close()
test_error = sess.run(
self.error,
feed_dict={
self.input_placeholder: dataset.testing[0],
self.target_placeholder: dataset.testing[1]
}
)
self.test_error = test_error
msg.info("Test error: {:0.5E}".format(test_error), 1)
return self.network, sess
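# A minimal usage sketch for the Trainer class (the `network` and `dataset`
# objects are hypothetical; they would come from SCFInitialGuess.nn.networks
# and SCFInitialGuess.utilities.dataset respectively):
#
#     trainer = Trainer(network)
#     trainer.setup()
#     trained_network, sess = trainer.train(
#         dataset,
#         max_steps=50000,
#         evaluation_period=200,
#         mini_batch_size=0.2,
#         summary_save_path="./log"
#     )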
class ContinuousTrainer(Trainer):
"""This trainer will train a network until the training is interrupted
    by the user. Every time a new minimum of the validation error is reached,
the model is exported.
"""
def train(
self,
dataset,
network_save_path,
comment=None,
old_error=1e10,
evaluation_period=2000,
mini_batch_size=40
):
"""Similaraly to the train function in the superclass, the function will
start the training. However it will continue to train until the user
aborts it. It will be exported after evaluation_period training
steps if a new minumim of error on the validation training set is reached.
"""
with self.graph.as_default():
sess = tf.Session(graph=self.graph)
if self.training_step is None:
self.setup()
#--- train the network ---
sess.run(tf.global_variables_initializer())
msg.info("Starting network training ...", 1)
#Training will run until user aborts it.
while True:
#--- do training ---
for step in range(evaluation_period):
mini_batch = dataset.sample_minibatch(mini_batch_size)
sess.run(
self.training_step,
feed_dict={
self.input_placeholder: mini_batch[0],
self.target_placeholder: mini_batch[1]
}
)
#---
#--- evaluation ---
# calculate validation errors ...
error = sess.run(
self.error,
feed_dict={
self.input_placeholder: dataset.validation[0],
self.target_placeholder: dataset.validation[1]
}
)
# ... and costs.
cost = sess.run(
self.cost,
feed_dict={
self.input_placeholder: dataset.validation[0],
self.target_placeholder: dataset.validation[1]
}
)
# Check for new validation error minimum
diff = error - old_error
# if a new minimum was found notify user
# and save the model.
if diff < 0:
message = (
"New Minimum found! Val. Cost: {:0.1E}. " + \
"Error: {:0.3E}. Diff: {:0.1E}"
).format(cost, error, diff)
msg.info(message)
# export network
self.network.export(sess, network_save_path, error, comment)
# store new minimum
old_error = error
#---
#---
def train_network(
network,
dataset,
sess=None,
learning_rate=0.001,
regularisation_parameter=0.01,
max_steps=100000,
evaluation_period=200,
mini_batch_size=0.2,
convergence_threshold=1e-5,
summary_save_path=None
):
"""Train a neural Neutwork from nn.networks with the AdamOptimizer,
to minimize the mean squared error with l2 regularisation.
Args:
- network <nn.networks.AbstractNeuralNetwork>: the network to be trained.
- dataset <utilities.dataset.Dataset>: the dataset to train the net on.
- learning_rate <float>: the learning rate to use for training w/
AdamOptimizer
- regularisation_parameter <float>: the factor with which the
regularisation is added to the total cost.
- max_steps <int>: max number of learning steps to take if convergence
not met before.
- evaluation_period <int>: period of training steps after which there
will be a check for convergence.
        - mini_batch_size <int>: size of the minibatch that is randomly sampled
from the training dataset in every training step.
- convergence_threshold <float>: training convergence is reached if
difference in error drops below this value.
- summary_save_path <str>: the full path to a folder in which the
tensorboard data will be written. If None given nothing will be exported.
Returns:
- the trained network
- the session
"""
if sess is None:
sess = tf.Session()
#--- set up the graph ---
msg.info("Setting up the graph ...", 1)
network_output = network.setup()
x = network.input_tensor
y = tf.placeholder(
dtype="float32",
shape=[None, network.structure[-1]],
name="y"
)
# cost is mse w/ l2 regularisation
cost, mse, _ = mse_with_l2_regularisation(
network,
expectation_tensor=y,
regularisation_parameter=regularisation_parameter
)
#optimizer and training
with tf.name_scope("training"):
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_step = optimizer.minimize(cost)
#---
#--- prep the writer ---
if not summary_save_path is None:
msg.warn("Careful! If more than 1 network is in current graph, " + \
"it should be cleared before merging the summary!"
)
summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(summary_save_path)
writer.add_graph(sess.graph)
#---
#--- train the network ---
msg.info("Starting network training ...", 1)
old_error = 1e10
sess.run(tf.global_variables_initializer())
for step in range(max_steps):
mini_batch = dataset.sample_minibatch(mini_batch_size)
if step % np.ceil(evaluation_period / 10) == 0:
if not summary_save_path is None:
writer.add_summary(
sess.run(
summary,
feed_dict={
x: mini_batch[0],
y: mini_batch[1]
}
),
step
)
if step % evaluation_period == 0:
error = sess.run(
mse,
feed_dict={x: dataset.validation[0], y: dataset.validation[1]}
)
# compare to previous error
diff = np.abs(error - old_error)
# convergence check
if diff < convergence_threshold:
msg.info(
"Convergence reached after " + str(step) + " steps.",
1
)
break
else:
msg.info(
"Validation cost: {:0.5E}. Diff to prev.: {:0.1E}".format(
error,
diff
)
)
old_error = error
# do training step
sess.run(train_step, feed_dict={x: mini_batch[0], y: mini_batch[1]})
#---
if not summary_save_path is None:
writer.close()
test_error = sess.run(
mse,
feed_dict={x: dataset.testing[0], y: dataset.testing[1]}
)
msg.info("Test error: {:0.5E}".format(test_error), 1)
return network, sess
def network_benchmark(
models,
dataset,
logdir,
steps_report=250,
max_training_steps=100000,
convergence_eps=1e-7
):
for model in models:
msg.info("Investigating model " + str(model), 2)
save_path = join(logdir, str(model))
# make new session and build graph
tf.reset_default_graph()
sess = tf.Session()
dim_in = model.network.structure[0]
dim_out = model.network.structure[-1]
f = model.network.setup()
x = model.input_tensor
y = tf.placeholder(tf.float32, shape=[None, dim_out])
with tf.name_scope("loss"):
            error = tf.losses.mean_squared_error(y, f) / dim_out  # mean squared error per output element
weight_decay = tf.contrib.layers.apply_regularization(
tf.contrib.layers.l2_regularizer(0.001),
model.network.weights
)
loss = error + weight_decay
tf.summary.scalar("weight_decay", weight_decay)
tf.summary.scalar("error_per_element", error)
tf.summary.scalar("total_loss", loss)
# define loss
with tf.name_scope("train"):
train_step = model.optimizer.minimize(loss)
summary = tf.summary.merge_all()
#saver = tf.train.Saver()
writer = tf.summary.FileWriter(save_path)
writer.add_graph(sess.graph)
msg.info("Start training ... ", 1)
old_error = 1e13
sess.run(tf.global_variables_initializer())
for step in range(max_training_steps):
batch = dataset.sample_minibatch(0.2)
# log progress
if step % 50 == 0:
writer.add_summary(sess.run(
summary,
feed_dict={x: batch[0], y: batch[1]}
), step)
# save graph and report error
if step % steps_report == 0:
validation_error = sess.run(
error,
feed_dict={x: dataset.validation[0], y: dataset.validation[1]}
) / dim_out
#saver.save(sess, log_dir, step)
diff = np.abs(old_error - validation_error)
msg.info("Error: {:0.4E}. Diff to before: {:0.4E}".format(
validation_error,
diff
))
if diff < convergence_eps:
msg.info(
"Convergence reached after " + str(step) + " steps.", 1
)
break
else:
old_error = validation_error
if step + 1 == max_training_steps:
msg.info("Max iterations exceeded.", 1)
sess.run(train_step, feed_dict={x: batch[0], y: batch[1]})
test_error = sess.run(
error,
            feed_dict={x: dataset.testing[0], y: dataset.testing[1]}
) / dim_out
msg.info("Test error: {:0.1E}".format(test_error))
|
python
|
from django.contrib import admin
from . import models
from django.conf import settings
admin.site.register(models.OfferCategory)
class OfferAdmin(admin.ModelAdmin):
if settings.MULTI_VENDOR:
list_display = ['title', 'total_vendors', 'starts_from', 'ends_at']
list_filter = ('vendor',)
else:
list_display = ['title', 'create_at', 'starts_from', 'ends_at']
list_per_page = 25
search_fields = ['title', 'description', 'ends_at']
readonly_fields = ['big_banner_tag', 'small_banner_tag']
# autocomplete_fields = ['category']
admin.site.register(models.Offer, OfferAdmin)
|
python
|
from __future__ import annotations
from amulet.world_interface.chunk.interfaces.leveldb.base_leveldb_interface import (
BaseLevelDBInterface,
)
class LevelDB4Interface(BaseLevelDBInterface):
def __init__(self):
BaseLevelDBInterface.__init__(self)
self.features["chunk_version"] = 4
self.features["finalised_state"] = "int0-2"
self.features["data_2d"] = "unused_height512|biome256"
self.features["block_entities"] = "31list"
self.features["block_entity_format"] = "str-id"
self.features["block_entity_coord_format"] = "xyz-int"
self.features["entities"] = "32list"
self.features["entity_format"] = "int-id"
self.features["entity_coord_format"] = "Pos-list-float"
self.features["terrain"] = "2farray"
INTERFACE_CLASS = LevelDB4Interface
|
python
|
import unittest
from ui.stub_io import StubIO
class StubIOTest(unittest.TestCase):
def setUp(self):
self.io = StubIO()
def test_method_write_adds_argument_to_output_list(self):
self.io.write("test")
self.assertEqual(self.io.output, ["test"])
def test_method_set_input_adds_argument_to_input_list(self):
self.io.set_input("test")
self.assertEqual(self.io.input, ["test"])
def test_return_empty_string_when_input_list_is_empty(self):
result = self.io.read("")
self.assertEqual(result, "")
def test_return_first_item_of_input_list_when_it_is_not_empty(self):
self.io.set_input("test")
result = self.io.read("")
self.assertEqual(result, "test")
|
python
|
import unittest
from kafka_influxdb.encoder import heapster_event_json_encoder
class TestHeapsterEventJsonEncoder(unittest.TestCase):
def setUp(self):
self.encoder = heapster_event_json_encoder.Encoder()
def testEncoder(self):
msg = b'{"EventValue":"{\\n \\"metadata\\": {\\n \\"name\\": \\"etcd-operator-562633149-vvr85.149bd41846d603d4\\",\\n \\"namespace\\": \\"default\\",\\n \\"selfLink\\": \\"/api/v1/namespaces/default/events/etcd-operator-562633149-vvr85.149bd41846d603d4\\",\\n \\"uid\\": \\"09f904cd-dff1-11e6-bd3e-005056923a7e\\",\\n \\"resourceVersion\\": \\"21782526\\",\\n \\"creationTimestamp\\": \\"2017-01-21T15:48:22Z\\"\\n },\\n \\"involvedObject\\": {\\n \\"kind\\": \\"Pod\\",\\n \\"namespace\\":\\"default\\",\\n \\"name\\": \\"etcd-operator-562633149-vvr85\\",\\n \\"uid\\":\\"a5f12e21-de53-11e6-bd3e-005056923a7e\\",\\n \\"apiVersion\\": \\"v1\\",\\n \\"resourceVersion\\":\\"21339961\\",\\n \\"fieldPath\\": \\"spec.containers{etcd-operator}\\"\\n },\\n \\"reason\\": \\"BackOff\\",\\n\\"message\\": \\"Back-off pulling image \\\\\\"10.58.9.201:5000/dc/etcd-operator:latest\\\\\\"\\",\\n \\"source\\":{\\n \\"component\\": \\"kubelet\\",\\n \\"host\\": \\"10.58.9.212\\"\\n },\\n \\"firstTimestamp\\":\\"2017-01-21T15:48:22Z\\",\\n \\"lastTimestamp\\": \\"2017-01-22T07:10:28Z\\",\\n \\"count\\": 3955,\\n \\"type\\": \\"Normal\\"\\n}","EventTimestamp":"2017-01-22T07:10:28Z","EventTags":{"eventID":"09f904cd-dff1-11e6-bd3e-005056923a7e","hostname":"10.58.9.212","pod_id":"a5f12e21-de53-11e6-bd3e-005056923a7e","pod_name":"etcd-operator-562633149-vvr85"}}'
expected_msg = ['events,kind=Pod,namespace_name=default,object_name=etcd-operator-562633149-vvr85,reason=BackOff,hostname="10.58.9.212" message="Back-off pulling image \\"10.58.9.201:5000/dc/etcd-operator:latest\\"" 1485069028']
encoded_message = self.encoder.encode(msg)
self.assertEqual(encoded_message, expected_msg)
|
python
|
import os
import numpy as np
from PIL import Image
import subprocess
import cv2
def vision():
output = False # False: Disable display output & True: Enable display output
# subprocess.run(["sudo fswebcam --no-banner -r 2048x1536 image3.jpg"], capture_output=True)
# subprocess.run("sudo fswebcam /home/pi/Desktop/Frame.jpg", capture_output=True)
# path = r"C:\Users\thephysicist\Desktop\pic.jpeg"
# path = r'/home/pi/Desktop/image3.jpg'
path = r"pic_5.jpeg"
cap = cv2.VideoCapture(0)
# Check if the webcam is opened correctly
if not cap.isOpened():
raise IOError("Cannot open webcam")
ret, frame = cap.read()
cv2.imwrite(path, frame)
imcolor = Image.open(path)
# imcolor = Image.open(path)
im = imcolor.convert('L')
pixel = im.load()
x = 0
y = 0
nb = 0
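    # The loop below masks out the bottom 200 rows of the image (the remaining
    # margin checks are no-ops with the current offsets) and accumulates the
    # coordinates of every pixel brighter than 220; x, y and nb then yield the
    # centroid of the bright region.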
for i in range(im.size[0]):
for j in range(im.size[1]):
if j > (im.size[1]-200):
im.putpixel([i,j], 0)
elif j < (0):
im.putpixel([i,j], 0)
elif i > (im.size[0]-0):
im.putpixel([i,j], 0)
elif i < (0):
im.putpixel([i,j], 0)
elif pixel[i,j] > 220:
x += i
nb += 1
y += j
x = int(x/nb)
y = int(y/nb)
coord = [(0.20*(x-(im.size[0]/2))/(im.size[0]/2)), (0.20*(y)/(im.size[1]/2))] #[x,y] in meters, origin at the A axis
if output:
for i in range(x-10,x+10,1):
for j in range(y-10,y+10,1):
imcolor.putpixel([i,j], (255,0,0))
imcolor.show()
return coord
if __name__ == '__main__':
vision()
print("Done with Fred's vision")
|
python
|
# 生成矩形的周长上的坐标
import numpy as np
from skimage.draw import rectangle_perimeter
img = np.zeros((5, 6), dtype=np.uint8)
start = (2, 3)
end = (3, 4)
rr, cc = rectangle_perimeter(start, end=end, shape=img.shape)
img[rr, cc] = 1
print(img)
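# Expected output: the perimeter is generated one pixel around the given
# rectangle extent, clipped to the image shape:
# [[0 0 0 0 0 0]
#  [0 0 1 1 1 1]
#  [0 0 1 0 0 1]
#  [0 0 1 0 0 1]
#  [0 0 1 1 1 1]]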
|
python
|
# -*- coding: utf-8 -*-
import logging
from openstack import exceptions as openstack_exception
from cinderclient import client as volume_client
from cinderclient import exceptions as cinder_exception
import oslo_messaging
from oslo_config import cfg
from BareMetalControllerBackend.conf.env import env_config
from common import exceptions as exc
from common import utils
LOG = logging.getLogger(__name__)
DEFAULT_URL = None
TRANSPORTS = {}
def get_transport(url, optional=False, cache=True, exchange='vianet_guest'):
global TRANSPORTS, DEFAULT_URL
cache_key = url or DEFAULT_URL
cache_key = '%s_%s' % (cache_key, exchange)
transport = TRANSPORTS.get(cache_key)
if not transport or not cache:
try:
oslo_messaging.set_transport_defaults(exchange)
transport = oslo_messaging.get_transport(cfg.CONF, url)
except (oslo_messaging.InvalidTransportURL,
oslo_messaging.DriverLoadFailure):
if not optional or url:
# NOTE(sileht): oslo_messaging is configured but unloadable
# so reraise the exception
raise
return None
else:
if cache:
TRANSPORTS[cache_key] = transport
return transport
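# Transports are cached per (url, exchange) pair; cache=False forces a fresh
# oslo.messaging transport, and optional=True suppresses load failures when no
# explicit url was supplied (the function then returns None).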
class BaremetalGuestApi(object):
def __init__(self, topic):
self.topic = topic
transport = get_transport(env_config.guest_transport_url,
exchange=env_config.guest_exchange)
target = oslo_messaging.Target(exchange=env_config.guest_exchange,
server=self.topic,
topic=self.topic)
self.client = oslo_messaging.RPCClient(transport, target)
def get_guest_connector(self):
ctxt = {}
return self.client.call(ctxt, method='get_guest_connector')
def guest_connect_volume(self, attachments):
"""
Rpc client to guest
:param attachments: cinder attachments
:return:
"""
ctxt = {}
connection = attachments['connection_info']
return self.client.call(ctxt, method='guest_connect_volume',
connection=connection)
def guest_deconnect_volume(self, attachments):
ctxt = {}
connection = attachments['connection_info']
return self.client.call(ctxt, method='guest_deconnect_volume',
connection=connection)
@utils.check_instance_state(vm_state=['active'])
def baremetal_attach_volume(server, volume, openstack_client):
"""
Baremetal attach volume
:param openstack_client: openstack client
:param server: the server object get by server id
:param volume: volume object get by volume id
:return: attachments
"""
if volume.status != 'available':
raise exc.VolumeInvalidState(state=volume.status)
guest_id = server.metadata.get('guest_id')
if not guest_id:
raise exc.GuestAgentTopicNotFound
guest_client = BaremetalGuestApi(guest_id)
connector_properties = guest_client.get_guest_connector()
server_id = server.id
volume_id = volume.id
cinder = volume_client.Client('3.44', session=openstack_client.session)
info = cinder.attachments.create(volume_id, connector_properties, server_id)
try:
connection = info['connection_info']
# now we only support ISCSI
if connection['driver_volume_type'].lower() != 'iscsi':
raise exc.ProtocolNotSupported
device_info = guest_client.guest_connect_volume(info)
cinder.attachments.complete(info['connection_info']['attachment_id'])
return device_info
except Exception as e:
attachment_id = info.get('connection_info').get('attachment_id')
cinder.attachments.delete(attachment_id)
raise e
@utils.check_instance_state(vm_state=['active'])
def baremetal_detach_volume(server, volume_id, openstack_client, attachment_uuid=None):
"""
Baremetal detach volume
:param openstack_client: openstack client
:param server: the server object get by server id
:param volume: volume id
:return: attachments
"""
guest_id = server.metadata.get('guest_id')
if not guest_id:
raise exc.GuestAgentTopicNotFound
guest_client = BaremetalGuestApi(guest_id)
server_id = server.id
cinder = volume_client.Client('3.44', session=openstack_client.session)
if not attachment_uuid:
# We need the specific attachment uuid to know which one to detach.
# if None was passed in we can only work if there is one and only
# one attachment for the volume.
# Get the list of attachments for the volume.
search_opts = {'volume_id': volume_id}
attachments = cinder.attachments.list(search_opts=search_opts)
if len(attachments) == 0:
raise exc.NoAttachmentsFound(volume_id=volume_id)
if len(attachments) == 1:
attachment_uuid = attachments[0].id
else:
# We have more than 1 attachment and we don't know which to use
raise exc.NeedAttachmentUUID(volume_id=volume_id)
attachment = cinder.attachments.show(attachment_uuid)
guest_client.guest_deconnect_volume(attachment.to_dict())
cinder.attachments.delete(attachment_uuid)
def volume_backup_restore(openstack_client, backup_id, volume_id=None, volume_name=None):
cinder = volume_client.Client('3.44', session=openstack_client.session)
backups = cinder.restores.restore(backup_id, volume_id, volume_name)
return backups
def volume_extend(openstack_client, volume_id, new_size):
try:
cinder = volume_client.Client('2', session=openstack_client.session)
volume = cinder.volumes.extend(volume_id, new_size)
return volume
except cinder_exception.OverLimit as e:
raise openstack_exception.HttpException(details=e.message)
|
python
|
class nodo_error:
def __init__(self, linea, columna, valor, descripcion):
self.line = str(linea)
self.column = str(columna)
self.valor = str(valor)
self.descripcion = str(descripcion)
errores = []
|
python
|
import pytest
from telliot_core.apps.core import TelliotCore
from telliot_core.queries.price.spot_price import SpotPrice
from telliot_core.utils.response import ResponseStatus
from telliot_core.utils.timestamp import TimeStamp
@pytest.mark.asyncio
async def test_main(mumbai_cfg):
async with TelliotCore(config=mumbai_cfg) as core:
chain_id = core.config.main.chain_id
flex = core.get_tellorflex_contracts()
governance_address = await flex.oracle.get_governance_address()
if chain_id == 137:
assert governance_address == "0x2cFC5bCE14862D46fBA3bb46A36A8b2d7E4aC040"
elif chain_id == 80001:
# Old one, TODO confirm w/ Tim it switched
# assert governance_address == "0x0Fe623d889Ad1c599E5fF3076A57D1D4F2448CDe"
# New one
assert governance_address == "0x8A868711e3cE97429faAA6be476F93907BCBc2bc"
stake_amount = await flex.oracle.get_stake_amount()
assert stake_amount == 10.0
print(stake_amount)
tlnv, status = await flex.oracle.get_time_of_last_new_value()
assert isinstance(status, ResponseStatus)
if status.ok:
assert isinstance(tlnv, TimeStamp)
else:
assert tlnv is None
print(tlnv)
lock = await flex.oracle.get_reporting_lock()
print(lock)
token_address = await flex.oracle.get_token_address()
if chain_id == 137:
assert token_address == "0xE3322702BEdaaEd36CdDAb233360B939775ae5f1"
elif chain_id == 80001:
assert token_address == "0x45cAF1aae42BA5565EC92362896cc8e0d55a2126"
total_stake = await flex.oracle.get_total_stake_amount()
print(f"Total Stake: {total_stake}")
staker_info, status = await flex.oracle.get_staker_info(core.get_account().address)
assert isinstance(status, ResponseStatus)
if status.ok:
for info in staker_info:
assert isinstance(info, int)
else:
assert staker_info is None
q = SpotPrice(asset="btc", currency="USD")
count, status = await flex.oracle.get_new_value_count_by_qeury_id(q.query_id)
assert isinstance(status, ResponseStatus)
if status.ok:
assert isinstance(count, int)
else:
assert count is None
|
python
|
from swockets import swockets, SwocketError, SwocketClientSocket, SwocketHandler
handle = SwocketHandler()
server = swockets(swockets.ISSERVER, handle)
handle.sock = server
while(True):
user_input = {"message":raw_input("")}
if len(server.clients) > 0:
server.send(user_input, server.clients[0], server.clients[0].sock)
|
python
|
from tkinter import *
root = Tk()
root.geometry('800x800')
root.title('Rythmic Auditory Device')
root.configure(background="#ececec")
f = ("Times bold", 54)
def next_page():
"""Go to next page of GUI
Function destroys current calibration page and moves on to next main page.
"""
root.destroy()
import calibration
Label(
root,
text="WELCOME",
padx=20,
pady=20,
bg='#ffc0cb',
font=f
).pack(expand=True, fill=BOTH)
Button(
root,
text="Next",
font=f,
command=next_page
).pack(fill=X, expand=TRUE, side=LEFT)
root.mainloop()
|
python
|
from functools import lru_cache
import requests
from six import u
from unidecode import unidecode
_synset_sparql_query = """
SELECT ?item ?itemLabel WHERE {{
?item wdt:P2888 <http://wordnet-rdf.princeton.edu/wn30/{}-n>
SERVICE wikibase:label {{ bd:serviceParam wikibase:language "{}". }}
}}
"""
_wikidata_url = 'https://query.wikidata.org/sparql'
@lru_cache(maxsize=1000)
def synset_to_label(synset, language='en'):
"""
Queries WordNet for the word of a specified synset in a given language.
:param synset:
:param language:
:return:
"""
# Parse to final query
query = _synset_sparql_query.format(synset[1:], language)
# Query wikidata and get data as JSON
params = {'query': query, 'format': 'json'}
response = requests.get(_wikidata_url, params=params)
data = response.json()
# Fetch labels
labels = [item['itemLabel']['value']
for item in data['results']['bindings']]
# Return
if len(labels) > 0:
return labels[0]
else:
return "???"
def unicode_to_ascii(text):
encoded = ''
for character in text:
if character == u('\xe5'):
encoded += 'aa'
elif character == u('\xe6'):
encoded += 'ae'
elif character == u('\xf8'):
encoded += 'oe'
elif character == u('\xf6'):
encoded += 'oe'
elif character == u('\xe4'):
encoded += 'ae'
elif character == u('\xfc'):
encoded += 'u'
else:
encoded += character
return unidecode(encoded)
if __name__ == "__main__":
test_languages = ["en", "da", "fr", "de", "nl"]
decoded = [[('n03207941', 'dishwasher', 0.25054157),
('n04442312', 'toaster', 0.240155),
('n04070727', 'refrigerator', 0.099175394),
('n04554684', 'washer', 0.065704145),
('n04004767', 'printer', 0.063971408)]]
best_decoded = decoded[0][0]
for code in test_languages:
# Attempt to label
unicode_label = unicode_to_ascii(synset_to_label(best_decoded[0], language=code))
print(code + ":", unicode_label)
|
python
|
#!/usr/bin/env python
# encoding: utf-8
import sys
if sys.version_info.major > 2:
import http.server as http_server
import socketserver
else:
import SimpleHTTPServer as http_server
import SocketServer as socketserver
Handler = http_server.SimpleHTTPRequestHandler
# python -c "import SimpleHTTPServer; m = SimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map; m[''] = 'text/plain'; m.update(dict([(k, v + ';charset=UTF-8') for k, v in m.items()])); SimpleHTTPServer.test();"
Handler.extensions_map = {
'.manifest': 'text/cache-manifest',
'.html': 'text/html',
'.txt': 'text/html',
'.png': 'image/png',
'.jpg': 'image/jpg',
'.svg': 'image/svg+xml',
'.css': 'text/css',
'.js': 'application/x-javascript',
'.md': 'text/x-markdown',
'.markdown': 'text/x-markdown',
'': 'application/octet-stream', # Default
}
m = Handler.extensions_map
m.update(dict([(k, v + ';charset=UTF-8') for k, v in m.items()]))
PORT = 8081
httpd = socketserver.TCPServer(("0.0.0.0", PORT), Handler)
print('serving at port: {}'.format(PORT))
try:
httpd.serve_forever()
except KeyboardInterrupt:
print('\nserver shutdown!')
httpd.server_close()
|
python
|
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
Contact: [email protected]
Repository: https://github.com/TimHessels/SEBAL
Module: SEBAL
Description:
This module contains a compilation of scripts and functions to run pySEBAL
"""
from SEBAL import pySEBAL
__all__ = ['pySEBAL']
__version__ = '0.1'
|
python
|
import unittest
import smartphone
from parameterized import parameterized, parameterized_class
TEST_PHONE_NUMBER = '123'
class SmartPhoneTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.phone = smartphone.SmartPhone()
@parameterized.expand([
('idle', smartphone.CallState.IDLE, f'Has call from {TEST_PHONE_NUMBER}', smartphone.CallState.RING),
('ring', smartphone.CallState.RING, f'Busy with call from {TEST_PHONE_NUMBER}', smartphone.CallState.RING),
('incall', smartphone.CallState.INCALL, f'Alerting user of incoming call from {TEST_PHONE_NUMBER}', smartphone.CallState.INCALL),
])
def test_in_call(self, state_name, state_obj, expected_msg, expected_state):
self.phone.state = state_obj
self.assertEqual(
self.phone.in_call(TEST_PHONE_NUMBER),
expected_msg)
self.assertEqual(self.phone.state, expected_state)
@parameterized.expand([
('idle', smartphone.CallState.IDLE, 'No incoming call', smartphone.CallState.IDLE, None),
('ring', smartphone.CallState.RING, f'Pickup call from {TEST_PHONE_NUMBER}', smartphone.CallState.INCALL, TEST_PHONE_NUMBER),
('incall', smartphone.CallState.INCALL, f'Switch call to {TEST_PHONE_NUMBER}', smartphone.CallState.INCALL, TEST_PHONE_NUMBER),
('incall_no_incoming', smartphone.CallState.INCALL, 'No incoming call', smartphone.CallState.INCALL, None),
])
def test_answer_call(self, state_name, state_obj, expected_msg, expected_state, incoming_call_number):
self.phone.state = state_obj
self.phone.incoming_call_number = incoming_call_number
self.assertEqual(
self.phone.answer_call(),
expected_msg)
self.assertEqual(self.phone.state, expected_state)
@parameterized.expand([
('idle', smartphone.CallState.IDLE, 'No call', smartphone.CallState.IDLE, None, None),
('ring', smartphone.CallState.RING, f'Can not end call in RING', smartphone.CallState.RING, None, TEST_PHONE_NUMBER),
('incall', smartphone.CallState.INCALL, f'End call from {TEST_PHONE_NUMBER}', smartphone.CallState.IDLE, TEST_PHONE_NUMBER, None),
('incall_with_incoming', smartphone.CallState.INCALL, f'End call from {TEST_PHONE_NUMBER}', smartphone.CallState.RING, TEST_PHONE_NUMBER, '456'),
])
def test_end_call(self, state_name, state_obj, expected_msg, expected_state, in_call_number, incoming_call_number):
print(f'test case: {state_name}')
self.phone.state = state_obj
self.phone.incoming_call_number = incoming_call_number
self.phone.in_call_number = in_call_number
self.assertEqual(
self.phone.end_call(),
expected_msg)
self.assertEqual(self.phone.state, expected_state)
@parameterized.expand([
('idle', smartphone.CallState.IDLE, 'No call', smartphone.CallState.IDLE, None, None),
('ring', smartphone.CallState.RING, f'Reject call from {TEST_PHONE_NUMBER}', smartphone.CallState.IDLE, None, TEST_PHONE_NUMBER),
('incall', smartphone.CallState.INCALL, 'Can not reject call in INCALL', smartphone.CallState.INCALL, TEST_PHONE_NUMBER, None),
('incall_with_incoming', smartphone.CallState.INCALL, 'Can not reject call in INCALL', smartphone.CallState.INCALL, TEST_PHONE_NUMBER, '456'),
])
def test_reject_call(self, state_name, state_obj, expected_msg, expected_state, in_call_number, incoming_call_number):
print(f'test case: {state_name}')
self.phone.state = state_obj
self.phone.incoming_call_number = incoming_call_number
self.phone.in_call_number = in_call_number
self.assertEqual(
self.phone.reject_call(),
expected_msg)
self.assertEqual(self.phone.state, expected_state)
if __name__ == '__main__':
unittest.main()
|
python
|
#!/usr/bin/env python
import Bio; from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
import urllib2
import sys
import StringIO
import os
base = os.path.expanduser('~')
prot_folder = base + '/biotools/uniprot_proteomes/'
fasta_records = []
if len(sys.argv) == 1:
    accession = raw_input('Enter UNIPROT proteome accession number: ')
filename = accession
elif len(sys.argv) > 1:
filename = '_'.join(sys.argv[1:])
for arg in sys.argv[1:]:
accession = arg
present = False
for prot_file in os.listdir(prot_folder):
if present == False:
if prot_file == '{}.fasta'.format(accession):
fasta = list(SeqIO.parse(prot_folder + prot_file,'fasta'))
fasta_records.append(fasta)
present = True
if present == False:
url = 'http://www.uniprot.org/uniprot/?query=proteome:{}&format=fasta'.format(accession)
path = prot_folder
f = urllib2.urlopen(url)
page = StringIO.StringIO(f.read())
f.close()
prot = list(SeqIO.parse(page,'fasta'))
SeqIO.write(prot,'{}/{}.fasta'.format(path, accession),'fasta')
#SeqIO.write(embl,'{}/{}.fasta'.format(path,accession),'fasta')
#fasta = SeqIO.read('{}/{}.fasta'.format(path,accession),'fasta')
fasta_records.append(prot)
final = []
for i in fasta_records:
    final += i
SeqIO.write(final, sys.stdout,'fasta')
|
python
|
import os
import math
import cereal.messaging as messaging
import cereal.messaging_arne as messaging_arne
from selfdrive.swaglog import cloudlog
from common.realtime import sec_since_boot
from selfdrive.controls.lib.radar_helpers import _LEAD_ACCEL_TAU
from selfdrive.controls.lib.longitudinal_mpc import libmpc_py
from selfdrive.controls.lib.drive_helpers import MPC_COST_LONG
from common.op_params import opParams
from common.numpy_fast import interp, clip
from common.travis_checker import travis
LOG_MPC = os.environ.get('LOG_MPC', False)
class LongitudinalMpc():
def __init__(self, mpc_id):
self.mpc_id = mpc_id
self.op_params = opParams()
self.setup_mpc()
self.v_mpc = 0.0
self.v_mpc_future = 0.0
self.a_mpc = 0.0
self.v_cruise = 0.0
self.prev_lead_status = False
self.prev_lead_x = 0.0
self.new_lead = False
self.TR_Mod = 0
self.last_cloudlog_t = 0.0
if not travis and mpc_id == 1:
self.pm = messaging_arne.PubMaster(['smiskolData'])
else:
self.pm = None
self.last_cost = 0.0
self.df_profile = self.op_params.get('dynamic_follow', 'relaxed').strip().lower()
self.sng = False
def send_mpc_solution(self, pm, qp_iterations, calculation_time):
qp_iterations = max(0, qp_iterations)
dat = messaging.new_message('liveLongitudinalMpc')
dat.liveLongitudinalMpc.xEgo = list(self.mpc_solution[0].x_ego)
dat.liveLongitudinalMpc.vEgo = list(self.mpc_solution[0].v_ego)
dat.liveLongitudinalMpc.aEgo = list(self.mpc_solution[0].a_ego)
dat.liveLongitudinalMpc.xLead = list(self.mpc_solution[0].x_l)
dat.liveLongitudinalMpc.vLead = list(self.mpc_solution[0].v_l)
dat.liveLongitudinalMpc.cost = self.mpc_solution[0].cost
dat.liveLongitudinalMpc.aLeadTau = self.a_lead_tau
dat.liveLongitudinalMpc.qpIterations = qp_iterations
dat.liveLongitudinalMpc.mpcId = self.mpc_id
dat.liveLongitudinalMpc.calculationTime = calculation_time
pm.send('liveLongitudinalMpc', dat)
def setup_mpc(self):
ffi, self.libmpc = libmpc_py.get_libmpc(self.mpc_id)
self.libmpc.init(MPC_COST_LONG.TTC, MPC_COST_LONG.DISTANCE,
MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK)
self.mpc_solution = ffi.new("log_t *")
self.cur_state = ffi.new("state_t *")
self.cur_state[0].v_ego = 0
self.cur_state[0].a_ego = 0
self.a_lead_tau = _LEAD_ACCEL_TAU
def set_cur_state(self, v, a):
self.cur_state[0].v_ego = v
self.cur_state[0].a_ego = a
def get_TR(self, CS, lead):
if not lead.status or travis:
TR = 1.8
elif CS.vEgo < 5.0:
TR = 1.8
else:
TR = self.dynamic_follow(CS, lead)
if not travis:
self.change_cost(TR,CS.vEgo)
self.send_cur_TR(TR)
return TR
def send_cur_TR(self, TR):
if self.mpc_id == 1 and self.pm is not None:
dat = messaging_arne.new_message('smiskolData')
dat.smiskolData.mpcTR = TR
self.pm.send('smiskolData', dat)
def change_cost(self, TR, vEgo):
TRs = [0.9, 1.8, 2.7]
costs = [1.0, 0.11, 0.05]
cost = interp(TR, TRs, costs)
if self.last_cost != cost:
self.libmpc.change_tr(MPC_COST_LONG.TTC, cost, MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK)
self.last_cost = cost
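  # The interpolation above stiffens the distance cost at short following
  # distances (TR 0.9 -> 1.0) and relaxes it at long ones (TR 2.7 -> 0.05);
  # the value is passed in the distance-cost slot of change_tr and is only
  # applied when it actually changes.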
def dynamic_follow(self, CS, lead):
self.df_profile = self.op_params.get('dynamic_follow', 'normal').strip().lower()
x_vel = [5.0, 15.0] # velocities
if self.df_profile == 'far':
y_dist = [1.8, 2.7] # TRs
elif self.df_profile == 'close': # for in congested traffic
x_vel = [5.0, 15.0]
y_dist = [1.8, 0.9]
else: # default to normal
y_dist = [1.8, 1.8]
TR = interp(CS.vEgo, x_vel, y_dist)
# Dynamic follow modifications (the secret sauce)
x = [-5.0, 0.0, 5.0] # relative velocity values
y = [0.3, 0.0, -0.3] # modification values
self.TR_Mod = interp(lead.vRel, x, y)
TR += self.TR_Mod
if CS.leftBlinker or CS.rightBlinker:
x = [9.0, 55.0] #
y = [1.0, 0.65] # reduce TR when changing lanes
TR *= interp(CS.vEgo, x, y)
return clip(TR, 0.9, 2.7)
def update(self, pm, CS, lead, v_cruise_setpoint):
v_ego = CS.vEgo
# Setup current mpc state
self.cur_state[0].x_ego = 0.0
if lead is not None and lead.status:
x_lead = lead.dRel
v_lead = max(0.0, lead.vLead)
a_lead = lead.aLeadK
if (v_lead < 0.1 or -a_lead / 2.0 > v_lead):
v_lead = 0.0
a_lead = 0.0
self.a_lead_tau = lead.aLeadTau
self.new_lead = False
if not self.prev_lead_status or abs(x_lead - self.prev_lead_x) > 2.5:
self.libmpc.init_with_simulation(self.v_mpc, x_lead, v_lead, a_lead, self.a_lead_tau)
self.new_lead = True
self.prev_lead_status = True
self.prev_lead_x = x_lead
self.cur_state[0].x_l = x_lead
self.cur_state[0].v_l = v_lead
else:
self.prev_lead_status = False
# Fake a fast lead car, so mpc keeps running
self.cur_state[0].x_l = 50.0
self.cur_state[0].v_l = v_ego + 10.0
a_lead = 0.0
self.a_lead_tau = _LEAD_ACCEL_TAU
# Calculate mpc
t = sec_since_boot()
n_its = self.libmpc.run_mpc(self.cur_state, self.mpc_solution, self.a_lead_tau, a_lead, self.get_TR(CS, lead))
duration = int((sec_since_boot() - t) * 1e9)
if LOG_MPC:
self.send_mpc_solution(pm, n_its, duration)
# Get solution. MPC timestep is 0.2 s, so interpolation to 0.05 s is needed
self.v_mpc = self.mpc_solution[0].v_ego[1]
self.a_mpc = self.mpc_solution[0].a_ego[1]
self.v_mpc_future = self.mpc_solution[0].v_ego[10]
# Reset if NaN or goes through lead car
crashing = any(lead - ego < -50 for (lead, ego) in zip(self.mpc_solution[0].x_l, self.mpc_solution[0].x_ego))
nans = any(math.isnan(x) for x in self.mpc_solution[0].v_ego)
backwards = min(self.mpc_solution[0].v_ego) < -0.01
if ((backwards or crashing) and self.prev_lead_status) or nans:
if t > self.last_cloudlog_t + 5.0:
self.last_cloudlog_t = t
cloudlog.warning("Longitudinal mpc %d reset - backwards: %s crashing: %s nan: %s" % (
self.mpc_id, backwards, crashing, nans))
self.libmpc.init(MPC_COST_LONG.TTC, MPC_COST_LONG.DISTANCE,
MPC_COST_LONG.ACCELERATION, MPC_COST_LONG.JERK)
self.cur_state[0].v_ego = v_ego
self.cur_state[0].a_ego = 0.0
self.v_mpc = v_ego
self.a_mpc = CS.aEgo
self.prev_lead_status = False
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Fourth Paradigm Development, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
URL patterns for the OpenStack Dashboard.
"""
import os
import logging
from glob import glob
from django import shortcuts
from django.core import exceptions
from django.conf.urls.defaults import *
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import messages
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.utils.importlib import import_module
from django.views import generic as generic_views
import django.views.i18n
LOG = logging.getLogger(__name__)
topbars = []
def get_topbar_name(file_name):
return os.path.basename(os.path.dirname(os.path.abspath(file_name)))
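# Middleware that restricts access to a topbar based on the roles stored in the
# session: when a logged-in user requests a path under this topbar without any
# of the required roles, pending messages are flushed, an error message is
# recorded and the user is redirected to the splash page.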
class TopbarRoleCheckMiddleware(object):
def __init__(self):
if not hasattr(self, "topbar"):
self.topbar = self.__class__.__module__.split('.')[2]
def process_request(self, request):
if "username" not in request.session:
return
script_name = settings.SCRIPT_NAME
if not request.path.startswith(script_name):
return
path = request.path[len(script_name) + 1:]
if not (path == self.topbar or path.startswith(self.topbar + "/")):
return
if not (self.roles & set(request.session["roles"])):
# flush other error messages
for message in messages.get_messages(request):
pass
messages.error(request,
"Access denied for user %s at topbar %s" %
(request.session["username"],
self.topbar))
return shortcuts.redirect("auth/splash")
|
python
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from azext_devops.dev.common.arguments import should_detect
class TestArgumentsMethods(unittest.TestCase):
def test_should_detect(self):
# tests default behaviour for detect
self.assertEqual(should_detect(None), True)
if __name__ == '__main__':
unittest.main()
|
python
|
"""
This file is part of genofunc (https://github.com/xiaoyu518/genofunc).
Copyright 2020 Xiaoyu Yu ([email protected]) & Rachel Colquhoun ([email protected]).
"""
import os
import unittest
from genofunc.extract_metadata import *
this_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
data_dir = os.path.join(this_dir, 'tests', 'data')
class TestExtractMetadata(unittest.TestCase):
def test_run_extract_metadata(self):
in_fasta = "%s/sequences/seqB.fasta" %data_dir
in_metadata = "%s/metadata/metadataB.tsv" %data_dir
column = ["country"]
index_field = "strain"
out_fasta = "%s/output/tmp.extract.fasta" %data_dir
out_metadata = "%s/output/tmp.extracted_metadata.csv" %data_dir
log_file = "%s/output/extract_metadata.log" %data_dir
extract_metadata(in_fasta, in_metadata, column, index_field, out_fasta, out_metadata, log_file)
os.unlink(out_fasta)
os.unlink(out_metadata)
os.unlink(log_file)
|
python
|
from .atmosphere import Atmosphere
from .generalized_atmosphere import GeneralizedAtmosphere
from .generalized_matching import GeneralizedMatching
from .psycop import PSYCOP
from .illicit_conversion import IllicitConversion
from .logically_valid_lookup import LogicallyValidLookup
from .matching import Matching
from .mental_models import MentalModels
from .mreasoner import MReasoner
from .phm import PHM
from .verbal_models import VerbalModels
|
python
|
import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
import requests
import traceback
from typing import Dict, Any
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
TYPES = {
'threatTypes': ["MALWARE", "SOCIAL_ENGINEERING", "POTENTIALLY_HARMFUL_APPLICATION", "UNWANTED_SOFTWARE"],
'platformTypes': ["ANY_PLATFORM", "WINDOWS", "LINUX", "ALL_PLATFORMS", "OSX", "CHROME", "IOS", "ANDROID"]
}
INTEGRATION_NAME = 'GoogleSafeBrowsing'
URL_OUTPUT_PREFIX = 'GoogleSafeBrowsing.URL'
class Client(BaseClient):
def __init__(self, proxy: bool, verify: bool, reliability: str, base_url: str, params: dict):
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json'
}
super().__init__(proxy=proxy, verify=verify, base_url=base_url, headers=headers)
self.base_url = base_url
self.client_body = {
'clientId': params.get('client_id'),
'clientVersion': params.get('client_version'),
}
if DBotScoreReliability.is_valid_type(reliability):
self.reliability = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
else:
raise Exception("Google Safe Browsing v2 error: "
"Please provide a valid value for the Source Reliability parameter.")
def build_request_body(self, client_body: Dict, list_url: List) -> Dict:
""" build the request body according to the client body and the urls.
Args:
client_body: client body to add it in the request body
list_url: The urls list
Returns:
(dict) The request body, in the right format.
"""
list_urls = []
for url in list_url:
list_urls.append({"url": url})
body: Dict = {
"client": client_body,
"threatInfo": {
"threatTypes": TYPES.get('threatTypes'),
"platformTypes": TYPES.get('platformTypes'),
"threatEntryTypes": ["URL"],
"threatEntries": list_urls
}
}
return body
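    # For example, build_request_body(client_body, ["https://example.com"])
    # produces a dict of the form:
    #   {"client": {...},
    #    "threatInfo": {"threatTypes": [...], "platformTypes": [...],
    #                   "threatEntryTypes": ["URL"],
    #                   "threatEntries": [{"url": "https://example.com"}]}}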
def url_request(self, client_body, list_url) -> Dict:
""" send the url request.
Args:
client_body: client body to add it in the request body
list_url: The urls list
Returns:
(dict) The response from the request.
"""
body = self.build_request_body(client_body, list_url)
result = self._http_request(
method='POST',
json_data=body,
full_url=self.base_url)
return result
def test_module(client: Client) -> str:
"""
Performs basic get request to get sample URL details.
"""
try:
# testing a known malicious URL to check if we get matches
test_url = "http://testsafebrowsing.appspot.com/apiv4/ANY_PLATFORM/MALWARE/URL/"
res = client.url_request(client.client_body, [test_url])
if res.get('matches'): # matches - There is a match for the URL we were looking for
message = 'ok'
else:
            message = 'Error querying Google Safe Browsing. Expected a matching response, but received none.'
except DemistoException as e:
if 'Forbidden' in str(e) or 'Authorization' in str(e):
message = 'Authorization Error: please make sure the API Key is set correctly.'
else:
raise e
return message
def handle_errors(result: Dict) -> None:
"""
Handle errors, raise Exception when there is errors in the response.
"""
status_code = result.get('StatusCode', 0)
result_body = result.get('Body')
if result_body == '' and status_code == 204:
raise Exception('No content received. Possible API rate limit reached.')
    if not 200 <= status_code < 300:
raise Exception(f'Failed to perform request, request status code: {status_code}.')
if result_body == '':
        raise Exception('No content received. Maybe you tried a private API?')
if result.get('error'):
        error_message = result.get('error', {}).get('message')
        error_code = result.get('error', {}).get('code')
        raise Exception(f'Failed accessing Google Safe Browsing APIs. Error: {error_message}. Error code: {error_code}')
def arrange_results_to_urls(results: List, url_list: List) -> Dict:
""" Arrange and filter the URLs results according to the URLs list that we asked information on.
Args:
results: the API response.
url_list: The URLs list that we asked information on.
Returns:
(dict) The results according the urls.
"""
urls_results: Dict[str, list] = {}
for url in url_list:
urls_results[url] = []
for result in results:
url = result.get('threat', {}).get('url')
urls_results[url].append(result)
return urls_results
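# For example, given API matches for "http://a.test" only and a url_list of
# ["http://a.test", "http://b.test"], the result maps "http://a.test" to its
# match entries and "http://b.test" to an empty list.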
def url_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
"""
url command: Returns URL details for a list of URL
"""
url = argToList(args.get('url'))
result = client.url_request(client.client_body, url)
if not result or result.get('StatusCode'):
handle_errors(result)
urls_data = arrange_results_to_urls(result.get('matches'), url) # type: ignore
url_data_list = []
for url_key, url_data in urls_data.items():
if url_data:
dbot_score = Common.DBotScore(
indicator=url_key,
indicator_type=DBotScoreType.URL,
integration_name=INTEGRATION_NAME,
score=3,
reliability=client.reliability
)
url_standard_context = Common.URL(
url=url_key,
dbot_score=dbot_score
)
url_data_list.append(CommandResults(
readable_output=tableToMarkdown(f'Google Safe Browsing APIs - URL Query: {url_key}', url_data),
outputs_prefix=URL_OUTPUT_PREFIX,
outputs_key_field='IndicatorValue',
outputs=url_data,
indicator=url_standard_context
))
else:
dbot_score = Common.DBotScore(
indicator=url_key,
indicator_type=DBotScoreType.URL,
integration_name=INTEGRATION_NAME,
score=0,
reliability=client.reliability
)
url_standard_context = Common.URL(
url=url_key,
dbot_score=dbot_score
)
url_data_list.append(CommandResults(
readable_output=f'No matches for URL {url_key}',
outputs_prefix=URL_OUTPUT_PREFIX,
outputs_key_field='IndicatorValue',
outputs=result,
indicator=url_standard_context
))
return url_data_list
def build_base_url(params: Dict) -> str:
api_key = params.get('api_key')
base_url = params.get('url', '')
if not base_url.endswith('/'):
base_url += '/'
return f"{base_url}?key={api_key}"
def main() -> None:
params = demisto.params()
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
base_url = build_base_url(params)
reliability = params.get('integrationReliability')
reliability = reliability if reliability else DBotScoreReliability.B
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
params=params,
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
reliability=reliability)
if demisto.command() == 'test-module':
result = test_module(client)
return_results(result)
elif demisto.command() == 'url':
return_results(url_command(client, demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
''' ENTRY POINT '''
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
python
|
from typing import Optional
from odmantic import Model
class Person(Model):
name: str
age: Optional[int]
john = Person(name="John")
print(john.age)
#> None
|
python
|
""" Implementation of DRO models """
import gurobipy as grb
import numpy as np
from time import time
def train_classifier(Kernel, labels_raw, all_epsilon, all_kappa, nc):
print('Train class ', nc + 1, '...')
t = time()
n_samples = Kernel.shape[0]
alpha = np.zeros((n_samples, len(all_kappa), len(all_epsilon)))
labels = -np.ones(n_samples)
labels[labels_raw[nc]] = 1
for nk, kappa in enumerate(all_kappa):
for ne, epsilon in enumerate(all_epsilon):
optimal = ksvm(Kernel, labels, epsilon, kappa)
alpha[:, nk, ne] = optimal['alpha']
elapsed = time() - t
print('Class ', nc + 1, ' is trained in ', np.round(elapsed/60.0, 2), ' minutes.')
return alpha
def ksvm(Kernel, labels, epsilon, kappa):
""" kernelized SVM """
certif = np.linalg.eigvalsh(Kernel)[0]
if certif < 0:
Kernel = Kernel - 2 * certif * np.eye(Kernel.shape[0])
if epsilon == 0:
optimal = hinge_ksvm(Kernel, labels)
elif np.isinf(kappa):
optimal = regularized_ksvm(Kernel, labels, epsilon)
else:
optimal = dist_rob_ksvm(Kernel, labels, epsilon, kappa)
return optimal
def dist_rob_ksvm(Kernel, labels, epsilon, kappa):
""" kernelized distributionally robust SVM """
n_samples = Kernel.shape[0]
# Step 0: create model
model = grb.Model('Ker_DRSVM')
model.setParam('OutputFlag', False)
# Step 1: define decision variables
var_lambda = model.addVar(vtype=grb.GRB.CONTINUOUS)
var_s = {}
var_alpha = {}
for i in range(n_samples):
var_s[i] = model.addVar(vtype=grb.GRB.CONTINUOUS)
var_alpha[i] = model.addVar(
vtype=grb.GRB.CONTINUOUS, lb=-grb.GRB.INFINITY)
    # Step 2: integrate variables
model.update()
# Step 3: define constraints
chg_cons = {}
for i in range(n_samples):
model.addConstr(
1 - labels[i] * grb.quicksum(var_alpha[k] * Kernel[k, i]
for k in range(n_samples)) <= var_s[i])
chg_cons[i] = model.addConstr(
1 + labels[i] * grb.quicksum(var_alpha[k] * Kernel[k, i]
for k in range(n_samples)) -
kappa * var_lambda <= var_s[i])
model.addQConstr(
grb.quicksum(var_alpha[k1] * Kernel[k1, k2] * var_alpha[k2]
for k1 in range(n_samples)
for k2 in range(n_samples)) <= var_lambda * var_lambda)
# Step 4: define objective value
sum_var_s = grb.quicksum(var_s[i] for i in range(n_samples))
obj = var_lambda * epsilon + 1.0 / n_samples * sum_var_s
model.setObjective(obj, grb.GRB.MINIMIZE)
# Step 5: solve the problem
model.optimize()
# Step 6: store results
alpha_opt = np.array([var_alpha[i].x for i in range(n_samples)])
optimal = {
'alpha': alpha_opt,
'objective': model.ObjVal,
'diagnosis': model.status
}
return optimal
def regularized_ksvm(Kernel, labels, epsilon):
""" kernelized robust/regularized SVM """
n_samples = Kernel.shape[0]
# Step 0: create model
model = grb.Model('Ker_RSVM')
model.setParam('OutputFlag', False)
# Step 1: define decision variables
var_lambda = model.addVar(vtype=grb.GRB.CONTINUOUS)
var_s = {}
var_alpha = {}
for i in range(n_samples):
var_s[i] = model.addVar(vtype=grb.GRB.CONTINUOUS)
var_alpha[i] = model.addVar(
vtype=grb.GRB.CONTINUOUS, lb=-grb.GRB.INFINITY)
    # Step 2: integrate variables
model.update()
# Step 3: define constraints
for i in range(n_samples):
model.addConstr(
1 - labels[i] * grb.quicksum(var_alpha[k] * Kernel[k, i]
for k in range(n_samples)) <= var_s[i])
model.addQConstr(
grb.quicksum(var_alpha[k1] * Kernel[k1, k2] * var_alpha[k2]
for k1 in range(n_samples)
for k2 in range(n_samples)) <= var_lambda * var_lambda)
# Step 4: define objective value
sum_var_s = grb.quicksum(var_s[i] for i in range(n_samples))
obj = var_lambda * epsilon + 1.0 / n_samples * sum_var_s
model.setObjective(obj, grb.GRB.MINIMIZE)
# Step 5: solve the problem
model.optimize()
# Step 6: store results
alpha_opt = np.array([var_alpha[i].x for i in range(n_samples)])
optimal = {
'alpha': alpha_opt,
'objective': model.ObjVal,
'diagnosis': model.status
}
return optimal
def hinge_ksvm(Kernel, labels):
""" kernelized hinge loss minimization """
n_samples = Kernel.shape[0]
# Step 0: create model
model = grb.Model('Ker_RSVM')
model.setParam('OutputFlag', False)
# Step 1: define decision variables
var_s = {}
var_alpha = {}
for i in range(n_samples):
var_s[i] = model.addVar(vtype=grb.GRB.CONTINUOUS)
var_alpha[i] = model.addVar(
vtype=grb.GRB.CONTINUOUS, lb=-grb.GRB.INFINITY)
    # Step 2: integrate variables
model.update()
# Step 3: define constraints
for i in range(n_samples):
model.addConstr(
1 - labels[i] * grb.quicksum(var_alpha[k] * Kernel[k, i]
for k in range(n_samples)) <= var_s[i])
# Step 4: define objective value
sum_var_s = grb.quicksum(var_s[i] for i in range(n_samples))
obj = 1.0 / n_samples * sum_var_s
model.setObjective(obj, grb.GRB.MINIMIZE)
# Step 5: solve the problem
model.optimize()
# Step 6: store results
alpha_opt = np.array([var_alpha[i].x for i in range(n_samples)])
optimal = {
'alpha': alpha_opt,
'objective': model.ObjVal,
'diagnosis': model.status
}
return optimal
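# Minimal usage sketch (an assumption, not part of the original module): build a
# small Gaussian kernel from toy data and solve the distributionally robust SVM
# for one (epsilon, kappa) pair. Requires a working gurobipy installation/license.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X = rng.randn(20, 2)
    y = np.where(X[:, 0] >= 0, 1.0, -1.0)                  # toy labels in {-1, +1}
    sq_dists = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)
    K = np.exp(-sq_dists / 2.0)                            # Gaussian kernel matrix
    sol = ksvm(K, y, epsilon=0.1, kappa=1.0)               # dispatches to dist_rob_ksvm
    print('objective value:', sol['objective'])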
|
python
|
import sys
if len(sys.argv) != 2:
print("Usage: python buildcml.py <cml file>")
exit(1)
infile = sys.argv[1]
# file names
outfile_md = "docs/" + infile.split(".")[0] + ".md"
outfile_txt = infile.split(".")[0] + ".txt"
# file buffers
md_buffer = "# Controller Layouts\n"
txt_buffer = ""
with open(infile, "r") as f:
cml = eval(f.read())
f.close()
for controller in cml:
print(f"Parsing {controller} controller")
# add data to buffers
md_buffer += f"## {controller}\nType: {cml[controller]['Type']}\n\nPort: {cml[controller]['Port']}\n| | |\n| -- | -- |\n"
txt_buffer += f"-- {controller} --\nType: {cml[controller]['Type']}\nPort: {cml[controller]['Port']}\n"
# parse through inputs
for input_type in cml[controller]:
if type(cml[controller][input_type]) != type({}):
# Skip non iterable items
continue
# add data to buffers
md_buffer += f"| {input_type} | -- |\n"
txt_buffer += f" {input_type}:\n"
# parse items
for item in cml[controller][input_type]:
# deal with extra nesting
if type(cml[controller][input_type][item]) == type({}):
md_buffer += f"| {item} | -- |\n"
txt_buffer += f" {item}:\n"
for subitem in cml[controller][input_type][item]:
md_buffer += f"| {subitem} | {cml[controller][input_type][item][subitem]} |\n"
txt_buffer += f" {subitem}: {cml[controller][input_type][item][subitem]}\n"
continue
# add data to buffers
md_buffer += f"| {item} | {cml[controller][input_type][item]} |\n"
txt_buffer += f" {item}: {cml[controller][input_type][item]}\n"
print("Done.")
print("Writing to files...")
with open(outfile_txt, "w") as f:
f.writelines(txt_buffer)
f.close()
with open(outfile_md, "w") as f:
f.writelines(md_buffer)
    f.close()
print("Done.")
|
python
|
from dataclasses import field
from datetime import datetime
from typing import List, Optional
from pydantic.dataclasses import dataclass
@dataclass
class TypeA:
one: str
two: float
@dataclass
class TypeB(TypeA):
one: str
three: bool = field(default=True)
@dataclass
class TypeC(TypeB):
four: List[datetime] = field(default_factory=list, metadata={"format": "%d %B %Y %H:%M"})
any: Optional[object] = field(default=None, metadata={"type": "Wildcard"})
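# Hedged usage sketch: fields inherited from TypeA/TypeB stay required, while
# TypeC's own fields fall back to their declared defaults.
item = TypeC(one="a", two=1.0)
print(item.three, item.four, item.any)
#> True [] None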
|
python
|
import requests
import unittest
class TestStringMethods(unittest.TestCase):
'''def test_000_operacoes_ola1(self):
r = requests.get('http://localhost:5000/ola/marcio')
self.assertEqual(r.text,'ola marcio')
r = requests.get('http://localhost:5000/ola/mario')
self.assertEqual(r.text,'ola mario')
def test_001_operacoes_ola2(self):
r = requests.get('http://localhost:5000/ola_upgrade?pessoa1=marcio&pessoa2=alvaro')
self.assertEqual(r.text,'ola marcio e alvaro')
r = requests.get('http://localhost:5000/ola_upgrade?pessoa2=alvaro&pessoa1=marcio')
self.assertEqual(r.text,'ola marcio e alvaro')
r = requests.get('http://localhost:5000/ola_upgrade?pessoa2=robin&pessoa1=batman')
self.assertEqual(r.text,'ola batman e robin')
def test_002_operacoes_ola3(self):
r = requests.post('http://localhost:5000/ola_upgrade', json={'pessoa1':'batman','pessoa2':'robin'})
self.assertEqual(r.text,'ola batman e robin')
r = requests.post('http://localhost:5000/ola_upgrade', json={'pessoa1':'tonico','pessoa2':'tinoco'})
self.assertEqual(r.text,'ola tonico e tinoco')
def test_003_operacoes_ola_com_dic(self):
r = requests.get('http://localhost:5000/ola_com_dic?pessoa1=barney&pessoa2=fred')
self.assertEqual(r.json()['pessoa1'],'barney')
self.assertEqual(r.json()['pessoa2'],'fred')
r = requests.get('http://localhost:5000/ola_com_dic?pessoa2=ron&pessoa1=harry')
self.assertEqual(r.json()['pessoa1'],'harry')
self.assertEqual(r.json()['pessoa2'],'ron')
def test_004_operacoes_ola_com_dic(self):
r = requests.get('http://localhost:5000/ola_com_dic?pessoa1=barney')
self.assertEqual(r.status_code,400)
self.assertEqual(r.json()['erro'],'falta gente')
r = requests.get('http://localhost:5000/ola_com_dic?pessoa2=barney')
self.assertEqual(r.status_code,400)
self.assertEqual(r.json()['erro'],'falta gente')
def test_005_operacoes_ola_com_dic(self):
r = requests.post('http://localhost:5000/ola_com_dic',
json={'pessoa1':'barney','pessoa2':'fred'})
self.assertEqual(r.json()['pessoa1'],'barney')
self.assertEqual(r.json()['pessoa2'],'fred')
r = requests.post('http://localhost:5000/ola_com_dic',
json={'pessoa1':'harry','pessoa2':'ron'})
self.assertEqual(r.json()['pessoa1'],'harry')
self.assertEqual(r.json()['pessoa2'],'ron')
def test_006_operacoes_ola_com_dic(self):
r = requests.post('http://localhost:5000/ola_com_dic',
json={'pessoa2':'fred'})
self.assertEqual(r.status_code,400)
self.assertEqual(r.json()['erro'],'falta gente')
r = requests.post('http://localhost:5000/ola_com_dic',
json={'pessoa1':'harry'})
self.assertEqual(r.status_code,400)
self.assertEqual(r.json()['erro'],'falta gente')
'''
def test_100_arquivo_aquecimento(self):
        import aquecimento_dicionarios # this test checks that the aquecimento_dicionarios file is in the same folder as runtests.py
def test_101_aquecimento_consulta(self):
self.carregar_arquivo_aquecimento()
self.assertEqual(consulta('tt0076759','lucio')['comment'],'achei legal')
self.assertEqual(consulta('tt0076759','marcos')['comment'],'gostei')
self.assertEqual(consulta('tt0076759','maria'),'nao encontrado')
def test_102_aquecimento_adiciona(self):
self.carregar_arquivo_aquecimento()
self.assertEqual(consulta('1212','maria'),'nao encontrado')
adiciona('1212','maria','filme otimo')
self.assertEqual(consulta('1212','maria')['comment'],'filme otimo')
def test_103_aquecimento_adiciona(self):
self.carregar_arquivo_aquecimento()
adiciona('1212','maria','filme otimo')
self.assertEqual(consulta('1212','maria')['comment'],'filme otimo')
antes = len(reviews_aquecimento)
adiciona('1212','maria','mudei de ideia')
self.assertEqual(consulta('1212','maria')['comment'],'mudei de ideia')
adiciona('1212','maria','quer saber? bom mesmo')
self.assertEqual(consulta('1212','maria')['comment'],'quer saber? bom mesmo')
depois = len(reviews_aquecimento)
self.assertEqual(antes,depois)
def test_203_pega_review(self):
r = requests.get('http://localhost:5001/socialfilm/reviews/tt0076759/marcos')
self.assertEqual(r.json()['user_id'],'marcos')
self.assertTrue('gostei' in r.json()['comment'])
r = requests.get('http://localhost:5001/socialfilm/reviews/tt0076759/lucio')
self.assertEqual(r.json(),{'user_id':'lucio','comment':'achei legal'})
r = requests.get('http://localhost:5001/socialfilm/reviews/tt1211837/lucio')
self.assertEqual(r.json(),{'user_id':'lucio','comment':'estranho'})
def test_204_pega_review_com_erro(self):
r = requests.get('http://localhost:5001/socialfilm/reviews/outro/gato')
self.assertEqual(r.json(),{'erro':'comentario nao encontrado'})
self.assertEqual(r.status_code,404)
def test_205_adiciona_review(self):
r = requests.put('http://localhost:5001/socialfilm/reviews/tt1211837/marcos',
json={'comment':'esquisito mesmo'})
self.assertEqual(r.json()['user_id'],'marcos')
self.assertEqual(r.json()['comment'],'esquisito mesmo')
r = requests.get('http://localhost:5001/socialfilm/reviews/tt1211837/marcos')
self.assertEqual(r.json(),{'user_id':'marcos','comment':'esquisito mesmo'})
r = requests.put('http://localhost:5001/socialfilm/reviews/tt0087332/marcos',
json={'comment':'curiosa mistura de fantasmas e empreendedorismo'})
self.assertEqual(r.json()['user_id'],'marcos')
self.assertEqual(r.json()['comment'],'curiosa mistura de fantasmas e empreendedorismo')
r = requests.get('http://localhost:5001/socialfilm/reviews/tt0087332/marcos')
self.assertEqual(r.json()['user_id'],'marcos')
self.assertEqual(r.json()['comment'],'curiosa mistura de fantasmas e empreendedorismo')
def test_206_muda_review(self):
antes = self.total_reviews()
r = requests.put('http://localhost:5001/socialfilm/reviews/tt0087332/marcos',
json={'comment':'mudei de ideia. Nao gosto de fantasmas'})
self.assertEqual(r.json()['user_id'],'marcos')
self.assertEqual(r.json()['comment'],'mudei de ideia. Nao gosto de fantasmas')
r = requests.get('http://localhost:5001/socialfilm/reviews/tt0087332/marcos')
self.assertEqual(r.json()['user_id'],'marcos')
self.assertEqual(r.json()['comment'],'mudei de ideia. Nao gosto de fantasmas')
depois = self.total_reviews()
self.assertEqual(antes,depois)
def test_207_all_films(self):
r = requests.get('http://localhost:5001/socialfilm/reviews/all_films/marcos')
lista_respostas = r.json()
self.assertTrue(len(lista_respostas) >= 2)
achei_dr_strange = False
for review in r.json():
if review['film_id'] == 'tt1211837':
achei_dr_strange = True
if not achei_dr_strange:
self.fail('a lista de reviews do marcos nao contem o filme dr strange')
def test_208_estrelas(self):
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/marcos')
self.assertEqual(int(r.json()['stars']),4)
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/lucio')
self.assertEqual(int(r.json()['stars']),5)
r = requests.get('http://localhost:5001/socialfilm/stars/tt1211837/lucio')
self.assertEqual(int(r.json()['stars']),2)
        self.assertEqual(r.status_code,200) # normal status code, which you get
        # if you simply do nothing special
def test_209_estrelas_review_nao_encontrada(self):
r = requests.get('http://localhost:5001/socialfilm/stars/tt1211837/marcos')
self.assertTrue('error' in r.json())
self.assertEqual(r.json()['error'],'review nao encontrada')
self.assertEqual(r.status_code,404)
def test_210_novas_estrelas(self):
r = requests.put('http://localhost:5001/socialfilm/stars/tt0119177/marcos',
json={'stars':3})
r = requests.get('http://localhost:5001/socialfilm/stars/tt0119177/marcos')
self.assertEqual(r.json()['stars'],3)
contagem = self.total_stars()
r = requests.put('http://localhost:5001/socialfilm/stars/tt0119177/marcos',
json={'stars':4})
r = requests.get('http://localhost:5001/socialfilm/stars/tt0119177/marcos')
self.assertEqual(r.json()['stars'],4)
cont_depois = self.total_stars()
self.assertEqual(contagem,cont_depois)
def test_211_average_stars(self):
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/average')
self.assertTrue(4.4 < r.json()['average_stars'] < 4.6)
r = requests.put('http://localhost:5001/socialfilm/stars/tt0076759/marcos',
json={'stars':1})
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/average')
self.assertTrue(2.9 < r.json()['average_stars'] < 3.1)
r = requests.put('http://localhost:5001/socialfilm/stars/tt0076759/marcos',
json={'stars':4})
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759/average')
self.assertTrue(4.4 < r.json()['average_stars'] < 4.6)
def test_301_filme_invalido(self):
r = requests.put('http://localhost:5001/socialfilm/reviews/jamesbond/marcos',
json={'comment':'mudei de ideia. Nao gosto de fantasmas'})
self.assertEqual(r.json()['error'],'filme nao encontrado')
self.assertEqual(r.status_code,404)
def test_302_all_films_nome(self):
r = requests.get('http://localhost:5001/socialfilm/reviews/all_films/marcos')
lista_respostas = r.json()
achei_dr_strange = False
achei_star_wars = False
for review in r.json():
if 'film_name' not in review:
self.fail('achei um filme sem nome!')
if 'trange' in review['film_name']:
achei_dr_strange = True
if 'ars' in review['film_name']:
achei_star_wars = True
if not achei_dr_strange:
self.fail('a lista de reviews do marcos nao contem o nome do dr strange')
if not achei_star_wars:
self.fail('a lista de reviews do marcos nao contem o nome do star wars')
def test_303_all_films_nao_deve_alterar_a_review(self):
r = requests.get('http://localhost:5001/socialfilm/all')
lista_reviews = r.json()['reviews']
for review in lista_reviews:
if 'film_name' in review:
self.fail('voce alterou as reviews do servidor, colocando nome')
def test_304_estrelas_filme_inexistente(self):
r = requests.get('http://localhost:5001/socialfilm/stars/tt0076759nao/marcos')
self.assertTrue('error' in r.json())
self.assertEqual(r.json()['error'],'filme nao encontrado')
r = requests.get('http://localhost:5001/socialfilm/stars/tt00076759/marcos')
self.assertTrue('error' in r.json())
self.assertEqual(r.json()['error'],'filme nao encontrado')
self.assertEqual(r.status_code,404)
def total_reviews(self):
r = requests.get('http://localhost:5001/socialfilm/all')
return len(r.json()['reviews'])
def total_stars(self):
r = requests.get('http://localhost:5001/socialfilm/all')
return len(r.json()['notas'])
def carregar_arquivo_aquecimento(self):
'''
        loads the aquecimento_dicionarios module, if
        it has not been loaded yet
'''
global consulta,adiciona,reviews_aquecimento
try:
            consulta # if the module has not been loaded yet,
            # this line raises and the except branch runs
except:
            from aquecimento_dicionarios import consulta, adiciona # then load it
from aquecimento_dicionarios import reviews_aquecimento
def runTests():
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestStringMethods)
unittest.TextTestRunner(verbosity=2,failfast=True).run(suite)
try:
from aquecimento_dicionarios_gabarito_NAO import consulta,adiciona
from aquecimento_dicionarios_gabarito_NAO import reviews_aquecimento
except:
pass
if __name__ == '__main__':
runTests()
|
python
|
from pytest import (fixture, mark)
from wrdpzl import (Board, Solver)
@fixture(scope='module')
def words():
with open('words.txt') as file:
return list(map(str.strip, file.readlines()))
@fixture(scope='module')
def solver(words):
return Solver(words)
@mark.timeout(0.5)
@mark.parametrize('board', [
(Board.load(['performance'] * 10)),
(Board.load(['top' * 5] * 15)),
(Board.load(['up' * 75] * 150)),
])
def test_performance(board, solver):
assert solver.solve(board) != []
|
python
|
# Copyright (c) 2017-2021 Analog Devices Inc.
# All rights reserved.
# www.analog.com
#
# SPDX-License-Identifier: Apache-2.0
#
import PMBus_I2C
from encodings import hex_codec
import codecs
from time import *
from array import array
import math
import sys
if sys.version_info.major < 3:
input = raw_input
class dac_data:
def __init__(self, address=None, input_channel=None):
self.address = address
self.input_channel = input_channel - 1
ADM1266_Address = 0x00
config_file_name = ""
firmware_file_name = ""
crc_name = ['Main Mini Bootloader CRC', 'Main Bootloader CRC', 'Backup Mini Bootloader CRC', 'Backup Bootloader CRC',
'Main AB Config CRC', 'Main Project CRC',
'Main Firmware CRC', 'Main Password CRC', 'Backup AB Config CRC', 'Backup Project CRC',
'Backup Firmware CRC', 'Backup Password CRC']
# Based on the number of devices, the following function calls subfunctions to pause the sequence, program the firmware hex, and do a system (ADM1266 CPU) reset.
def program_firmware():
for x in range(len(ADM1266_Address)):
pause_sequence(ADM1266_Address[x])
for x in range(len(ADM1266_Address)):
print('Loading firmware to device {0:#04x}.'.format(ADM1266_Address[x]))
program_firmware_hex(ADM1266_Address[x], firmware_file_name, True)
system_reset(ADM1266_Address[x])
# Based on the number of devices, the following function calls subfunctions to pause the sequence, program the hex file, start the sequence and trigger a memory refresh.
# If the number of configuration files provided does not equal the number of PMBus device addresses, the function will not proceed.
def program_configration(reset=True):
if len(ADM1266_Address) == len(config_file_name):
for x in range(len(ADM1266_Address)):
pause_sequence(ADM1266_Address[x], reset)
for x in range(len(ADM1266_Address)):
print('Loading configuration to device {0:#04x}.'.format(ADM1266_Address[x]))
program_hex(ADM1266_Address[x], config_file_name[x])
for x in range(len(ADM1266_Address)):
start_sequence(ADM1266_Address[x])
for x in range(len(ADM1266_Address)):
unlock(ADM1266_Address[x])
refresh_flash(ADM1266_Address[x])
print('Running Memory Refresh.')
delay(10000)
else:
print("Number of devices does not match with number of configuration files provided.")
# Reads back the firmware version number and checks for CRC errors.
# If any CRC fails it will display which CRC is failing, otherwise it displays "All CRC Passed".
def crc_summary():
print("\n\nProgramming Summary")
print("---------------------------------------")
for x in range(len(ADM1266_Address)):
recalculate_crc(ADM1266_Address[x])
crc_status = all_crc_status(ADM1266_Address[x])
fw_version = get_firmware_rev(ADM1266_Address[x])
print(
'\nFirmware version in device {3:#04x} is v{0}.{1}.{2} '.format(fw_version[0], fw_version[1], fw_version[2],
ADM1266_Address[x]))
if crc_status > 0:
print('The following CRC failed in device {0:#04x}:'.format(ADM1266_Address[x]))
for y in range(0, 12):
if (((int(crc_status) & int(math.pow(2, y))) >> int(y)) == 1):
print(crc_name[y])
else:
print('All CRC passed in device {0:#04x}.'.format(ADM1266_Address[x]))
# Based on the number of devices the following function checks if there is a bootloader and the part is unlocked.
# If the part is not unlocked then unlock the part.
def program_firmware_hex(device_address, file, unlock_part):
bootloadVer = get_bootload_rev(device_address)
if bootloadVer != array('B', [0, 0, 0]):
if unlock_part:
unlock(device_address)
            assert islocked(device_address) == False, 'device @0x{0:02X} should be unlocked!'.format(device_address)
jump_to_iap(device_address)
hex = open(file, "rb")
count = 0
for line in hex.readlines():
if (line.startswith(b":00000001FF")):
break
data_len = int(line[1:3], 16)
cmd = int(line[3:7], 16)
# data = [] if data_len == 0 else array('B', line[9:9 + data_len * 2].decode("hex")).tolist()
data = [] if data_len == 0 else array('B', codecs.decode((line[9:9 + data_len * 2]), "hex_codec")).tolist()
if cmd != 0xD8:
PMBus_I2C.PMBus_Write(device_address, [cmd] + data)
if count == 0:
count = 1
delay(3000)
else:
delay(10)
# The following function unlocks the ADM1266 (if locked), pauses the sequence, points to main memory, and writes the configuration to the part with the respective delays
def program_hex(device_address, file, unlock_and_stop=True, main=True):
hex = open(file, "rb")
if unlock_and_stop:
unlock(device_address)
        assert islocked(device_address) == False, 'device @0x{0:02X} should be unlocked!'.format(device_address)
switch_memory(device_address, main)
for line in hex.readlines():
if (line.startswith(b":00000001FF")):
break
data_len = int(line[1:3], 16)
cmd = int(line[3:7], 16)
# data = [] if data_len == 0 else array('B', line[9:9 + data_len * 2].decode("hex")).tolist()
data = [] if data_len == 0 else array('B', codecs.decode((line[9:9 + data_len * 2]), "hex_codec")).tolist()
if cmd != 0xD8:
PMBus_I2C.PMBus_Write(device_address, [cmd] + data)
delayMs = 0
offset = 0
if cmd == 0xD8:
delayMs = 100
elif cmd == 0x15:
delayMs = 300
elif cmd == 0xD7:
offset = (data[1] | (data[2] << 8))
delayMs = 400 if offset == 0 else 40
elif cmd == 0xE3:
offset = (data[1] | (data[2] << 8))
delayMs = 100 if offset == 0 else 40
elif cmd == 0xE0:
offset = (data[1] | (data[2] << 8))
delayMs = 200 if offset == 0 else 40
elif cmd == 0xD6:
if data[1] == 0xff and data[2] == 0xff:
pageCount = data[3]
delayMs = 100 + (pageCount - 1) * 30
else:
delayMs = 40
elif cmd == 0xF8:
delayMs = 100
delay(delayMs)
# All the functions from here onward write to the ADM1266 to perform different tasks
def refresh_flash(device_address, config=2):
PMBus_I2C.PMBus_Write(device_address, [0xF5, 0x01, config])
# delay(10000)
def system_reset(device_address):
PMBus_I2C.PMBus_Write(device_address, [0xD8, 0x04, 0x00])
delay(1000)
def recalculate_crc(device_address):
PMBus_I2C.PMBus_Write(device_address, [0xF9, 1, 0])
delay(600)
def unlock(device_address,
pwd=[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]):
write_password(device_address, 0x02, pwd)
delay(1)
write_password(device_address, 0x02, pwd)
delay(1)
def write_password(device_address, cmd, pwd):
assert len(pwd) == 16
data = [0xFD, 0x11] + pwd + [cmd]
PMBus_I2C.PMBus_Write(device_address, data)
def pause_sequence(device_address, reset_sequence=True):
PMBus_I2C.PMBus_Write(device_address, [0xD8, 0x03 if reset_sequence else 0x11, 0x00])
delay(10)
def start_sequence(device_address, reset=False):
    if reset:
        PMBus_I2C.PMBus_Write(device_address, [0xD8, 0x02, 0x00])
    PMBus_I2C.PMBus_Write(device_address, [0xD8, 0x00, 0x00])
    delay(500)
def switch_memory(device_address, main):
PMBus_I2C.PMBus_Write(device_address, [0xFA, 1, 0 if main else 1])
def status_mfr_specific(device_address):
return PMBus_I2C.PMBus_Write_Read(device_address, [0x80], 1)
def islocked(device_address):
status = status_mfr_specific(device_address)
    return (status[0] & 0x04) > 0
def get_bootload_rev(device_address):
data = PMBus_I2C.PMBus_Write_Read(device_address, [0xAE], 9)
return data[4:7]
def get_firmware_rev(device_address):
data = PMBus_I2C.PMBus_Write_Read(device_address, [0xAE], 9)
return data[1:4]
def jump_to_iap(device_address):
PMBus_I2C.PMBus_Write(device_address, [0xFC, 2, 0, 0])
delay(1000)
def all_crc_status(device_address):
status = PMBus_I2C.PMBus_Write_Read(device_address, [0xED], 2)
status = status[0] + (status[1] << 8)
return (status >> 4)
def delay(ms):
sleep((ms + 1) / 1000.0) # http://stackoverflow.com/questions/1133857/how-accurate-is-pythons-time-sleep
def refresh_status():
refresh_running = False
for x in range(len(ADM1266_Address)):
status = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[x], [0x80], 1)
refresh = (status[0] & 0x08) >> 3
if refresh == 1:
refresh_running = True
return refresh_running
def device_present():
    all_present = False
    for x in range(len(ADM1266_Address)):
        ic_id = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[x], [0xAD], 4)
        if len(ic_id) == 4:
            if (ic_id[1] == 66 or ic_id[1] == 65) and ic_id[2] == 18 and ic_id[3] == 102:
                all_present = True
            else:
                all_present = False
                raise Exception('Device with address ' + hex(ADM1266_Address[x]) + " is not present.")
        else:
            all_present = False
            raise Exception('Device with address ' + hex(ADM1266_Address[x]) + " is not present.")
    return all_present
def margin_all(margin_type, group_command=False):
margin_type = margin_type.upper()
if margin_type == "HIGH":
command_data = 0xA4
elif margin_type == "LOW":
command_data = 0x94
elif margin_type == "VOUT":
command_data = 0x84
else:
command_data = 0x44
for x in range(len(ADM1266_Address)):
status = PMBus_I2C.PMBus_Write(ADM1266_Address[x], [0x00, 0xFF])
if group_command == True:
status = PMBus_I2C.PMBus_Group_Write(ADM1266_Address, [0x01, command_data])
else:
for x in range(len(ADM1266_Address)):
status = PMBus_I2C.PMBus_Write(ADM1266_Address[x], [0x01, command_data])
print("Margin all rails - " + margin_type)
def dac_mapping():
dac_config_data = []
for x in range(len(ADM1266_Address)):
for y in range(9):
            dac_config_reg = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[x], [0xD5, 0x01, y], 3)
            dac_config_reg = dac_config_reg[1] + (dac_config_reg[2] << 8)
            if (((dac_config_reg >> 6) & 0x1f) != 0):
                dac_config_data.append(dac_data(ADM1266_Address[x], ((dac_config_reg >> 6) & 0x1f)))
return dac_config_data
def margin_single(device_address, pin_number, margin_type):
# device_address = device_address
margin_type = margin_type.upper()
# pin_name = pin_name.upper()
# pin_number = 0xFF
# if pin_name == "VH1":
# pin_number = 0x00
# elif pin_name == "VH2":
# pin_number = 0x01
# elif pin_name == "VH3":
# pin_number = 0x02
# elif pin_name == "VH4":
# pin_number = 0x03
# elif pin_name == "VP1":
# pin_number = 0x04
# elif pin_name == "VP2":
# pin_number = 0x05
# elif pin_name == "VP3":
# pin_number = 0x06
# elif pin_name == "VP4":
# pin_number = 0x07
# elif pin_name == "VP5":
# pin_number = 0x08
# elif pin_name == "VP6":
# pin_number = 0x09
# elif pin_name == "VP7":
# pin_number = 0x0A
# elif pin_name == "VP8":
# pin_number = 0x0B
# elif pin_name == "VP9":
# pin_number = 0x0C
# elif pin_name == "VP10":
# pin_number = 0x0D
# elif pin_name == "VP11":
# pin_number = 0x0E
# elif pin_name == "VP12":
# pin_number = 0x0F
# elif pin_name == "VP13":
# pin_number = 0x10
# else:
# pin_number = 0xFF
if margin_type == "HIGH":
command_data = 0xA4
elif margin_type == "LOW":
command_data = 0x94
elif margin_type == "VOUT":
command_data = 0x84
else:
command_data = 0x44
dac_index = 0
    if pin_number == 0xFF:
print("Please enter a valid pin number.")
else:
for dac_index in range(9):
data = PMBus_I2C.PMBus_Write_Read(device_address, [0xD5, 1, dac_index], 3)
data_combine = data[1] + (data[2] << 8)
dac_mapping = (data_combine >> 6) & 0x1F
if (dac_mapping == (pin_number + 1)):
dac_check = True
break
else:
dac_check = False
if (dac_check == True):
status = PMBus_I2C.PMBus_Write(device_address, [0x00, pin_number])
status = PMBus_I2C.PMBus_Write(device_address, [0x01, command_data])
print("Rail margined - " + margin_type.lower())
else:
print("Input channel is not closed loop margined by any DAC.")
def margin_open_loop(device_address, dac_name, dac_voltage):
device_address = int(device_address, 16)
dac_voltage = float(dac_voltage)
dac_name = dac_name.upper()
dac_names = ["DAC1", "DAC2", "DAC3", "DAC4", "DAC5", "DAC6", "DAC7", "DAC8", "DAC9"]
dac_index = 0xff
if dac_name in dac_names:
dac_index = dac_names.index(dac_name)
if dac_voltage >= 0.202 and dac_voltage <= 0.808:
mid_code = 0
dac_code = dac_code_calc(dac_voltage, 0.506)
elif dac_voltage >= 0.707 and dac_voltage <= 1.313:
mid_code = 3
dac_code = dac_code_calc(dac_voltage, 1.011)
elif dac_voltage >= 0.959 and dac_voltage <= 1.565:
mid_code = 4
dac_code = dac_code_calc(dac_voltage, 1.263)
else:
mid_code = 5
if mid_code < 5:
dac_code_parameter = 0x01 + (mid_code << 1)
dac_config_data = [0xEB, 0x03, dac_index, dac_code_parameter, dac_code]
status = PMBus_I2C.PMBus_Write(device_address, dac_config_data)
else:
print("Enter DAC voltage in between 0.202V - 1.565V.")
else:
print("Enter a valid DAC name.")
def dac_config(device_address, dac_name):
device_address = int(device_address, 16)
dac_name = dac_name.upper()
dac_names = ["DAC1", "DAC2", "DAC3", "DAC4", "DAC5", "DAC6", "DAC7", "DAC8", "DAC9"]
dac_index = 0xff
if dac_name in dac_names:
dac_index = dac_names.index(dac_name)
write_data = [0xD5, 0x01, dac_index]
read_data = PMBus_I2C.PMBus_Write_Read(device_address, write_data, 3)
margin_mode = read_data[1] & 0x03
if margin_mode != 1:
print("\nSelected DAC is not configured as open loop, would you like to configure the DAC as open loop?")
set_open_loop = input("Enter 'Y' for yes or press enter to exit: ")
set_open_loop = set_open_loop.upper()
if set_open_loop == "Y":
write_data = [0xD5, 0x03, dac_index, 0x01, 0x00]
status = PMBus_I2C.PMBus_Write(device_address, write_data)
return True
else:
print("DAC is not configured as open loop, output voltage could not be set.")
return False
else:
return True
else:
print("Enter a valid DAC name.")
return False
def dac_code_calc(dac_voltage, mid_code_volt):
dac_code = int((mid_code_volt - dac_voltage) / (0.606 / 256)) + 127
return dac_code
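# Worked example (illustrative values): for a 1.000 V target in the
# 0.707 V - 1.313 V band the mid-code voltage is 1.011 V, so
# dac_code_calc(1.0, 1.011) = int((1.011 - 1.0) / (0.606 / 256)) + 127 = 4 + 127 = 131.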
def margin_single_percent(device_address, pin_number, margin_percent):
# Set page to respective input channel
write_data = [0x00, pin_number]
status = PMBus_I2C.PMBus_Write(device_address, write_data)
# Readback exp and ment
write_data = [0x20]
data = PMBus_I2C.PMBus_Write_Read(device_address, write_data, 1)
exp = data[0]
write_data = [0x21]
data = PMBus_I2C.PMBus_Write_Read(device_address, write_data, 2)
ment = data[0] + (data[1] << 8)
nominal_value = ment_exp_to_val(exp, ment)
# Calculate ment for margin high
margin_high = nominal_value * ((100 + margin_percent) / 100)
ment = val_to_ment(margin_high, exp)
write_data = [None] * 3
write_data[1] = ment & 0xFF
write_data[2] = ment >> 8
write_data[0] = 0x25
status = PMBus_I2C.PMBus_Write(device_address, write_data)
# Calculate ment for margin low
margin_low = nominal_value * ((100 - margin_percent) / 100)
ment = val_to_ment(margin_low, exp)
write_data[1] = ment & 0xFF
write_data[2] = ment >> 8
write_data[0] = 0x26
status = PMBus_I2C.PMBus_Write(device_address, write_data)
def ment_exp_to_val(exp, ment):
value = exp_calc(exp)
value = ment * (2 ** value)
return value
def val_to_ment(value, exp):
value = value / (2 ** exp_calc(exp))
return int(value)
def exp_calc(value):
if value < 16:
temp = value
else:
temp = value - 32
return temp
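# Worked example of the exponent/mantissa helpers above (illustrative values):
# a raw exponent byte of 20 decodes via exp_calc to 20 - 32 = -12, so a mantissa
# of 9830 gives ment_exp_to_val(20, 9830) = 9830 * 2**-12 ~= 2.400 V, and
# val_to_ment(2.4, 20) = int(2.4 / 2**-12) = 9830.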
# Copyright (c) 2017 Analog Devices Inc.
# All rights reserved.
# www.analog.com
# --------------------------------------------------------------------------
# Redistribution and use of this file in source and binary forms, with
# or without modification, are permitted.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ==========================================================================
import datetime
import codecs
from array import array
import PMBus_I2C
# variables
VH_Names = ["VH1", "VH2", "VH3", "VH4"]
VP_Names = ["VP1", "VP2", "VP3", "VP4", "VP5", "VP6", "VP7", "VP8", "VP9", "VP10", "VP11", "VP12", "VP13"]
VX_Names = ["VH1", "VH2", "VH3", "VH4", "VP1", "VP2", "VP3", "VP4", "VP5", "VP6", "VP7", "VP8", "VP9", "VP10", "VP11",
"VP12", "VP13"]
PDIO_GPIO_Names = ["PDIO1", "PDIO2", "PDIO3", "PDIO4", "PDIO5", "PDIO6", "PDIO7", "PDIO8", "PDIO9", "PDIO10", "PDIO11",
"PDIO12", "PDIO13", "PDIO14", "PDIO15", "PDIO16", "GPIO1", "GPIO2", "GPIO3", "GPIO4", "GPIO5",
"GPIO6", "GPIO7", "GPIO8", "GPIO9"]
PDIO_GPIO_Pad = [0, 22, 30, 31, 32, 33, 34, 35, 36, 37, 23, 24, 25, 26, 27, 28, 29, 14, 15, 16, 44, 45, 46, 43, 18, 19]
VX_Pad = [0, 47, 48, 49, 50, 51, 56, 57, 58, 59, 60, 61, 62, 63, 52, 53, 54, 55]
GPIO = [0 for k in range(10)]
Normal_Rails = list()
Disabled_Rails = list()
OV_Rails = list()
UV_Rails = list()
System_Data = list()
State_Names = list()
Signals_Status = list()
ADM1266_Address = list()
Summary_Data = [0 for k in range(6)]
Record_Index = 0
Num_Records = 0
# function to dynamically initialize nested lists to store system and blackbox data
def Init_Lists():
Address = ADM1266_Address
global VH_Data
VH_Data = [[[0 for k in range(15)] for j in range(5)] for i in range(len(Address))]
# i - dev_id, j - VH1 - 4, k - Name, PDIO_num, PDIO_dev_id, PDIO_pol, OV BB status, UV BB status, PDIO BB Status,
# Exp, Mant, OV Status, UV Status, OW Status, UW Status, Enable Status
global VP_Data
VP_Data = [[[0 for k in range(15)] for j in range(14)] for i in range(len(Address))]
# i - dev_id, j - VP1 - 13, k - Name, PDIO_num, PDIO_dev_id, PDIO_pol, OV BB status, UV BB status, PDIO BB Status,
# Exp, Mant, OV Status, UV Status, OW Status, UW Status, Enable Status
global BB_Data
BB_Data = [[0 for k in range(65)] for i in range(len(Address))]
# i - dev_id, k - BB data
global Signals_Data
Signals_Data = [[[0 for k in range(7)] for j in range(26)] for i in range(len(Address))]
# i - dev_id, j - PDIO16+GPIO9, k - Name, PDIO_num, PDIOGPIOType, Direction, Input BB Status, Output BB Status, PDIO Inst Status
# readback from first device and get the number of records and index available
def Number_Of_Records():
write_data = [0xE6]
read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[0], write_data, 5)
global Record_Index
global Num_Records
Record_Index = read_data[3]
Num_Records = read_data[4]
# for the record number provided, based on the number of records and the last index, calculate the record index and read back the blackbox
# information from all the devices
# blackbox raw data is saved in the BB_Data list
def Get_Raw_Data(record_number):
j = Record_Index + int(record_number) - Num_Records
if j < 0:
j += 32
for i in range(len(ADM1266_Address)):
BB_Data[i] = Indexed_Blackbox_Data(ADM1266_Address[i], j)
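# Worked example (illustrative values): with Record_Index = 5 and Num_Records = 20,
# requesting record_number 3 gives j = 5 + 3 - 20 = -12, which wraps to ring-buffer
# index 20 of the 32-entry blackbox store before Indexed_Blackbox_Data is called.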
def Blackbox_Clear():
write_data = [0xDE, 0x02, 0xFE, 0x00]
for i in range(len(ADM1266_Address)):
read_data = PMBus_I2C.PMBus_Write(ADM1266_Address[i], write_data)
# readback system information for the device address passed. Max length = 2kbytes.
# readback the length of the data from the "Common Data" section, and based on the data length, readback the remaining "System Config Data".
# all data is stored in the System_Data list
def System_Read(device_address):
write_data = [0xD7, 0x03, 0x80, 0x00, 0x00]
read_data = PMBus_I2C.PMBus_Write_Read(device_address, write_data, 129)
Data_length = read_data[1] + (read_data[2] * 256)
Summary_Data[0] = "Configuration Name - '"
Summary_Data[0] += List_to_String(read_data[30:(read_data[29] + 30)])
Summary_Data[0] += "'"
    j = 128
while j < Data_length:
l = j & 0xFF
k = (j & 0xFF00) / 256
n = Data_length - j
if n > 128:
n = 128
write_data = [0xD7, 0x03, n, l, int(k)]
read_data = PMBus_I2C.PMBus_Write_Read(device_address, write_data, n + 1)
        # read and add one byte of data after the common header
if k == 0 and l == 128 and n == 128:
System_Data.extend([read_data[128]])
else:
# Remove CRC byte of System Data
if k == 7 and l == 128 and n == 128:
del read_data[128]
# Remove byte count of PMBus Block Read
del read_data[0]
System_Data.extend(read_data)
# remove CRC byte for system data
j += 128
# readback blackbox data for the device address and index provided
def Indexed_Blackbox_Data(device_address, index):
write_data = [0xDE, 0x01, index]
read_data = PMBus_I2C.PMBus_Write_Read(device_address, write_data, 65)
return (read_data)
# get the starting pointer and length for Rails, Signals and States
# call the 3 sub functions to parse the information for Rails, Signals and States, based on their pointers and lengths
def System_Parse():
for i in range(len(ADM1266_Address)):
System_Read(ADM1266_Address[i])
next_pointer = 42
(PadData_length, PadData_pointer) = VLQ_Decode(next_pointer)
next_pointer = PadData_pointer + PadData_length + 1
(RailData_length, RailData_pointer) = VLQ_Decode(next_pointer)
next_pointer = RailData_pointer + RailData_length + 1
(StateData_length, StateData_pointer) = VLQ_Decode(next_pointer)
next_pointer = StateData_pointer + StateData_length + 1
(SignalData_length, SignalData_pointer) = VLQ_Decode(next_pointer)
Rail_Parse(RailData_length, RailData_pointer)
Signal_Parse(SignalData_length, SignalData_pointer)
State_Parse(StateData_length, StateData_pointer)
# parse the Blackbox record, from raw data to filling out lists summary, rails and signals status
def BB_Parse():
Summary_Data[1] = "Record ID : " + str(Blackbox_ID(BB_Data[0][1:3]))
Summary_Data[2] = "Power-up Counter : " + str(Blackbox_ID(BB_Data[0][23:25]))
Summary_Data[3] = "Time : " + RTS(BB_Data[0][25:32])
Summary_Data[4] = "Trigger Source : Enable Blackbox[" + str(BB_Data[0][4]) + "] in '" + State_Names[
(BB_Data[0][8] * 256) + BB_Data[0][7] - 1] + "' state"
Summary_Data[5] = "Previous State : " + State_Names[(BB_Data[0][10] * 256) + BB_Data[0][9] - 1]
for i in range(len(ADM1266_Address)):
VH_BB_Data(BB_Data[i][6], i)
VP_BB_Data(BB_Data[i][11:15], i)
PDIO_Rail_BB_Data(BB_Data[i][21:23], i)
PDIO_Signal_BB_Input_Data(BB_Data[i][19:21], i)
GPIO_Signal_BB_Input_Data(BB_Data[i][15:17], i)
GPIO_Signal_BB_Output_Data(BB_Data[i][17:19], i)
Rails_Status()
Signals_Status_Fill()
def Blackbox_ID(data):
Calculated_Value = data[0] + (data[1] * 256)
return Calculated_Value
def Powerup_Count(data):
Calculated_Value = data[0] + (data[1] * 256)
return Calculated_Value
def RTS(data):
Calculated_Value = 0
for i in range(2, 6, 1):
Calculated_Value = Calculated_Value + (data[i] * (2 ** (8 * i)))
Calculated_Value = Calculated_Value * (1 / (32768 * 2))
if Calculated_Value > 315360000:
Calculated_Value = str(datetime.datetime.utcfromtimestamp(Calculated_Value))
else:
Calculated_Value = str(datetime.timedelta(seconds=Calculated_Value))
return Calculated_Value
def VP_BB_Data(data, device):
tempov = [int(x) for x in bin(data[0] + (256 * data[1]))[2:].zfill(14)]
tempov.reverse()
tempuv = [int(x) for x in bin(data[2] + (256 * data[3]))[2:].zfill(14)]
tempuv.reverse()
for i in range(0, 13, 1):
VP_Data[device][i + 1][4] = tempov[i]
VP_Data[device][i + 1][5] = tempuv[i]
def VH_BB_Data(data, device):
temp = [int(x) for x in bin(data)[2:].zfill(8)]
temp.reverse()
for i in range(0, 4, 1):
VH_Data[device][i + 1][4] = temp[i]
VH_Data[device][i + 1][5] = temp[i + 4]
def PDIO_Rail_BB_Data(data, device):
temp = [int(x) for x in bin(data[0] + (256 * data[1]))[2:].zfill(16)]
temp.reverse()
for i in range(0, 16, 1):
for j in range(len(ADM1266_Address)):
for k in range(1, 5, 1):
if (VH_Data[j][k][1] == i + 1 and VH_Data[j][k][2] == device):
VH_Data[j][k][6] = temp[i]
for k in range(1, 14, 1):
if (VP_Data[j][k][1] == i + 1 and VP_Data[j][k][2] == device):
VP_Data[j][k][6] = temp[i]
for n in range(0, 25, 1):
if Signals_Data[device][n][2] == 1 and Signals_Data[device][n][1] == i + 1:
Signals_Data[device][n][5] = temp[i]
def PDIO_Signal_BB_Input_Data(data, device):
temp = [int(x) for x in bin(data[0] + (256 * data[1]))[2:].zfill(16)]
temp.reverse()
for i in range(0, 16, 1):
for n in range(0, 25, 1):
if Signals_Data[device][n][2] == 1 and Signals_Data[device][n][1] == i + 1:
Signals_Data[device][n][4] = temp[i]
def GPIO_map(data):
GPIO[0] = data[0]
GPIO[1] = data[1]
GPIO[2] = data[2]
GPIO[3] = data[8]
GPIO[4] = data[9]
GPIO[5] = data[10]
GPIO[6] = data[11]
GPIO[7] = data[6]
GPIO[8] = data[7]
return GPIO
def GPIO_Signal_BB_Input_Data(data, device):
temp = [int(x) for x in bin(data[0] + (256 * data[1]))[2:].zfill(16)]
temp.reverse()
temp = GPIO_map(temp)
for i in range(0, 10, 1):
for n in range(0, 25, 1):
if Signals_Data[device][n][2] == 1 and Signals_Data[device][n][1] == i + 1:
Signals_Data[device][n][4] = temp[i]
def GPIO_Signal_BB_Output_Data(data, device):
temp = [int(x) for x in bin(data[0] + (256 * data[1]))[2:].zfill(16)]
temp.reverse()
temp = GPIO_map(temp)
for i in range(0, 10, 1):
for n in range(0, 25, 1):
if Signals_Data[device][n][2] == 1 and Signals_Data[device][n][1] == i + 1:
Signals_Data[device][n][5] = temp[i]
def Signals_Status_Fill():
del Signals_Status[:]
for i in range(len(ADM1266_Address)):
for j in range(0, 25, 1):
if Signals_Data[i][j][0] != 0:
if Signals_Data[i][j][4] == 1:
i_val = "High"
else:
i_val = "Low"
if Signals_Data[i][j][5] == 1:
o_val = "High"
else:
o_val = "Low"
Signals_Status.append(
str(Signals_Data[i][j][0]) + " - Input Value : " + i_val + " - Output Value : " + o_val)
def Rails_Status():
del OV_Rails[:]
del UV_Rails[:]
del Normal_Rails[:]
del Disabled_Rails[:]
for i in range(len(ADM1266_Address)):
for j in range(1, 5, 1):
if VH_Data[i][j][0] != 0:
if VH_Data[i][j][1] == 0:
if (VH_Data[i][j][4] == 1):
OV_Rails.append(str(VH_Data[i][j][0]) + " : OV ")
if (VH_Data[i][j][5] == 1):
UV_Rails.append(str(VH_Data[i][j][0]) + " : UV ")
if (VH_Data[i][j][4] == 0 and VH_Data[i][j][5] == 0):
Normal_Rails.append(str(VH_Data[i][j][0]) + " : Normal ")
else:
if (VH_Data[i][j][4] == 1 and VH_Data[i][j][3] == VH_Data[i][j][6]):
OV_Rails.append(str(VH_Data[i][j][0]) + " : OV ")
if (VH_Data[i][j][5] == 1 and VH_Data[i][j][3] == VH_Data[i][j][6]):
UV_Rails.append(str(VH_Data[i][j][0]) + " : UV ")
if (VH_Data[i][j][3] != VH_Data[i][j][6]):
Disabled_Rails.append(str(VH_Data[i][j][0]) + " : Disabled ")
if (VH_Data[i][j][4] == 0 and VH_Data[i][j][5] == 0 and VH_Data[i][j][3] == VH_Data[i][j][6]):
Normal_Rails.append(str(VH_Data[i][j][0]) + " : Normal ")
for j in range(1, 14, 1):
if VP_Data[i][j][0] != 0:
if VP_Data[i][j][1] == 0:
if (VP_Data[i][j][4] == 1):
OV_Rails.append(str(VP_Data[i][j][0]) + " : OV ")
if (VP_Data[i][j][5] == 1):
UV_Rails.append(str(VP_Data[i][j][0]) + " : UV ")
if (VP_Data[i][j][4] == 0 and VP_Data[i][j][5] == 0):
Normal_Rails.append(str(VP_Data[i][j][0]) + " : Normal ")
else:
if (VP_Data[i][j][4] == 1 and VP_Data[i][j][3] == VP_Data[i][j][6]):
OV_Rails.append(str(VP_Data[i][j][0]) + " : OV ")
if (VP_Data[i][j][5] == 1 and VP_Data[i][j][3] == VP_Data[i][j][6]):
UV_Rails.append(str(VP_Data[i][j][0]) + " : UV ")
if (VP_Data[i][j][3] != VP_Data[i][j][6]):
Disabled_Rails.append(str(VP_Data[i][j][0]) + " : Disabled ")
if (VP_Data[i][j][4] == 0 and VP_Data[i][j][5] == 0 and VP_Data[i][j][3] == VP_Data[i][j][6]):
Normal_Rails.append(str(VP_Data[i][j][0]) + " : Normal ")
def VP_Status(data, device):
tempov = [int(x) for x in bin(data[0] + (256 * data[1]))[2:].zfill(13)]
tempov.reverse()
tempuv = [int(x) for x in bin(data[2] + (256 * data[3]))[2:].zfill(13)]
tempuv.reverse()
for i in range(0, 13, 1):
if tempov[i] == 0 and tempuv[i] == 0:
Normal_Rails.append(str(VP_Data[device][i + 1][0]) + " : Normal ")
else:
if tempov[i] == 1:
OV_Rails.append(str(VP_Data[device][i + 1][0]) + " : OV ")
if tempuv[i] == 1:
UV_Rails.append(str(VP_Data[device][i + 1][0]) + " : UV ")
def List_to_String(data):
name = ""
for i in range(len(data)):
name += chr(data[i])
return (name)
def VLQ_Decode(index):
i = index
j = 0
value = 0
while System_Data[i] > 127:
if j == 0:
value += (System_Data[i] & 127)
else:
value += (System_Data[i] & 127) * 128 * j
i += 1
j += 1
if j == 0:
value += (System_Data[i] & 127)
else:
value += (System_Data[i] & 127) * 128 * j
return (value, i + 1)
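# Worked example of this decoder (illustrative bytes): a single byte 0x2A decodes
# to (42, index + 1); the two-byte sequence 0x85 0x02 decodes to
# (0x85 & 127) + (0x02 & 127) * 128 = 5 + 256 = 261, returned as (261, index + 2).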
def Rail_Parse(RailData_length, RailData_pointer):
next_pointer = RailData_pointer
(temp, next_pointer) = VLQ_Decode(next_pointer)
while next_pointer < (RailData_pointer + RailData_length):
(name_length, next_pointer) = VLQ_Decode(next_pointer)
Rail_Name = List_to_String(System_Data[next_pointer:(next_pointer + name_length)])
next_pointer += name_length
(temp, next_pointer) = VLQ_Decode(next_pointer)
(PDIO_GPIO_Num, PDIO_GPIO_Type, PDIO_GPIO_dev_id) = PDIO_GPIO_Global_Index(temp)
(temp, next_pointer) = VLQ_Decode(next_pointer)
(VX_Num, VX_Type, VX_dev_id) = VX_Global_Index(temp)
(temp, next_pointer) = VLQ_Decode(next_pointer)
(temp, next_pointer) = VLQ_Decode(next_pointer)
(temp, next_pointer) = VLQ_Decode(next_pointer)
PDIO_GPIO_Polarity = temp & 0x01
#if PDIO_GPIO_Type == 0:
if VX_Type == 0:
VH_Data[VX_dev_id][VX_Num][0] = Rail_Name
VH_Data[VX_dev_id][VX_Num][1] = PDIO_GPIO_Num
VH_Data[VX_dev_id][VX_Num][2] = PDIO_GPIO_dev_id
VH_Data[VX_dev_id][VX_Num][3] = PDIO_GPIO_Polarity
else:
VP_Data[VX_dev_id][VX_Num][0] = Rail_Name
VP_Data[VX_dev_id][VX_Num][1] = PDIO_GPIO_Num
VP_Data[VX_dev_id][VX_Num][2] = PDIO_GPIO_dev_id
VP_Data[VX_dev_id][VX_Num][3] = PDIO_GPIO_Polarity
def Signal_Parse(SignalData_length, SignalData_pointer):
next_pointer = SignalData_pointer
(temp, next_pointer) = VLQ_Decode(next_pointer)
i = 0
while next_pointer < (SignalData_pointer + SignalData_length):
(name_length, next_pointer) = VLQ_Decode(next_pointer)
Signal_Name = List_to_String(System_Data[next_pointer:(next_pointer + name_length)])
next_pointer += name_length
(temp, next_pointer) = VLQ_Decode(next_pointer)
(PDIO_GPIO_Num, PDIO_GPIO_Type, PDIO_GPIO_dev_id) = PDIO_GPIO_Global_Index(temp)
(temp, next_pointer) = VLQ_Decode(next_pointer)
Signal_Direction = temp
Signals_Data[PDIO_GPIO_dev_id][i][0] = Signal_Name
Signals_Data[PDIO_GPIO_dev_id][i][1] = PDIO_GPIO_Num
Signals_Data[PDIO_GPIO_dev_id][i][2] = PDIO_GPIO_Type
Signals_Data[PDIO_GPIO_dev_id][i][3] = Signal_Direction
i += 1
def State_Parse(StateData_length, StateData_pointer):
next_pointer = StateData_pointer
(temp, next_pointer) = VLQ_Decode(next_pointer)
while next_pointer < (StateData_pointer + StateData_length):
(name_length, next_pointer) = VLQ_Decode(next_pointer)
State_Names.append(List_to_String(System_Data[next_pointer:(next_pointer + name_length)]))
next_pointer += name_length
def PDIO_GPIO_Global_Index(data):
if data < 256:
PDIO_GPIO_Num = PDIO_GPIO_Pad.index(data)
Dev_Id = 0
else:
PDIO_GPIO_Num = PDIO_GPIO_Pad.index(data & 0xFF)
Dev_Id = int((data & 0xFF00) / 256)
PDIO_GPIO_Type = 0 # 0 for PDIO, 1 for GPIO
if PDIO_GPIO_Num > 16:
PDIO_GPIO_Num = PDIO_GPIO_Num - 16
PDIO_GPIO_Type = 1
return (PDIO_GPIO_Num, PDIO_GPIO_Type, Dev_Id)
def VX_Global_Index(data):
if data < 256:
VX_Num = VX_Pad.index(data)
Dev_Id = 0
else:
VX_Num = VX_Pad.index(data & 0xFF)
Dev_Id = int((data & 0xFF00) / 256)
VX_Type = 0 # 0 for H, 1 for P
if VX_Num > 4:
VX_Num = VX_Num - 4
VX_Type = 1
return (VX_Num, VX_Type, Dev_Id)
Normal_I_Rails = list()
Disabled_I_Rails = list()
OV_I_Rails = list()
UV_I_Rails = list()
OVW_I_Rails = list()
UVW_I_Rails = list()
Signals_I_Status = list()
def Exp_Calc(data):
if data < 16:
return (data)
else:
temp = data - 32
return (temp)
def VOUT_Status(data):
OVF = (data & 128) / 128
OVW = (data & 64) / 64
UVW = (data & 32) / 32
UVF = (data & 16) / 16
return (OVF, UVF, OVW, UVW)
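# Worked example (illustrative byte): a STATUS_VOUT value of 0x50 has bit 6
# (OV warning) and bit 4 (UV fault) set, so VOUT_Status(0x50) returns
# (OVF, UVF, OVW, UVW) = (0.0, 1.0, 1.0, 0.0).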
def PDIO_Rail_Inst_Data(data, device):
temp = [int(x) for x in bin(data[1] + (256 * data[2]))[2:].zfill(16)]
temp.reverse()
for i in range(0, 16, 1):
for j in range(len(ADM1266_Address)):
for k in range(1, 5, 1):
if (VH_Data[j][k][1] == i + 1 and VH_Data[j][k][2] == device):
VH_Data[j][k][14] = temp[i]
for k in range(1, 14, 1):
if (VP_Data[j][k][1] == i + 1 and VP_Data[j][k][2] == device):
VP_Data[j][k][14] = temp[i]
for n in range(0, 25, 1):
if Signals_Data[device][n][2] == 0 and Signals_Data[device][n][1] == i + 1:
Signals_Data[device][n][6] = temp[i]
def GPIO_Signal_Inst_Data(data, device):
temp = [int(x) for x in bin(data[1] + (256 * data[2]))[2:].zfill(16)]
temp.reverse()
temp = GPIO_map(temp)
for i in range(0, 10, 1):
for n in range(0, 25, 1):
if Signals_Data[device][n][2] == 1 and Signals_Data[device][n][1] == i + 1:
Signals_Data[device][n][6] = temp[i]
def Get_Rail_Current_Data(address, page):
for i in range(len(ADM1266_Address)):
write_data = [0xE9]
read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[i], write_data, 3)
PDIO_Rail_Inst_Data(read_data, i)
write_data = [0xEA]
read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[i], write_data, 3)
GPIO_Signal_Inst_Data(read_data, i)
write_data = [0x00, page]
read_data = PMBus_I2C.PMBus_Write(ADM1266_Address[address], write_data)
write_data = [0x7A]
read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[address], write_data, 2)
if page < 4:
(VH_Data[address][page + 1][10], VH_Data[address][page + 1][11], VH_Data[address][page + 1][12],
VH_Data[address][page + 1][13]) = VOUT_Status(read_data[0])
status = VH_Status(address, page + 1)
else:
(VP_Data[address][page - 3][10], VP_Data[address][page - 3][11], VP_Data[address][page - 3][12],
VP_Data[address][page - 3][13]) = VOUT_Status(read_data[0])
status = VP_Status(address, page - 3)
write_data = [0x20]
read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[address], write_data, 2)
if page < 4:
VH_Data[address][page + 1][8] = Exp_Calc(read_data[0])
else:
VP_Data[address][page - 3][8] = Exp_Calc(read_data[0])
write_data = [0x8B]
read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[address], write_data, 3)
if page < 4:
VH_Data[address][page + 1][9] = read_data[0] + (read_data[1] * 256)
value = VH_Data[address][page + 1][9] * (2 ** VH_Data[address][page + 1][8])
name = VH_Data[address][page + 1][0]
else:
VP_Data[address][page - 3][9] = read_data[0] + (read_data[1] * 256)
value = VP_Data[address][page - 3][9] * (2 ** VP_Data[address][page - 3][8])
name = VP_Data[address][page - 3][0]
return (round(value, 3), status, name)
def Get_Signal_Current_Data(address, index):
status = 0
name = 0
if index < 16:
write_data = [0xE9]
read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[address], write_data, 3)
PDIO_Rail_Inst_Data(read_data, address)
index = index + 1
for n in range(0, 25, 1):
if Signals_Data[address][n][2] == 0 and Signals_Data[address][n][1] == (index) and Signals_Data[address][n][
0] != 0:
status = Signals_Data[address][n][6]
name = Signals_Data[address][n][0]
if name == 0:
temp = [int(x) for x in bin(read_data[1] + (256 * read_data[2]))[2:].zfill(16)]
temp.reverse()
status = temp[index - 1]
else:
write_data = [0xEA]
read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[address], write_data, 3)
GPIO_Signal_Inst_Data(read_data, address)
index = index - 15
for n in range(0, 25, 1):
if Signals_Data[address][n][2] == 1 and Signals_Data[address][n][1] == (index) and Signals_Data[address][n][
0] != 0:
status = Signals_Data[address][n][6]
name = Signals_Data[address][n][0]
if name == 0:
temp = [int(x) for x in bin(read_data[1] + (256 * read_data[2]))[2:].zfill(16)]
temp.reverse()
temp = GPIO_map(temp)
status = temp[index - 1]
return (status, name)
def Get_Current_Data():
for i in range(len(ADM1266_Address)):
k = 1
write_data = [0xE8]
read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[i], write_data, 52)
for j in range(1, 5, 1):
VH_Data[i][j][9] = read_data[k] + (read_data[k + 1] * 256)
VH_Data[i][j][8] = Exp_Calc(read_data[j + 34])
k += 2
for j in range(1, 14, 1):
VP_Data[i][j][9] = read_data[k] + (read_data[k + 1] * 256)
VP_Data[i][j][8] = Exp_Calc(read_data[j + 38])
k += 2
k = 1
write_data = [0xE7]
read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[i], write_data, 18)
for j in range(1, 5, 1):
(VH_Data[i][j][10], VH_Data[i][j][11], VH_Data[i][j][12], VH_Data[i][j][13]) = VOUT_Status(read_data[k])
k += 1
for j in range(1, 14, 1):
(VP_Data[i][j][10], VP_Data[i][j][11], VP_Data[i][j][12], VP_Data[i][j][13]) = VOUT_Status(read_data[k])
k += 1
write_data = [0xE9]
read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[i], write_data, 3)
PDIO_Rail_Inst_Data(read_data, i)
write_data = [0xEA]
read_data = PMBus_I2C.PMBus_Write_Read(ADM1266_Address[i], write_data, 3)
GPIO_Signal_Inst_Data(read_data, i)
def VH_Status(address, page):
result = 0
if VH_Data[address][page][1] == 0:
if (VH_Data[address][page][10] == 1):
result = 5
if (VH_Data[address][page][11] == 1):
result = 4
if (VH_Data[address][page][12] == 1):
result = 3
if (VH_Data[address][page][13] == 1):
result = 2
if (VH_Data[address][page][10] == 0 and VH_Data[address][page][11] == 0 and VH_Data[address][page][12] == 0 and
VH_Data[address][page][13] == 0):
result = 0
else:
if (VH_Data[address][page][10] == 1 and VH_Data[address][page][3] == VH_Data[address][page][14]):
result = 5
if (VH_Data[address][page][11] == 1 and VH_Data[address][page][3] == VH_Data[address][page][14]):
result = 4
if (VH_Data[address][page][12] == 1 and VH_Data[address][page][3] == VH_Data[address][page][14]):
result = 3
if (VH_Data[address][page][13] == 1 and VH_Data[address][page][3] == VH_Data[address][page][14]):
result = 2
if (VH_Data[address][page][3] != VH_Data[address][page][14]):
result = 1
if (VH_Data[address][page][10] == 0 and VH_Data[address][page][11] == 0 and VH_Data[address][page][3] ==
VH_Data[address][page][14]):
result = 0
return (result)
def VP_Status(address, page):
result = 0
if VP_Data[address][page][1] == 0:
if (VP_Data[address][page][10] == 1):
result = 5
if (VP_Data[address][page][11] == 1):
result = 4
if (VP_Data[address][page][12] == 1):
result = 3
if (VP_Data[address][page][13] == 1):
result = 2
if (VP_Data[address][page][10] == 0 and VP_Data[address][page][11] == 0 and VP_Data[address][page][12] == 0 and
VP_Data[address][page][13] == 0):
result = 0
else:
if (VP_Data[address][page][10] == 1 and VP_Data[address][page][3] == VP_Data[address][page][14]):
result = 5
if (VP_Data[address][page][11] == 1 and VP_Data[address][page][3] == VP_Data[address][page][14]):
result = 4
if (VP_Data[address][page][12] == 1 and VP_Data[address][page][3] == VP_Data[address][page][14]):
result = 3
if (VP_Data[address][page][13] == 1 and VP_Data[address][page][3] == VP_Data[address][page][14]):
result = 2
if (VP_Data[address][page][3] != VP_Data[address][page][14]):
result = 1
if (VP_Data[address][page][10] == 0 and VP_Data[address][page][11] == 0 and VP_Data[address][page][3] ==
VP_Data[address][page][14]):
result = 0
return (result)
def Rails_I_Status():
del OV_I_Rails[:]
del UV_I_Rails[:]
del OVW_I_Rails[:]
del UVW_I_Rails[:]
del Normal_I_Rails[:]
del Disabled_I_Rails[:]
for i in range(len(ADM1266_Address)):
for j in range(1, 5, 1):
if VH_Data[i][j][0] != 0:
temp = VH_Data[i][j][9] * (2 ** VH_Data[i][j][8])
if VH_Data[i][j][1] == 0:
if (VH_Data[i][j][10] == 1):
OV_I_Rails.append(str(VH_Data[i][j][0]) + " : OV Fault - " + str(round(temp, 3)) + "V")
if (VH_Data[i][j][11] == 1):
UV_I_Rails.append(str(VH_Data[i][j][0]) + " : UV Fault - " + str(round(temp, 3)) + "V")
if (VH_Data[i][j][12] == 1):
OVW_I_Rails.append(str(VH_Data[i][j][0]) + " : OV Warning - " + str(round(temp, 3)) + "V")
if (VH_Data[i][j][13] == 1):
UVW_I_Rails.append(str(VH_Data[i][j][0]) + " : UV Warning - " + str(round(temp, 3)) + "V")
if (VH_Data[i][j][10] == 0 and VH_Data[i][j][11] == 0 and VH_Data[i][j][12] == 0 and VH_Data[i][j][
13] == 0):
Normal_I_Rails.append(str(VH_Data[i][j][0]) + " : Normal - " + str(round(temp, 3)) + "V")
else:
if (VH_Data[i][j][10] == 1 and VH_Data[i][j][3] == VH_Data[i][j][14]):
OV_I_Rails.append(str(VH_Data[i][j][0]) + " : OV Fault - " + str(round(temp, 3)) + "V")
if (VH_Data[i][j][11] == 1 and VH_Data[i][j][3] == VH_Data[i][j][14]):
UV_I_Rails.append(str(VH_Data[i][j][0]) + " : UV Fault - " + str(round(temp, 3)) + "V")
if (VH_Data[i][j][12] == 1 and VH_Data[i][j][3] == VH_Data[i][j][14]):
OVW_I_Rails.append(str(VH_Data[i][j][0]) + " : OV Warning - " + str(round(temp, 3)) + "V")
if (VH_Data[i][j][13] == 1 and VH_Data[i][j][3] == VH_Data[i][j][14]):
UVW_I_Rails.append(str(VH_Data[i][j][0]) + " : UV Warning - " + str(round(temp, 3)) + "V")
if (VH_Data[i][j][3] != VH_Data[i][j][14]):
Disabled_I_Rails.append(str(VH_Data[i][j][0]) + " : Disabled - " + str(round(temp, 3)) + "V")
if (VH_Data[i][j][10] == 0 and VH_Data[i][j][11] == 0 and VH_Data[i][j][3] == VH_Data[i][j][14]):
Normal_I_Rails.append(str(VH_Data[i][j][0]) + " : Normal - " + str(round(temp, 3)) + "V")
for j in range(1, 14, 1):
if VP_Data[i][j][0] != 0:
temp = VP_Data[i][j][9] * (2 ** VP_Data[i][j][8])
if VP_Data[i][j][1] == 0:
if (VP_Data[i][j][10] == 1):
OV_I_Rails.append(str(VP_Data[i][j][0]) + " : OV Fault - " + str(round(temp, 3)) + "V")
if (VP_Data[i][j][11] == 1):
UV_I_Rails.append(str(VP_Data[i][j][0]) + " : UV Fault - " + str(round(temp, 3)) + "V")
if (VP_Data[i][j][12] == 1):
OVW_I_Rails.append(str(VP_Data[i][j][0]) + " : OV Warning - " + str(round(temp, 3)) + "V")
if (VP_Data[i][j][13] == 1):
UVW_I_Rails.append(str(VP_Data[i][j][0]) + " : UV Warning - " + str(round(temp, 3)) + "V")
if (VP_Data[i][j][10] == 0 and VP_Data[i][j][11] == 0 and VP_Data[i][j][12] == 0 and VP_Data[i][j][
13] == 0):
Normal_I_Rails.append(str(VP_Data[i][j][0]) + " : Normal - " + str(round(temp, 3)) + "V")
else:
if (VP_Data[i][j][10] == 1 and VP_Data[i][j][3] == VP_Data[i][j][14]):
OV_I_Rails.append(str(VP_Data[i][j][0]) + " : OV Fault - " + str(round(temp, 3)) + "V")
if (VP_Data[i][j][11] == 1 and VP_Data[i][j][3] == VP_Data[i][j][14]):
UV_I_Rails.append(str(VP_Data[i][j][0]) + " : UV Fault - " + str(round(temp, 3)) + "V")
if (VP_Data[i][j][12] == 1 and VP_Data[i][j][3] == VP_Data[i][j][14]):
OVW_I_Rails.append(str(VP_Data[i][j][0]) + " : OV Warning - " + str(round(temp, 3)) + "V")
if (VP_Data[i][j][13] == 1 and VP_Data[i][j][3] == VP_Data[i][j][14]):
UVW_I_Rails.append(str(VP_Data[i][j][0]) + " : UV Warning - " + str(round(temp, 3)) + "V")
if (VP_Data[i][j][3] != VP_Data[i][j][14]):
Disabled_I_Rails.append(str(VP_Data[i][j][0]) + " : Disabled - " + str(round(temp, 3)) + "V")
if (VP_Data[i][j][10] == 0 and VP_Data[i][j][11] == 0 and VP_Data[i][j][3] == VP_Data[i][j][14]):
Normal_I_Rails.append(str(VP_Data[i][j][0]) + " : Normal - " + str(round(temp, 3)) + "V")
def Signals_I_Status_Fill():
del Signals_I_Status[:]
for i in range(len(ADM1266_Address)):
for j in range(0, 25, 1):
if Signals_Data[i][j][0] != 0:
if Signals_Data[i][j][6] == 1:
i_val = "High"
else:
i_val = "Low"
Signals_I_Status.append(str(Signals_Data[i][j][0]) + " - Value : " + i_val)
# offline blackbox
def System_Read_Offline(system_data):
# write_data = [0xD7, 0x03, 0x80, 0x00, 0x00]
# read_data = PMBus_I2C.PMBus_Write_Read(device_address, write_data, 129)
read_data = system_data[0]
Data_length = read_data[1] + (read_data[2] * 256)
Summary_Data[0] = "Configuration Name - '"
Summary_Data[0] += List_to_String(read_data[30:(read_data[29] + 30)])
Summary_Data[0] += "'"
    j = 128
counter = 1
while j < Data_length:
l = j & 0xFF
        k = (j & 0xFF00) // 256
n = Data_length - j
if n > 128:
n = 128
write_data = [0xD7, 0x03, n, l, int(k)]
# read_data = PMBus_I2C.PMBus_Write_Read(device_address, write_data, n + 1)
read_data = system_data[counter]
        # read and add one byte of data after common header
if k == 0 and l == 128 and n == 128:
System_Data.extend([read_data[128]])
else:
# Remove CRC byte of System Data
if k == 7 and l == 128 and n == 128:
del read_data[128]
# Remove byte count of PMBus Block Read
del read_data[0]
System_Data.extend(read_data)
# remove CRC byte for system data
j += 128
counter += 1
def System_Parse_Offline(hex_file_path, system_data):
hex_file = open(hex_file_path, "rb")
if hex_file is not None:
for line in hex_file.readlines():
if line.startswith(b":00000001FF"):
break
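            # Intel HEX record: ':' + byte count (2 hex chars) + address (4) +
            # record type (2) + data + checksum; the address field doubles as
            # the command id here.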
data_len = int(line[1:3], 16)
cmd = int(line[3:7], 16)
data = [] if data_len == 0 else array('B', codecs.decode((line[9:9 + data_len * 2]), "hex_codec")).tolist()
            if cmd == 0xD7:
del data[1:3]
system_data.append(data)
for i in range(len(ADM1266_Address)):
System_Read_Offline(system_data)
next_pointer = 42
(PadData_length, PadData_pointer) = VLQ_Decode(next_pointer)
next_pointer = PadData_pointer + PadData_length + 1
(RailData_length, RailData_pointer) = VLQ_Decode(next_pointer)
next_pointer = RailData_pointer + RailData_length + 1
(StateData_length, StateData_pointer) = VLQ_Decode(next_pointer)
next_pointer = StateData_pointer + StateData_length + 1
(SignalData_length, SignalData_pointer) = VLQ_Decode(next_pointer)
Rail_Parse(RailData_length, RailData_pointer)
Signal_Parse(SignalData_length, SignalData_pointer)
State_Parse(StateData_length, StateData_pointer)
return True
else:
return False
def Get_Raw_Data_Offline(bb_data_list, record_number):
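    # The blackbox appears to be a 32-record circular buffer with 64 bytes per
    # record; map the requested record number to its physical slot.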
j = Record_Index + int(record_number) - Num_Records
if j < 0:
j += 32
for i in range(len(ADM1266_Address)):
BB_Data[i] = bb_data_list[64*(j):64*(j+1)]
|
python
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Url Entity class."""
from typing import Any, Dict, Mapping
from ..._version import VERSION
from ...common.utility import export
from ...sectools.domain_utils import url_components
from .entity import Entity
__version__ = VERSION
__author__ = "Ian Hellen"
# pylint: disable=invalid-name
@export
class Url(Entity):
"""URL Entity."""
def __init__(self, src_entity: Mapping[str, Any] = None, **kwargs):
"""
Create a new instance of the entity type.
:param src_entity: instantiate entity using properties of src entity
:param kwargs: key-value pair representation of entity
"""
super().__init__(src_entity=src_entity, **kwargs)
if self.Url:
self.__dict__.update(url_components(self.Url))
@property
def description_str(self) -> str:
"""Return Entity Description."""
return f"{self.Url}"
# # We need to do some trickery with the Url defined as
# # a property since base Entity class expects to be able to set
# # attributes directly in self.__dict__
# @property
# def Url(self) -> Optional[str]:
# """Return Url."""
# if self._url is None and "Url" in self.__dict__:
# self.Url = self.__dict__["Url"]
# return self._url
# @Url.setter
# def Url(self, url):
# """Return host component of Url."""
# self._url = url
# if url:
# self.__dict__.update(url_components(url))
_entity_schema: Dict[str, Any] = {}
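# Illustrative sketch (not part of the original file): url_components() is not
# shown here, so the block below only demonstrates a similar style of URL
# decomposition using the standard library, as a rough approximation.
if __name__ == "__main__":
    from urllib.parse import urlparse
    parts = urlparse("https://example.com/some/path?q=1")
    print(parts.scheme, parts.netloc, parts.path, parts.query)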
|
python
|
import queue
import time
from threading import Thread
import cv2
from scripts import centerface_utils
TARGET_WIDTH = 640
TARGET_HEIGHT = 640
TARGET_FPS = 30
class CameraDemo:
"""Multi-threaded python centerface detection demo."""
def __init__(self, runner: centerface_utils.CenterFaceNoDetection) -> None:
self.keep_going = True
self.runner = runner
def capture_frame(self, cap, queue):
"""Thread function which captures data from webcam and places into queue"""
prev = 0
cur = 0
while self.keep_going:
cur = time.time()
_, img = cap.read()
if (cur - prev) >= 1.0 / TARGET_FPS:
prev = cur
queue.put(img)
def process_frame(
self, runner, processing_func, input_queue, output_queue, threshold
):
"""Thread function which detects and overlays results, add it to queue for rendering"""
while self.keep_going:
if input_queue.empty():
continue
frame = input_queue.get()
frame = processing_func(frame)
np_array = cv2.dnn.blobFromImage(
frame,
scalefactor=1.0,
size=(TARGET_WIDTH, TARGET_HEIGHT),
mean=(0, 0, 0),
swapRB=True,
crop=True,
)
start = time.time()
detections, landmarks = runner(
np_array, TARGET_HEIGHT, TARGET_WIDTH, threshold=threshold
)
end = time.time()
print(f"Processing frame too {(end - start) * 1000} ms")
# Draw predictions and show frame
for det in detections:
boxes, _ = det[:4], det[4]
cv2.rectangle(
frame,
(int(boxes[0]), int(boxes[1])),
(int(boxes[2]), int(boxes[3])),
(2, 255, 0),
3,
)
for lm in landmarks:
for i in range(0, 5):
cv2.circle(
frame, (int(lm[i * 2]), int(lm[i * 2 + 1])), 4, (0, 0, 255), -1
)
output_queue.put(frame)
def run(self, threshold=0.5):
cap = cv2.VideoCapture(0)
cap_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
cap_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
# Doesn't seem to do anything :/
# cap.set(cv2.CAP_PROP_FPS, TARGET_FPS)
cap_fps = cap.get(cv2.CAP_PROP_FPS)
print("* Capture width:", cap_width)
print("* Capture height:", cap_height)
print("* Capture FPS:", cap_fps)
_, frame = cap.read()
# assume w > h
h, w = frame.shape[:2]
scale = TARGET_WIDTH / h
new_width = int(scale * w)
new_height = int(scale * h)
# For centercrop
left = (new_width - TARGET_WIDTH) // 2
top = (new_height - TARGET_HEIGHT) // 2
right = (new_width + TARGET_WIDTH) // 2
bottom = (new_height + TARGET_HEIGHT) // 2
# initial queue for webcam data
frames_queue = queue.Queue(maxsize=0)
# queue after we've streamed it to real-time feed
ready_for_processing_queue = queue.Queue(maxsize=0)
# queue for processed frames with prediction overlays
processed_frames_queue = queue.Queue(maxsize=0)
# start thread to capture data from webcam
capture_thread = Thread(
target=self.capture_frame,
args=(
cap,
frames_queue,
),
daemon=True,
)
capture_thread.start()
def processing_func(cv2_frame):
# Resize and center crop frame
frame = cv2.resize(cv2_frame, (new_width, new_height))
frame = frame[top:bottom, left:right]
return frame
# start thread to process images with model
processing_thread = Thread(
target=self.process_frame,
args=(
self.runner,
processing_func,
ready_for_processing_queue,
processed_frames_queue,
threshold,
),
daemon=True,
)
processing_thread.start()
while self.keep_going:
if not frames_queue.empty():
img_real_time = frames_queue.get()
if img_real_time is not None:
cv2.imshow("realtime", img_real_time)
ready_for_processing_queue.put(img_real_time)
if not processed_frames_queue.empty():
img_processed = processed_frames_queue.get()
if img_processed is not None:
cv2.imshow("predicted", img_processed)
if cv2.waitKey(1) & 0xFF == ord("q"):
self.keep_going = False
break
cap.release()
capture_thread.join()
processing_thread.join()
if __name__ == "__main__":
onnx_runner = centerface_utils.CenterFaceOnnx("models/centerface-optimized.onnx")
tvm_runner_fp32 = centerface_utils.CenterFaceTVM(
"compiled_packages/centerface_autoscheduler_30000kt_fp32_llvm.tar"
)
tvm_runner_fp16 = centerface_utils.CenterFaceTVM(
"compiled_packages/centerface_autoscheduler_30000kt_fp16_llvm.tar"
)
dummy_runner = centerface_utils.CenterFaceNoDetection()
# Change runners at will
demo = CameraDemo(tvm_runner_fp16)
demo.run()
|
python
|
#!/usr/bin/env python
'''
ASMO Configuration
Author:
Rony Novianto ([email protected])
'''
# Run web to support any programming language via RESTful web service
# Run local if a higher performance is required (e.g. using ASMO with machine learning)
is_running_local = False
host = 'http://localhost:12766'
# Memory
memory_uri = 'memory'
# Attention
process_uri = 'process'
compete_uri = 'compete'
competition_time = 0.5
priority_level_key = 'priority_level'
total_attention_level_key = 'total_attention_level'
attention_value_key = 'attention_value'
boost_value_key = 'boost_value'
required_resources_key = 'required_resources'
actions_key = 'actions'
|
python
|
def double_first(vec):
try:
first = vec[0]
parsed = int(first)
return parsed * 2
except IndexError:
print("no first item")
except ValueError:
print("invalid first item")
if __name__ == '__main__':
numbers = ["42", "93", "18"]
empty = []
strings = ["tofu", "93", "18"]
print(double_first(numbers))
print(double_first(empty))
print(double_first(strings))
|
python
|
import numpy as np
# initial values
ARRAY = []
with open("xoData.txt") as f:
for line in f:
ARRAY.append([int(x) for x in line.split()])
# step function (activation function)
def step_function(sum):
if sum >= 0:
return 1
return -1
# calculating output
def calculate_output(instance, weights, bias):
sum = instance.dot(weights) + bias
return step_function(sum)
# Hebbian Algorithm
def hebb():
inputs = np.array(ARRAY)
weights = np.array([0.0] * 25)
bias = 0.0
for i in range(len(inputs)):
for j in range(len(inputs[i]) - 1):
weights[j] = weights[j] + (inputs[i][j] * inputs[i][25])
bias = bias + (1 * inputs[i][25])
return weights, bias
# Perceptron Algorithm
def perceptron():
inputs = np.array(ARRAY)
weights = np.array([0.0] * 25)
learning_rate = 0.1
bias = 0.0
x = 0
while x < 100:
x += 1
for i in range(len(inputs)):
s = inputs[i][:-1].dot(weights)
prediction = step_function(s + bias)
if inputs[i][25] != prediction:
for j in range(len(inputs[i]) - 1):
weights[j] = weights[j] + (
learning_rate * inputs[i][j] * inputs[i][25]
)
bias = bias + (learning_rate * inputs[i][25])
return weights, bias
# Adaline Algorithm
def adaline():
inputs = np.array(ARRAY)
weights = np.array([0.0] * 25)
learning_rate = 0.1
bias = 0.0
x = 0
while x < 100:
x += 1
for i in range(len(inputs)):
s = inputs[i][:-1].dot(weights) + bias
prediction = step_function(s)
error = inputs[i][25] - s
if inputs[i][25] != prediction:
for j in range(len(inputs[i]) - 1):
weights[j] = weights[j] + (learning_rate * inputs[i][j] * error)
bias = bias + (learning_rate * error)
return weights, bias
# Multi Class Perceptron
def multiClassPerceptron():
inputs = np.array(ARRAY)
weights = np.array([[0.0] * 25, [0.0] * 25])
learning_rate = 0.1
bias = [0.0, 0.0]
x = 0
while x < 100:
x += 1
for i in range(len(inputs)):
s1 = inputs[i][:-1].dot(weights[0])
s2 = inputs[i][:-1].dot(weights[1])
predictionX = step_function(s1 + bias[0])
predictionO = step_function(s2 + bias[1])
if inputs[i][25] != predictionX:
for j in range(len(inputs[i]) - 1):
weights[0][j] = weights[0][j] + (
learning_rate * inputs[i][j] * inputs[i][25]
)
bias[0] = bias[0] + (learning_rate * inputs[i][25])
if (inputs[i][25] * (-1)) != predictionO:
for j in range(len(inputs[i]) - 1):
weights[1][j] = weights[1][j] + (
learning_rate * inputs[i][j] * (inputs[i][25] * (-1))
)
bias[1] = bias[1] + (learning_rate * (inputs[i][25] * (-1)))
return weights, bias
# Multi Class Adaline
def multiClassAdaline():
inputs = np.array(ARRAY)
weights = np.array([[0.0] * 25, [0.0] * 25])
learning_rate = 0.1
bias = [0.0, 0.0]
x = 0
while x < 100:
x += 1
for i in range(len(inputs)):
s1 = inputs[i][:-1].dot(weights[0]) + bias[0]
s2 = inputs[i][:-1].dot(weights[1]) + bias[1]
predictionX = step_function(s1)
predictionO = step_function(s2)
error1 = inputs[i][25] - s1
error2 = (inputs[i][25] * (-1)) - s2
if inputs[i][25] != predictionX:
for j in range(len(inputs[i]) - 1):
weights[0][j] = weights[0][j] + (
learning_rate * inputs[i][j] * error1
)
bias[0] = bias[0] + (learning_rate * error1)
if (inputs[i][25] * (-1)) != predictionO:
for j in range(len(inputs[i]) - 1):
weights[1][j] = weights[1][j] + (
learning_rate * inputs[i][j] * error2
)
bias[1] = bias[1] + (learning_rate * error2)
return weights, bias
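# Illustrative sketch (not part of the original file): the same perceptron
# update rule as above, applied to a tiny self-contained AND problem with
# -1/+1 labels, so it does not depend on xoData.txt.
def perceptron_toy_demo():
    data = np.array([
        [-1.0, -1.0, -1.0],
        [-1.0, 1.0, -1.0],
        [1.0, -1.0, -1.0],
        [1.0, 1.0, 1.0],
    ])
    weights = np.array([0.0, 0.0])
    bias = 0.0
    learning_rate = 0.1
    for _ in range(20):
        for row in data:
            prediction = step_function(row[:2].dot(weights) + bias)
            if row[2] != prediction:
                weights = weights + (learning_rate * row[2] * row[:2])
                bias = bias + (learning_rate * row[2])
    return weights, bias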
|
python
|
from yargy.utils import Record
|
python
|
import networkx as nx
import numpy as np
from copy import deepcopy
from collections import defaultdict
from ylearn.utils import to_repr
from . import prob
from .utils import (check_nodes, ancestors_of_iter, descendents_of_iter)
class CausalGraph:
"""
A class for representing DAGs of causal structures.
Attributes
----------
causation : dict
Descriptions of the causal structures where values are parents of the
corresponding keys.
dag : nx.MultiDiGraph
Graph represented by the networkx package.
prob : ylearn.causal_model.prob.Prob
The encoded probability distribution of the causal graph.
latent_confounding_arcs : list of tuple of two str
        Two elements in the tuple are names of nodes in the graph between
        which there exists a latent confounding arc. Semi-Markovian graphs
with unobserved confounders can be converted to a graph without
unobserved variables, where one can add bi-directed latent confounding
        arcs to represent these relations. For example, the causal graph X <- U -> Y,
where U is an unobserved confounder of X and Y, can be converted
equivalently to X <-->Y where <--> is a latent confounding arc.
is_dag : bool
Determine whether the graph is a DAG, which is a necessary condition
for it to be a valid causal graph.
c_components : set
The C-components of the graph.
observed_dag : nx.MultiDiGraph
A causal graph with only observed variables.
topo_order : list
The topological order of the graph.
explicit_unob_var_dag : nx.MultiDiGraph
A new dag where all unobserved confounding arcs are replaced
by explicit unobserved variables. See latent_confounding_arcs for more
details of the unobserved variables.
Methods
----------
to_adj_matrix()
        Return the adjacency matrix as a numpy matrix.
to_adj_list()
        Return the adjacency list.
ancestors(y)
Return ancestors of y.
add_nodes(nodes, new=False)
If not new, add all nodes in the nodes to the current
CausalGraph, else create a new graph and add nodes.
add_edges_from(edge_list, new=False, observed=True)
Add all edges in the edge_list to the CausalGraph.
parents(x, observed=True)
Find the parents of the node x in the CausalGraph.
add_edge(i, j, observed=True)
Add an edge between nodes i and j to the CausalGraph. Add an unobserved
confounding arc if not observed.
remove_nodes(nodes, new=False)
        Remove the given nodes from the graph. If new, do this in a new CausalGraph.
remove_edge(i, j, observed=True)
        Remove the edge in the CausalGraph. If not observed, remove the
        unobserved latent confounding arcs instead.
remove_edges_from(edge_list, new=False, observed=True)
Remove all edges in the edge_list in the CausalGraph.
build_sub_graph(subset)
Return a new CausalGraph as the subgraph of self with nodes in the
subset.
remove_incoming_edges(y, new=False)
Remove all incoming edges of all nodes in y. If new, return a new
CausalGraph.
remove_outgoing_edges(y, new=False)
Remove all outgoing edges of all nodes in y. If new, return a new
CausalGraph.
"""
def __init__(self, causation, dag=None, latent_confounding_arcs=None):
"""
Parameters
----------
causation : dict
Descriptions of the causal structures where values are parents of the
corresponding keys.
dag : nx.MultiGraph, optional
            A known graph structure. If provided, dag must represent
the causal structures stored in causation. Defaults to None.
latent_confounding_arcs : set or list of tuple of two str, optional
            Two elements in the tuple are names of nodes in the graph between
            which there exists a latent confounding arc. Semi-Markovian graphs
with unobserved confounders can be converted to a graph without
unobserved variables, where one can add bi-directed latent confounding
arcs to represent these relations. For example, the causal graph X <- U -> Y,
where U is an unobserved confounder of X and Y, can be converted
equivalently to X <-->Y where <--> is a latent confounding arc.
"""
self.causation = defaultdict(list, causation)
self.ava_nodes = self.causation.keys()
self.dag = self.observed_dag.copy() if dag is None else dag
# add unobserved bidirected confounding arcs to the graph, with the
# letter 'n' representing that the edge is unobserved
if latent_confounding_arcs is not None:
for edge in latent_confounding_arcs:
self.dag.add_edges_from(
[(edge[0], edge[1], 'n'), (edge[1], edge[0], 'n')]
)
@property
def prob(self):
"""The encoded probability distribution.
Returns
----------
Prob
"""
return prob.Prob(variables=set(self.causation.keys()))
@property
def latent_confounding_arcs(self):
"""Return the latent confounding arcs encoded in the graph.
Returns
----------
list
"""
W = nx.to_numpy_matrix(self.dag)
a, w_t = np.where(W >= 1), W.T.A
arcs, nodes = [], list(self.dag.nodes)
for row, col in zip(a[0], a[1]):
if w_t[row][col] >= 1 and (nodes[col], nodes[row]) not in arcs:
arcs.append((nodes[row], nodes[col]))
return arcs
@property
def is_dag(self):
"""Verify whether the constructed graph is a DAG.
"""
        # TODO: determine if the graph is a DAG, e.g. check tr(e^{W \circ W}) - d = 0
return nx.is_directed_acyclic_graph(self.observed_dag)
def to_adj_matrix(self):
"""Return the adjacency matrix.
"""
W = nx.to_numpy_matrix(self.dag)
return W
# def to_adj_list(self):
# """Return the adjacency list."""
# pass
def is_d_separated(self, x, y, test_set):
"""Check if test_set d-separates x and y.
Parameters
----------
x : set of str
y : set of str
test_set : set of str
Returns
----------
Bool
If test_set d-separates x and y, return True else return False.
"""
return nx.d_separated(self.explicit_unob_var_dag, x, y, test_set)
@property
def c_components(self):
"""Return the C-component set of the graph.
Returns
----------
set of str
The C-component set of the graph
"""
bi_directed_graph = nx.Graph()
bi_directed_graph.add_nodes_from(self.dag.nodes)
bi_directed_graph.add_edges_from(self.latent_confounding_arcs)
return nx.connected_components(bi_directed_graph)
def ancestors(self, x):
"""Return the ancestors of all nodes in x.
Parameters
----------
x : set of str
a set of nodes in the graph
Returns
----------
set of str
Ancestors of nodes in x in the graph
"""
g = self.observed_dag
return ancestors_of_iter(g, x)
def descendents(self, x):
"""Return the descendents of all nodes in x.
Parameters
----------
x : set of str
a set of nodes in the graph
Returns
----------
set of str
Descendents of nodes x of the graph
"""
# des = set()
# x = {x} if isinstance(x, str) else x
# for node in x:
# des.add(node)
# try:
# des.update(nx.descendants(self.observed_dag, node))
# except Exception:
# pass
g = self.observed_dag
return descendents_of_iter(g, x)
def parents(self, x, only_observed=True):
"""Return the direct parents of the node x in the graph.
Parameters
----------
x : str
Name of the node x.
only_observed : bool, optional
If True, then only find the observed parents in the causal graph,
otherwise also include the unobserved variables, by default True
Returns
-------
list
Parents of the node x in the graph
"""
if only_observed:
return self.causation[x]
else:
return list(self.explicit_unob_var_dag.predecessors(x))
@property
def observed_dag(self):
"""Return the observed part of the graph, including observed nodes and
edges between them.
Returns
----------
nx.MultiDiGraph
The observed part of the graph
"""
edges = []
for k, v in self.causation.items():
for para in v:
edges.append((para, k, 0))
ob_dag = nx.MultiDiGraph()
ob_dag.add_edges_from(edges)
return ob_dag
@property
def explicit_unob_var_dag(self):
"""Build a new dag where all unobserved confounding arcs are replaced
by explicit unobserved variables
Returns
----------
nx.MultiDiGraph
"""
new_dag = self.observed_dag
for i, (node1, node2) in enumerate(self.latent_confounding_arcs):
new_dag.add_edges_from(
[(f'U{i}', node1, 'n'), (f'U{i}', node2, 'n')]
)
return new_dag
@property
def topo_order(self):
"""Retrun the topological order of the nodes in the observed graph
Returns
----------
generator
Nodes in the topological order
"""
return nx.topological_sort(self.observed_dag)
def add_nodes(self, nodes, new=False):
"""
If not new, add all nodes in the nodes to the current
CausalGraph, else create a new graph and add nodes.
Parameters
----------
nodes : set or list
new : bool, optional
If new create and return a new graph. Defaults to False.
Returns
----------
CausalGraph
"""
ori_nodes = self.dag.nodes
if not new:
self.dag.add_nodes_from(nodes)
for node in nodes:
if node not in ori_nodes:
self.causation[node] = []
else:
new_dag = deepcopy(self.dag)
new_causation = deepcopy(self.causation)
new_dag.add_nodes_from(nodes)
for node in nodes:
if node not in ori_nodes:
new_causation[node] = []
return CausalGraph(new_causation, dag=new_dag)
def add_edges_from(self, edge_list, new=False, observed=True):
"""
Add edges to the causal graph.
Parameters
----------
edge_list : list
            Every element of the list contains two elements, the first for
            the parent and the second for the child.
new : bool
Return a new graph if set as True
observed : bool
Add unobserved bidirected confounding arcs if not observed.
"""
if not new:
if observed:
for edge in edge_list:
self.causation[edge[1]].append(edge[0])
self.dag.add_edge(edge[0], edge[1], 0)
else:
for edge in edge_list:
self.dag.add_edges_from(
[(edge[0], edge[1], 'n'), (edge[1], edge[0], 'n')]
)
else:
new_dag = deepcopy(self.dag)
new_causation = deepcopy(self.causation)
if observed:
new_dag.add_edges_from(edge_list)
for edge in edge_list:
new_causation[edge[1]].append(edge[0])
else:
for edge in edge_list:
new_dag.add_edges_from(
[(edge[0], edge[1], 'n'), (edge[1], edge[0], 'n')]
)
return CausalGraph(new_causation, dag=new_dag)
def add_edge(self, s, t, observed=True):
"""
Add an edge between nodes i and j. Add an unobserved latent confounding
arc if not observed.
Parameters
----------
s : str
Source of the edge.
t : str
Target of the edge.
observed : bool
Add an unobserved latent confounding arc if False.
"""
if observed:
self.dag.add_edge(s, t, 0)
self.causation[t].append(s)
else:
self.dag.add_edge(s, t, 'n')
def remove_nodes(self, nodes, new=False):
"""
        Remove the given nodes from the graph.
Parameters
----------
nodes : set or list
new : bool, optional
If True, create a new graph, remove nodes in that graph and return
it. Defaults to False.
Returns
---------
CausalGraph
Return a CausalGraph if new.
"""
if not new:
for node in nodes:
for k in list(self.causation.keys()):
if k == node:
del self.causation[node]
continue
try:
self.causation[k].remove(node)
except Exception:
pass
self.dag.remove_nodes_from(nodes)
else:
new_causation = deepcopy(self.causation)
new_dag = deepcopy(self.dag)
new_dag.remove_nodes_from(nodes)
for node in nodes:
for k in list(new_causation.keys()):
if k == node:
del new_causation[node]
continue
try:
new_causation[k].remove(node)
except Exception:
pass
return CausalGraph(new_causation, dag=new_dag)
def remove_edge(self, edge, observed=True):
"""
        Remove the edge in the CausalGraph. If not observed, remove the
        unobserved latent confounding arcs instead.
Parameters
----------
edge : tuple
2 elements denote the start and end of the edge, respectively
observed : bool
If not observed, remove the unobserved latent confounding arcs.
"""
if observed:
self.dag.remove_edge(edge[0], edge[1], 0)
try:
self.causation[edge[1]].remove(edge[0])
except Exception:
pass
else:
self.dag.remove_edges_from(
[(edge[0], edge[1], 'n'), (edge[1], edge[0], 'n')]
)
def remove_edges_from(self, edge_list, new=False, observed=True):
"""
Remove all edges in the edge_list in the graph.
Parameters
----------
edge_list : list
new : bool, optional
            If new, create a new CausalGraph and remove edges.
observed : bool, optional
Remove unobserved latent confounding arcs if not observed.
Returns
----------
CausalGraph
Return a new CausalGraph if new.
"""
if not new:
if observed:
for edge in edge_list:
self.dag.remove_edge(edge[0], edge[1], 0)
try:
self.causation[edge[1]].remove(edge[0])
except Exception:
pass
else:
for edge in edge_list:
self.dag.remove_edges_from(
[(edge[0], edge[1], 'n'), (edge[1], edge[0], 'n')]
)
else:
new_dag = deepcopy(self.dag)
new_causation = deepcopy(self.causation)
if observed:
for edge in edge_list:
new_dag.remove_edge(edge[0], edge[1], 0)
try:
new_causation[edge[1]].remove(edge[0])
except Exception:
pass
else:
for edge in edge_list:
new_dag.remove_edges_from(
[(edge[0], edge[1], 'n'), (edge[1], edge[0], 'n')]
)
return CausalGraph(new_causation, new_dag)
def build_sub_graph(self, subset):
"""Return a new CausalGraph as the subgraph of the graph with nodes in the
subset.
Parameters
----------
subset : set
Returns
----------
CausalGraph
"""
check_nodes(self.ava_nodes, subset)
nodes = set(self.causation.keys()).difference(subset)
return self.remove_nodes(nodes, new=True)
def remove_incoming_edges(self, x, new=False):
"""Remove incoming edges of all nodes of x. If new, do this in the new
CausalGraph.
Parameters
----------
x : set or list
new : bool
            Return a new graph if set as True.
Returns
----------
CausalGraph
If new, return a subgraph of the graph without all incoming edges
of nodes in x
"""
check_nodes(self.ava_nodes, x)
edges = self.dag.in_edges(x, keys=True)
o_edges, u_edges = [], []
for edge in edges:
if edge[2] == 'n':
u_edges.append(edge)
else:
o_edges.append(edge)
if new:
return self.remove_edges_from(o_edges, new).remove_edges_from(
u_edges, new, observed=False
)
else:
self.remove_edges_from(o_edges, new)
self.remove_edges_from(u_edges, new, observed=False)
def remove_outgoing_edges(self, x, new=False):
"""Remove outcoming edges of all nodes in x.
Parameters
----------
x : set
new : bool
Returns
----------
CausalGraph
            If new, return a subgraph of the graph without all outgoing edges
of nodes in x.
"""
check_nodes(self.ava_nodes, x)
removing_edges = [
edge for edge in self.dag.out_edges(x, keys=True) if edge[2] == 0
]
return self.remove_edges_from(removing_edges, new, observed=True)
def plot(self, **kwargs):
ng = nx.DiGraph(self.causation)
options = dict(with_labels=True, node_size=1000, **kwargs)
nx.draw(ng, **options)
def __repr__(self):
return to_repr(self)
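# Illustrative usage sketch (not part of the original file); it assumes the
# ylearn helper modules imported above are installed. The tiny graph below is
# a placeholder, not data shipped with the library.
if __name__ == '__main__':
    demo_causation = {'W': [], 'X': ['W'], 'Y': ['X', 'W']}
    cg = CausalGraph(demo_causation, latent_confounding_arcs=[('X', 'Y')])
    print(cg.is_dag)        # the observed part W -> X -> Y, W -> Y is acyclic
    print(cg.parents('Y'))  # ['X', 'W']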
|
python
|
import typing
import uuid
from datetime import datetime
class SetType:
_all: typing.Set = set()
def __init__(self, api_name: str, desc: str):
self.name = str(api_name)
self.desc = desc
SetType._all.add(self)
def __eq__(self, other):
if not isinstance(other, SetType):
return NotImplemented
return self.name == other.name
def __hash__(self) -> int:
return hash(self.name)
def __str__(self) -> str:
return self.name.lower()
@staticmethod
def parse(name: str) -> 'SetType':
for s in SetType._all:
if s.name == name:
return s
return None
TypeCore = SetType('core', 'A yearly Magic core set (Tenth Edition, etc)')
TypeExpansion = SetType('expansion', 'A rotational expansion set in a block (Zendikar, etc)')
TypeMasters = SetType('masters', 'A reprint set that contains no new cards (Modern Masters, etc)')
TypeMasterpiece = SetType('masterpiece', 'Masterpiece Series premium foil cards')
TypeFromTheVault = SetType('from_the_vault', 'From the Vault gift sets')
TypeSpellbook = SetType('spellbook', 'Spellbook series gift sets')
TypePremiumDeck = SetType('premium_deck', 'Premium Deck Series decks')
TypeDuelDeck = SetType('duel_deck', 'Duel Decks')
TypeDraftInnovation = SetType('draft_innovation', 'Special draft sets, like Conspiracy and Battlebond')
TypeTreasureChest = SetType('treasure_chest', 'Magic Online treasure chest prize sets')
TypeCommander = SetType('commander', 'Commander preconstructed decks')
TypePlanechase = SetType('planechase', 'Planechase sets')
TypeArchenemy = SetType('archenemy', 'Archenemy sets')
TypeVanguard = SetType('vanguard', 'Vanguard card sets')
TypeFunny = SetType('funny', 'A funny un-set or set with funny promos (Unglued, Happy Holidays, etc)')
TypeStarter = SetType('starter', 'A starter/introductory set (Portal, etc)')
TypeBox = SetType('box', 'A gift box set')
TypePromo = SetType('promo', 'A set that contains purely promotional cards')
TypeToken = SetType('token', 'A set made up of tokens and emblems.')
TypeMemorabilia = SetType('memorabilia', 'A set made up of gold-bordered, oversize, or trophy cards that are not legal')
class Set:
"""
Represents a group of related MTG cards. Not all are from official releases,
some are for grouping purposes only. All are provided by data from Scryfall.
:ivar id: Scryfall ID of this set.
:ivar code: The unique three to five-letter code for this set.
:ivar name: English language name for the set.
"""
def __init__(self, **kwargs):
self.id: uuid.UUID = uuid.UUID("00000000-0000-0000-0000-000000000000")
self.code: str = ''
self.name: str = ''
self.type: SetType = TypeCore
self.release_date: datetime = datetime.min
self.block: str = ''
self.parent_set: str = ''
self.card_count: int = 0
self.digital: bool = False
self.foil_only: bool = False
self.nonfoil_only: bool = False
if kwargs is not None:
if 'id' in kwargs:
id = kwargs['id']
if isinstance(id, uuid.UUID):
self.id = id
else:
self.id = uuid.UUID(kwargs['id'])
if 'code' in kwargs:
self.code = str(kwargs['code'])
if 'name' in kwargs:
self.name = str(kwargs['name'])
if 'type' in kwargs:
t = kwargs['type']
if isinstance(t, SetType):
self.type = t
else:
self.type = SetType.parse(str(t))
if 'release_date' in kwargs:
rd = kwargs['release_date']
if isinstance(rd, datetime):
self.release_date = rd
else:
self.release_date = datetime.fromisoformat(str(rd))
            if 'number' in kwargs:
                self.number = kwargs['number']
if 'block' in kwargs:
self.block = str(kwargs['block'])
if 'parent_set' in kwargs:
self.parent_set = str(kwargs['parent_set'])
if 'card_count' in kwargs:
self.card_count = int(kwargs['card_count'])
if 'digital' in kwargs:
self.digital = bool(kwargs['digital'])
if 'foil_only' in kwargs:
self.foil_only = bool(kwargs['foil_only'])
if 'nonfoil_only' in kwargs:
self.nonfoil_only = bool(kwargs['nonfoil_only'])
def __hash__(self) -> int:
return hash((self.id, self.code))
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return (self.id, self.code) == (other.id, other.code)
def __ne__(self, other):
if not isinstance(other, Set):
return NotImplemented
return not self.__eq__(other)
def __lt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return (self.type.name, self.release_date, self.name) < (other.type.name, other.release_date, other.name)
def __le__(self, other):
if not isinstance(other, Set):
return NotImplemented
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
return not self.__lt__(other)
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return not self.__le__(other)
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
s = 'Set(id={!r}, code={!r}, name={!r}, type={!r}, release_date={!r}, '
s += 'block={!r}, parent_set={!r}, card_count={!r}, digital={!r}, '
s += 'foil_only={!r}, nonfoil_only={!r})'
return s.format(
self.id,
self.code,
self.name,
self.type,
self.release_date,
self.block,
self.parent_set,
self.card_count,
self.digital,
self.foil_only,
self.nonfoil_only,
)
@property
def has_foils(self) -> bool:
return not self.nonfoil_only
@property
def has_nonfoils(self) -> bool:
return not self.foil_only
def to_dict(self) -> typing.Dict[str, typing.Any]:
d = {
'id': str(self.id),
'code': self.code,
'name': self.name,
'type': self.type,
'release_date': self.release_date.isoformat(),
'block': self.block,
'parent_set': self.parent_set,
'card_count': self.card_count,
'digital': self.digital,
'foil_only': self.foil_only,
'nonfoil_only': self.nonfoil_only
}
return d
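# Illustrative usage sketch (not part of the original file); the field values
# below are placeholders, not real Scryfall data.
if __name__ == '__main__':
    demo_set = Set(
        id='00000000-0000-0000-0000-000000000001',
        code='xyz',
        name='Example Set',
        type='expansion',
        release_date='2020-01-01',
        card_count=100,
    )
    print(repr(demo_set))
    print(demo_set.has_foils, demo_set.has_nonfoils)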
|
python
|
# coding=utf8
import re
from decimal import Decimal
from typing import Union
MAX_VALUE_LIMIT = 1000000000000 # 10^12
LOWER_UNITS = '千百十亿千百十万千百十_'
LOWER_DIGITS = '零一二三四五六七八九'
UPPER_UNITS = '仟佰拾亿仟佰拾万仟佰拾_'
UPPER_DIGITS = '零壹贰叁肆伍陆柒捌玖'
class ChineseNumbers:
RULES = [
(r'一十', '十'),
(r'零[千百十]', '零'),
(r'零{2,}', '零'),
(r'零([亿|万])', r'\g<1>'),
(r'亿零{0,3}万', '亿'),
(r'零?_', ''),
]
@staticmethod
def measure_number(num: Union[int, str]) -> str:
if isinstance(num, str):
_n = int(num)
else:
_n = num
if _n < 0 or _n >= MAX_VALUE_LIMIT:
raise ValueError('Out of range')
num_str = str(num)
capital_str = ''.join([LOWER_DIGITS[int(i)] for i in num_str])
s_units = LOWER_UNITS[len(LOWER_UNITS) - len(num_str):]
o = ''.join('{}{}'.format(u, d) for u, d in zip(capital_str, s_units))
for p, d in ChineseNumbers.RULES:
o = re.sub(p, d, o)
        if 10 <= _n < 20:
            o = o.replace('一十', '十')
return o
@staticmethod
def order_number(num: Union[int, str]) -> str:
val = ChineseNumbers.measure_number(num)
return val.replace('零', '〇')
@staticmethod
def to_chinese_number(num: Union[int, str], upper: bool = False, order: bool = False) -> str:
if order:
lower_string = ChineseNumbers.order_number(num)
else:
lower_string = ChineseNumbers.measure_number(num)
if upper:
for _ld, _ud in zip(LOWER_DIGITS + LOWER_UNITS[:3], UPPER_DIGITS + UPPER_UNITS[:3]):
lower_string = lower_string.replace(_ld, _ud)
return lower_string
class FinanceNumbers:
RULES = [
(r'零角零分$', '整'),
(r'零[仟佰拾]', '零'),
(r'零{2,}', '零'),
(r'零([亿|万])', r'\g<1>'),
(r'零+元', '元'),
(r'亿零{0,3}万', '亿'),
(r'^元', '零元')
]
@staticmethod
def to_capital_str(num: Union[int, float, Decimal, str]) -> str:
units = UPPER_UNITS[:-1] + '元角分'
if isinstance(num, str):
_n = Decimal(num)
else:
_n = num
if _n < 0 or _n >= MAX_VALUE_LIMIT:
raise ValueError('Out of range')
num_str = str(num) + '00'
dot_pos = num_str.find('.')
if dot_pos > -1:
num_str = num_str[:dot_pos] + num_str[dot_pos + 1:dot_pos + 3]
capital_str = ''.join([UPPER_DIGITS[int(i)] for i in num_str])
s_units = units[len(units) - len(num_str):]
o = ''.join('{}{}'.format(u, d) for u, d in zip(capital_str, s_units))
for p, d in FinanceNumbers.RULES:
o = re.sub(p, d, o)
return o
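# Illustrative usage sketch (not part of the original file).
if __name__ == '__main__':
    print(ChineseNumbers.to_chinese_number(10600))              # 一万零六百
    print(ChineseNumbers.to_chinese_number(10600, upper=True))  # 壹万零陆佰
    print(FinanceNumbers.to_capital_str('1234.56'))             # 壹仟贰佰叁拾肆元伍角陆分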
|
python
|
"""
Configuration module for ladim
"""
# ----------------------------------
# Bjørn Ådlandsvik <[email protected]>
# Institute of Marine Research
# 2017-01-17
# ----------------------------------
# import datetime
import logging
from typing import Dict, Any
import numpy as np
import yaml
import yaml.parser
from netCDF4 import Dataset, num2date
Config = Dict[str, Any] # type of the config dictionary
def configure_ibm(conf: Dict[str, Any]) -> Config:
"""Configure the IBM module
Input: Raw conf dictionary from configuration file
Return: Dictionary with IBM configuration
If an IBM is used, check that module name is present
Special treatment for the variables item
Other items are stored for the IBM module
"""
logging.info("Configuration: IBM")
if conf is None: # No ibm section
return {}
D = conf.get("ibm") # Empty ibm section
if D is None:
return {}
# Mandatory: module name (or obsolete ibm_module)
if "module" not in D:
if "ibm_module" in D:
D["module"] = D.pop("ibm_module")
else:
logging.error("No IBM module specified")
raise SystemExit(1)
logging.info(f' {"module":15s}: {D["module"]}')
# The variables item
if "variables" not in D:
if "ibm_variables" in D:
D["variables"] = D.pop("ibm_variables")
# ibm_variables may live under state (obsolete)
elif "state" in conf and conf["state"] is not None:
if "ibm_variables" in conf.get("state", dict()):
D["variables"] = conf["state"]["ibm_variables"]
else:
D["variables"] = []
for key in D:
if key != "module":
logging.info(f" {key:15s}: {D[key]}")
return D
def configure_gridforce(conf: Dict[str, Any]) -> Config:
"""Parse gridforce related info and pass on
Input: raw conf dictionary from configuration file
Return: dictionary with gridforce configuration
"""
logging.info("Configuration: gridforce")
if conf is None:
logging.error("No gridforce section in configuration file")
raise SystemExit(1)
D = conf.get("gridforce")
if D is None:
logging.error("Empty gridforce section in configuration file")
raise SystemExit(1)
# module is the only mandatory field
if "module" not in D:
logging.error("No gridforce module specified")
raise SystemExit(1)
logging.info(f' {"module":15s}: {D["module"]}')
    # Backwards compatibility (for ROMS.py)
if "files" in conf and conf["files"] is not None:
if "grid_file" in conf["files"]:
# Give grid_file under gridforce highest priority
if "grid_file" not in D:
D["grid_file"] = conf["files"]["grid_file"]
if "input_file" in conf["files"]:
# Give input_file under gridforce highest priority
if "input_file" not in D:
D["input_file"] = conf["files"]["input_file"]
for key in D:
if key != "module":
logging.info(f" {key:15s}: {D[key]}")
return D
# ---------------------------------------
def configure(config_stream) -> Config:
"""The main configuration handling function
Input: Name of configuration file in yaml format
Returns: Configuration dictionary
"""
config: Config = dict()
# --- Read the configuration file ---
try:
conf = yaml.safe_load(config_stream)
except yaml.parser.ParserError:
logging.critical("Can not parse configuration")
raise SystemExit(2)
# ----------------
# Time control
# ----------------
logging.info("Configuration: Time Control")
for name in ["start_time", "stop_time"]:
config[name] = np.datetime64(conf["time_control"][name]).astype("M8[s]")
logging.info(f" {name.replace('_', ' '):15s}: {config[name]}")
# reference_time, default = start_time
config["reference_time"] = np.datetime64(
conf["time_control"].get("reference_time", config["start_time"])
).astype("M8[s]")
logging.info(f' {"reference time":15s}: {config["reference_time"]}')
# -------------
# Files
# -------------
logging.info("Configuration: Files")
logging.info(f' {"config_stream":15s}: {config_stream}')
for name in ["particle_release_file", "output_file"]:
config[name] = conf["files"][name]
logging.info(f" {name:15s}: {config[name]}")
try:
config["warm_start_file"] = conf["files"]["warm_start_file"]
config["start"] = "warm"
logging.info(f' {"Warm start from":15s}: {config["warm_start_file"]}')
except KeyError:
config["start"] = "cold"
config["warm_start_file"] = ""
# Override start time for warm start
if config["start"] == "warm":
try:
nc = Dataset(config["warm_start_file"])
except (FileNotFoundError, OSError):
logging.error(f"Could not open warm start file,{config['warm_start_file']}")
raise SystemExit(1)
tvar = nc.variables["time"]
# Use last record in restart file
warm_start_time = np.datetime64(num2date(tvar[-1], tvar.units))
warm_start_time = warm_start_time.astype("M8[s]")
config["start_time"] = warm_start_time
logging.info(f" Warm start at {warm_start_time}")
    # Variables needed by restart, might be changed
# default should be instance variables among release variables
try:
warm_start_variables = conf["state"]["warm_start_variables"]
except KeyError:
warm_start_variables = ["X", "Y", "Z"]
config["warm_start_variables"] = warm_start_variables
# --- Time stepping ---
logging.info("Configuration: Time Stepping")
# Read time step and convert to seconds
dt = np.timedelta64(*tuple(conf["numerics"]["dt"]))
config["dt"] = int(dt.astype("m8[s]").astype("int"))
config["simulation_time"] = np.timedelta64(
config["stop_time"] - config["start_time"], "s"
).astype("int")
config["numsteps"] = config["simulation_time"] // config["dt"]
logging.info(f' {"dt":15s}: {config["dt"]} seconds')
logging.info(
f' {"simulation time":15s}: {config["simulation_time"] // 3600} hours'
)
logging.info(f' {"number of time steps":15s}: {config["numsteps"]}')
# --- Grid ---
config["gridforce"] = configure_gridforce(conf)
# --- Forcing ---
try:
config["ibm_forcing"] = conf["gridforce"]["ibm_forcing"]
except (KeyError, TypeError):
config["ibm_forcing"] = []
# ibm_forcing used to be a dictionary
if isinstance(config["ibm_forcing"], dict):
config["ibm_forcing"] = list(config["ibm_forcing"].keys())
logging.info(f' {"ibm_forcing":15s}: {config["ibm_forcing"]}')
# --- IBM ---
config["ibm"] = configure_ibm(conf)
# Make obsolete
config["ibm_variables"] = config["ibm"].get("variables", [])
config["ibm_module"] = config["ibm"].get("module")
# --- Particle release ---
logging.info("Configuration: Particle Releaser")
prelease = conf["particle_release"]
try:
config["release_type"] = prelease["release_type"]
except KeyError:
config["release_type"] = "discrete"
logging.info(f' {"release_type":15s}: {config["release_type"]}')
if config["release_type"] == "continuous":
config["release_frequency"] = np.timedelta64(
*tuple(prelease["release_frequency"])
)
logging.info(
f' {"release_frequency":11s}: {str(config["release_frequency"])}'
)
config["release_format"] = conf["particle_release"]["variables"]
config["release_dtype"] = dict()
# Map from str to converter
type_mapping = dict(int=int, float=float, time=np.datetime64, str=str)
for name in config["release_format"]:
config["release_dtype"][name] = type_mapping[
conf["particle_release"].get(name, "float")
]
logging.info(f' {name:15s}: {config["release_dtype"][name]}')
config["particle_variables"] = prelease["particle_variables"]
# --- Model state ---
# logging.info("Configuration: Model State Variables")
# -----------------
# Output control
# -----------------
logging.info("Configuration: Output Control")
try:
output_format = conf["output_variables"]["format"]
except KeyError:
output_format = "NETCDF3_64BIT_OFFSET"
config["output_format"] = output_format
logging.info(f' {"output_format":15s}: {config["output_format"]}')
# Skip output of initial state, useful for restart
# with cold start the default is False
# with warm start, the default is true
try:
skip_initial = conf["output_variables"]["skip_initial_output"]
except KeyError:
skip_initial = config["start"] == "warm"
config["skip_initial"] = skip_initial
logging.info(f" {'Skip inital output':15s}: {skip_initial}")
try:
numrec = conf["output_variables"]["numrec"]
except KeyError:
numrec = 0
config["output_numrec"] = numrec
logging.info(f' {"output_numrec":15s}: {config["output_numrec"]}')
outper = np.timedelta64(*tuple(conf["output_variables"]["outper"]))
outper = outper.astype("m8[s]").astype("int") // config["dt"]
config["output_period"] = outper
logging.info(f' {"output_period":15s}: {config["output_period"]} timesteps')
config["num_output"] = 1 + config["numsteps"] // config["output_period"]
logging.info(f' {"numsteps":15s}: {config["numsteps"]}')
config["output_particle"] = conf["output_variables"]["particle"]
config["output_instance"] = conf["output_variables"]["instance"]
config["nc_attributes"] = dict()
for name in config["output_particle"] + config["output_instance"]:
value = conf["output_variables"][name]
if "units" in value:
if value["units"] == "seconds since reference_time":
timeref = str(config["reference_time"]).replace("T", " ")
value["units"] = f"seconds since {timeref}"
config["nc_attributes"][name] = conf["output_variables"][name]
logging.info(" particle variables")
for name in config["output_particle"]:
logging.info(8 * " " + name)
for item in config["nc_attributes"][name].items():
logging.info(12 * " " + "{:11s}: {:s}".format(*item))
logging.info(" particle instance variables")
for name in config["output_instance"]:
logging.info(8 * " " + name)
for item in config["nc_attributes"][name].items():
logging.info(12 * " " + "{:11s}: {:s}".format(*item))
# --- Numerics ---
# dt belongs here, but is already read
logging.info("Configuration: Numerics")
try:
config["advection"] = conf["numerics"]["advection"]
except KeyError:
config["advection"] = "RK4"
logging.info(f' {"advection":15s}: {config["advection"]}')
try:
diffusion = conf["numerics"]["diffusion"]
except KeyError:
diffusion = 0.0
if diffusion > 0:
config["diffusion"] = True
config["diffusion_coefficient"] = diffusion
logging.info(
f' {"diffusion coefficient":15s}: {config["diffusion_coefficient"]}'
)
else:
config["diffusion"] = False
logging.info(" no diffusion")
return config
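# Illustrative sketch (not part of the original file): a minimal YAML layout
# implied by configure() above. Section and key names follow the parsing code;
# the values are placeholders, not a validated model setup.
_EXAMPLE_CONFIG = """
time_control:
    start_time: 2017-01-01 00:00:00
    stop_time: 2017-01-02 00:00:00
files:
    particle_release_file: release.rls
    output_file: out.nc
gridforce:
    module: ladim.gridforce.ROMS
    input_file: ocean_avg.nc
numerics:
    dt: [1, h]
particle_release:
    variables: [release_time, X, Y, Z]
    release_time: time
    particle_variables: [release_time]
output_variables:
    outper: [3, h]
    particle: [release_time]
    instance: [pid, X, Y, Z]
    release_time: {units: seconds since reference_time}
    pid: {long_name: particle identifier}
    X: {long_name: particle X-coordinate}
    Y: {long_name: particle Y-coordinate}
    Z: {long_name: particle depth, units: m}
"""
if __name__ == "__main__":
    print(configure(_EXAMPLE_CONFIG)["numsteps"])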
|
python
|
# TODO: put the name flexstring in the Class.
# Class is not "Named" and its names are not interned.
# Sym continues like today. Meth is named.
import os, re, sys
import collections
from logging import info
# Tuning.
MEMORY_LEN = 0x8000  # Somewhat arbitrary.
SYM_VEC_LEN = 256
CLASS_VEC_LEN = 256
# Leave a little gap for future overhead.
MAX_OBJ_SIZE = 258
MAX_FLEX_BYTES = 256
MAX_FLEX_PTRS = 128
# Target memory.
Memory = MEMORY_LEN * ['#']
SymVec = SYM_VEC_LEN * [0]
ClassVec = CLASS_VEC_LEN * [0]
# Compiler state.
OpList = []
OpNums = {}
Method = collections.defaultdict(dict)
Op = {}
# Util
def Hi(x): return 255 & (x>>8)
def Lo(x): return 255 & x
##### LEX
LEX_INT = re.compile('(-?[0-9]+|[$][0-9a-fA-F]+)').match
LEX_COLON = re.compile('(([A-Za-z][A-Za-z0-9]*)\\s*[:])').match
LEX_IDENT = re.compile('([A-Za-z][A-Za-z0-9]*)').match
LEX_MULOP = re.compile('([*]|/|%)').match
LEX_ADDOP = re.compile('([+]|-)').match
LEX_RELOP = re.compile('(<|>|==|!=|<=|>=)').match
LEX_PUNCT = re.compile('([():,.;=])').match
LEX_WHITE = re.compile('([ \\t\\n\\r]*)').match
PATTERNS = [('C',LEX_COLON), ('I',LEX_INT), ('W',LEX_IDENT), ('M',LEX_MULOP), ('A',LEX_ADDOP), ('R',LEX_RELOP), ('P',LEX_PUNCT)]
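# Each token is a (type, text, position) tuple; type is one of the single-letter
# codes above ('C' keyword-colon, 'I' int, 'W' word, 'M'/'A'/'R' operators,
# 'P' punctuation) or 'Z' for end of input.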
class Lex(object):
def __init__(self, source):
self.source = source
self.n = len(source)
self.i = 0
        print('Initial', self.source, self.i, self.n)
self.Advance()
def Advance(self):
self.token = self.Next()
def Next(self):
if self.i == self.n:
            print('Next', 60, ('Z', '', self.i))
return ('Z', '', self.i)
rest = self.source[self.i:]
white = LEX_WHITE(rest)
if white:
self.i += len(white.group(1))
if self.i == self.n:
                print('Next', 69, ('Z', '', self.i))
return ('Z', '', self.i)
rest = self.source[self.i:]
for typ,pat in PATTERNS:
m = pat(rest)
if m:
self.i += len(m.group(1))
                print('Next', 78, (typ, m.group(1), self.i))
return (typ, m.group(1), self.i)
raise Exception('Cannot lex rest: %s' % repr(rest))
class PExpr(object):
pass
class PSeq(PExpr):
def __init__(self, exprs):
self.exprs = exprs
def __str__(self):
return '{%s}' % ' ; '.join(str(e) for e in self.exprs)
def visit(self, v):
v.visitSeq(self)
class PAssign(PExpr):
def __init__(self, varz, expr):
self.vars = varz
self.expr = expr
def __str__(self):
return '%s= %s' % (self.vars, self.expr)
def visit(self, v):
v.visitAssign(self)
class PList(PExpr):
def __init__(self, exprs):
self.exprs = exprs
def __str__(self):
return '[%s]' % ' , '.join(str(e) for e in self.exprs)
def visit(self, v):
v.visitList(self)
class PVar(PExpr):
def __init__(self, s):
self.s = s.upper()
def __str__(self):
return '%s' % self.s
def visit(self, v):
v.visitVar(self)
class PInt(PExpr):
def __init__(self, n):
self.n = n
def __str__(self):
return '%d' % self.n
def visit(self, v):
v.visitInt(self)
class PUnary(PExpr):
def __init__(self, r, meth):
self.r = r
self.meth = meth.upper()
def __str__(self):
return '%s %s' % (self.r, self.meth)
def visit(self, v):
v.visitUnary(self)
class PMul(PExpr):
def __init__(self, r, meth, a):
self.r = r
self.meth = meth
self.a = a
def __str__(self):
return '(%s %s %s)' % (self.r, self.meth, self.a)
def visit(self, v):
v.visitMul(self)
class PAdd(PExpr):
def __init__(self, r, meth, a):
self.r = r
self.meth = meth
self.a = a
def __str__(self):
return '(%s %s %s)' % (self.r, self.meth, self.a)
def visit(self, v):
v.visitAdd(self)
class PRel(PExpr):
def __init__(self, r, meth, a):
self.r = r
self.meth = meth
self.a = a
def __str__(self):
return '(%s %s %s)' % (self.r, self.meth, self.a)
def visit(self, v):
v.visitRel(self)
class PKeyword(PExpr):
def __init__(self, r, meth, args):
self.r = r
self.meth = meth.upper()
self.args = args
def __str__(self):
z = '( (%s) ' % self.r
for k,v in zip(self.meth.split(':'), [str(a) for a in self.args]):
z += '%s: %s ' % (k, v)
return z + ')'
def visit(self, v):
v.visitKeyword(self)
class PMacro(PExpr):
def __init__(self, keywords, varz, exprs):
self.keywords = keywords
self.vars = varz
self.exprs = exprs
def __str__(self):
z = ''
for k,v,e in zip(self.keywords, self.vars, self.exprs):
z += '%s(%s%s)' % (k, ('%s:' % v if v else ''), e)
return z
def visit(self, v):
v.visitMacro(self)
class Parser(object):
def __init__(self, source):
self.source = source
self.lex = Lex(source)
def Parse(self):
seq = self.ParseSeq()
typ, s, i = self.lex.token
if typ != 'Z':
print('Extra stuff: %s' % repr((typ, s, i)))
raise Exception('Extra stuff: %s' % repr(self.source[i:]))
return seq
def ParseSeq(self):
z = []
while True:
a = self.ParseAssign()
z.append(a)
typ, s, i = self.lex.token
if typ=='Z' or s==')' or s=='=':
break
elif s=='.' or s==';':
self.lex.Advance()
typ, s, i = self.lex.token
if typ=='Z' or s==')' or s=='=': # If trailing "."
break
else:
raise Exception('EXPECTED EOS or ")" or "." or ";" AFTER %s BEFORE %s' % (repr(self.source[:i]), repr(self.source[i:])))
return z[0] if len(z)==1 else PSeq(z)
def ParseAssign(self):
a = self.ParseList()
typ, s, i = self.lex.token
if s == '=':
if not isinstance(a, PList) and not isinstance(a, PVar):
raise Exception('Bad target of assignment AFTER %s BEFORE %s' % (repr(self.source[:i]), repr(self.source[i:])))
if isinstance(a, PList):
for b in a.exprs:
if not isinstance(b, PVar):
raise Exception('Bad subtarget "%s" of assignment AFTER %s BEFORE %s' % (b, repr(self.source[:i]), repr(self.source[i:])))
self.lex.Advance()
b = self.ParseList()
return PAssign(a, b)
return a
def ParseList(self):
z = []
while True:
a = self.ParseKeyword()
z.append(a)
typ, s, i = self.lex.token
if s==',':
self.lex.Advance()
else:
break
return z[0] if len(z)==1 else PList(z)
def ParseMacro(self, name):
typ, s, i = self.lex.token
keywords = [name]
varz = []
exprs = []
while True:
if varz: # Not the first time:
keywords.append(s)
# next comes the open paren
self.lex.Advance()
typ, s, i = self.lex.token
if s != '(':
raise Exception('Expected "(" in macro AFTER %s BEFORE %s' % (repr(self.source[:i]), repr(self.source[i:])))
self.lex.Advance()
typ, s, i = self.lex.token
var = None
if typ == 'C':
var = LEX_COLON(s).group(2) # extract word.
self.lex.Advance()
ex = self.ParseSeq()
varz.append(var)
exprs.append(ex)
typ, s, i = self.lex.token
if s != ')':
raise Exception('Expected ")" in macro AFTER %s BEFORE %s' % (repr(self.source[:i]), repr(self.source[i:])))
self.lex.Advance()
typ, s, i = self.lex.token
if typ != 'W':
break
return PMacro(keywords, varz, exprs)
def ParsePrim(self):
typ, s, i = self.lex.token
if typ == 'I':
self.lex.Advance()
val = int(s[1:],base=16) if s[0]=='$' else int(s)
return PInt(val)
elif typ == 'W':
name = s
self.lex.Advance()
typ, s, i = self.lex.token
if s == '(':
# Macro syntax
self.lex.Advance()
return self.ParseMacro(name)
else:
# Just a var name
return PVar(name)
elif s == '(':
self.lex.Advance()
seq = self.ParseSeq()
typ, s, i = self.lex.token
if s != ')':
raise Exception('EXPECTED ")" AFTER %s BEFORE %s' % (repr(self.source[:i]), repr(self.source[i:])))
self.lex.Advance()
return seq
else:
raise Exception('UNEXPECTED prim: %s' % repr((typ, s, i)))
def ParseKeyword(self):
rargs = [ self.ParseRel() ] # rargs are receiver and args.
keywords = ''
while True:
typ, s, i = self.lex.token
if typ == 'C': # a word and a colon
s = LEX_COLON(s).group(2) # extract word.
keywords += s + ':'
self.lex.Advance()
rargs.append(self.ParseRel())
else:
break
if len(rargs) > 1:
return PKeyword(rargs[0], keywords, rargs[1:])
else:
return rargs[0]
def ParseRel(self):
a = self.ParseAdd()
typ, s, i = self.lex.token
if typ == 'R':
op = s
self.lex.Advance()
b = self.ParseAdd()
return PRel(a, op, b)
return a
def ParseAdd(self):
a = self.ParseMul()
typ, s, i = self.lex.token
if typ == 'A':
op = s
self.lex.Advance()
b = self.ParseMul()
return PAdd(a, op, b)
return a
def ParseMul(self):
a = self.ParseUnary()
typ, s, i = self.lex.token
if typ == 'M':
op = s
self.lex.Advance()
b = self.ParseUnary()
return PMul(a, op, b)
return a
def ParseUnary(self):
a = self.ParsePrim()
typ, s, i = self.lex.token
while typ == 'W':
a = PUnary(a, s)
self.lex.Advance()
typ, s, i = self.lex.token
return a
for s in [
'4',
'4 - 6',
'sys print:( x square + y square ) sqrt ',
'sys print:( x square + y square ) sqrt on: stdout',
'2. 4. 6. 64 sqrt',
'(foo foo; bar bar; 2, 4, 6, 64 sqrt) len',
'(1,2,3),(4,5,6),(7,8,9)',
'(1,2,3)x,(4,5,6)y,(7,8,9)z',
'r, s, t = (1,2,3)x,(4,5,6)y,(7,8,9)z',
'z = IF(a<2)THEN(a+0)ELSE(demo recurse: a - 1)',
'a,b,c = FOR(i: words)MAP( IF(i<0)THEN(i neg) ELSE (i) )',
]:
    print('<<< %s' % s)
    print('>>> %s' % str(Parser(s).Parse()))
class LocalsVisitor(object):
def __init__(self):
self.locals = set()
def visitSeq(self, p):
for e in p.exprs:
e.visit(self)
def visitAssign(self, p):
p.expr.visit(self)
if isinstance(p.vars, list):
for v in p.vars:
self.locals.add(v.s)
elif isinstance(p.vars, PVar):
self.locals.add(p.vars.s)
else:
            raise TypeError('unexpected assignment target: %r' % type(p))
def visitList(self, p):
for e in p.exprs:
e.visit(self)
def visitVar(self, p):
pass
def visitInt(self, p):
pass
def visitUnary(self, p):
p.r.visit(self)
def visitMul(self, p):
p.r.visit(self)
p.a.visit(self)
def visitAdd(self, p):
p.r.visit(self)
p.a.visit(self)
def visitRel(self, p):
p.r.visit(self)
p.a.visit(self)
def visitKeyword(self, p):
p.r.visit(self)
for e in p.args:
e.visit(self)
def visitMacro(self, p):
for v in p.vars:
self.locals.add(v)
for e in p.exprs:
e.visit(self)
class CompilerVisitor(object):
def __init__(self, top, cls):
self.top = top
self.cls = cls
self.explain = []
self.codes = []
self.slots = {}
self.flex = None
self.localindex = {}
for k,offset in self.cls.bslots:
# Strip prefix b_ from k.
self.slots[k[2:].upper()] = ('b', offset)
for k,offset in self.cls.pslots:
# Strip prefix p_ from k.
self.slots[k[2:].upper()] = ('p', offset)
for k,offset in self.cls.flexes:
# Like ('FLEX_BYTES', 2) or ('FLEX_PTRS', 2).
self.flex = (k, offset)
# Find all names assigned. Filter out the slots, to get locals.
# (This is like the python rule: instead of declaring locals, assign them.)
lv = LocalsVisitor()
top.visit(lv)
self.locals = sorted([e for e in lv.locals if e not in self.slots])
for i,var in zip(range(len(self.locals)), self.locals):
self.localindex[var] = i
def AddLocal(self, var):
i = len(self.locals)
self.localindex[var] = i
self.locals.append(var)
return i
def visitSeq(self, p):
last = p.exprs.pop()
for e in p.exprs:
e.visit(self)
self.codes.append('drop') # Drop middle results.
last.visit(self) # the last one returns the result.
def visitAssign(self, p):
p.expr.visit(self)
self.codes.append('dup') # one to assign and one for result.
if isinstance(p.vars, list):
            raise NotImplementedError('TODO: list deconstruction')
var = p.vars.s
        print('visitAssign:', p, '|', p.vars, '|', p.expr)
        print('self.slots,var:', self.slots, '|', var, type(var))
slot = self.slots.get(var)
if slot:
kind, offset = slot
if kind=='b':
self.codes.append('self')
self.codes.append('putb_b')
self.codes.append(offset)
elif kind=='p':
self.codes.append('self')
self.codes.append('putp_b')
self.codes.append(offset)
else:
                raise Exception('bad slot kind')
else:
# Not a slot, so it should be a local var.
i = self.localindex[var]
if i<4 and False:
self.codes.append('sto%d' % i)
else:
self.codes.append('sto_b')
self.codes.append(i)
def visitList(self, p):
        raise NotImplementedError('TODO')
def visitVar(self, p):
var = p.s
slot = self.slots.get(var)
cls = ClassDict.get(var)
if var in ['SELF','SUPER','TRUE','FALSE','NIL','A','B','C','D']:
self.codes.append(var)
elif slot:
kind, offset = slot
if kind=='b':
self.codes.append('self')
self.codes.append('getb_b')
self.codes.append(offset)
elif kind=='p':
self.codes.append('self')
self.codes.append('getp_b')
self.codes.append(offset)
else:
raise Exception('bad slot kind: %r' % kind)
elif cls:
self.codes.append('cls_b')
self.codes.append(cls.b_this)
else:
i = self.localindex[var]
if i<4 and False:
self.codes.append('rcl%d' % i)
else:
self.codes.append('rcl_b')
self.codes.append(i)
def visitInt(self, p):
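# Small integers are stored as oops with the low bit set: oop = (n << 1) | 1.
# For example 5 compiles to 'lit_b 11' (0x0B) and -3 compiles to 'lit_b 251'
# (0xFB), which the lit_b opcode sign-extends back to 0xFFFB at run time.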
n = p.n
if -64 <= n < 64:
if n<0: n+=256
self.codes.append('lit_b')
self.codes.append(255&((n<<1)|1))
else:
if n<0: n+=65536
self.codes.append('lit_w')
self.codes.append(255&(n>>7))
self.codes.append(255&((n<<1)|1))
def visitUnary(self, p):
p.r.visit(self)
self.codes.append('call0_b')
self.codes.append(InternDict[p.meth])
def visitMul(self, p):
p.a.visit(self)
p.r.visit(self)
self.codes.append('call1_b')
self.codes.append(InternDict[p.meth])
def visitAdd(self, p):
p.a.visit(self)
p.r.visit(self)
self.codes.append('call1_b')
self.codes.append(InternDict[p.meth])
def visitRel(self, p):
p.a.visit(self)
p.r.visit(self)
self.codes.append('call1_b')
self.codes.append(InternDict[p.meth])
def visitKeyword(self, p):
args = p.args[:]
args.reverse()
for a in args:
a.visit(self)
p.r.visit(self)
self.codes.append('call%d_b' % len(args))
self.codes.append(InternDict[p.meth])
def visitMacro(self, p):
name = '_'.join(p.keywords)
macro = MACROS[name]
macro(self, p.vars, p.exprs)
_Serial = 0
def Serial():
global _Serial
_Serial += 1
return _Serial
def IfThenMacro(v, varz, exprs):
varz.append(None)
exprs.append(PVar('NIL'))
IfThenElseMacro(v, varz, exprs)
def IfThenElseMacro(v, varz, exprs):
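# Emits: <cond> bfalse->L1 <then> jump->L2 L1: <else> L2:
# The /bfalse/, /jump/ and /mark/ placeholders are turned into relative
# forward_/reverse_ branch opcodes later, by the fixup pass in CompileMethod.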
assert all([var is None for var in varz])
mark1 = Serial()
mark2 = Serial()
exprs[0].visit(v)
v.codes.append('/bfalse/%d' % mark1)
exprs[1].visit(v)
v.codes.append('/jump/%d' % mark2)
v.codes.append('/mark/%d' % mark1)
exprs[2].visit(v)
v.codes.append('/mark/%d' % mark2)
def WhileDoMacro(v, varz, exprs):
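# Emits: L1: <cond> bfalse->L2 <body> jump->L1 L2: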
assert all([var is None for var in varz])
mark1 = Serial()
mark2 = Serial()
v.codes.append('/mark/%d' % mark1)
exprs[0].visit(v)
v.codes.append('/bfalse/%d' % mark2)
exprs[1].visit(v)
v.codes.append('/jump/%d' % mark1)
v.codes.append('/mark/%d' % mark2)
def ForDoMacro(v, varz, exprs):
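# FOR(i: limit)DO(body): a hidden temp local holds the evaluated limit, the
# index local counts up from 0 (stored as oops, so incr_local_b adds 2), and
# the loop repeats while limit - i is still truthy.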
assert varz[0]
assert not varz[1]
# Create the local variable for Limit:
limit = '_tmp_%d' % Serial()
lim = v.AddLocal(limit.upper())
# Find the index variable.
ix = v.AddLocal(varz[0].upper())
# Store 0 in index.
v.codes.append('lit_b')
v.codes.append('1')
v.codes.append('sto_b')
v.codes.append(ix)
# Evaluate limit.
exprs[0].visit(v)
v.codes.append('sto_b')
v.codes.append(lim)
mark1 = Serial()
mark2 = Serial()
v.codes.append('/mark/%d' % mark1)
# Check for ix reached the limit.
v.codes.append('rcl_b')
v.codes.append(lim)
v.codes.append('rcl_b')
v.codes.append(ix)
v.codes.append('subtract')
v.codes.append('/bfalse/%d' % mark2)
exprs[1].visit(v)
v.codes.append('incr_local_b')
v.codes.append(ix)
v.codes.append('/jump/%d' % mark1)
v.codes.append('/mark/%d' % mark2)
MACROS = dict(
IF_THEN = IfThenMacro,
IF_THEN_ELSE = IfThenElseMacro,
WHILE_DO = WhileDoMacro,
FOR_DO = ForDoMacro,
)
def CompileToCodes(s, cls):
p = Parser(s).Parse()
v = CompilerVisitor(p, cls)
p.visit(v)
return v.codes, len(v.locals)
InternDict = {} # str to index.
def Intern(s):
n = InternDict.get(s)
if n is None: # 0 is a valid intern index (the empty string), so test for None.
n = len(InternDict)
InternDict[s] = n
return n
Intern("") # Empty string is intern index 0.
CLASS_PATTERN = re.compile("^@([A-Za-z0-9_:]+)$").match
SYM_PATTERN = re.compile("^#([A-Za-z0-9_:]+)$").match
INT_PATTERN = re.compile("^-?[0-9]+$").match
MARK_PATTERN = re.compile("^/([a-z]+)/([0-9]+)$").match
def EvalInt(s):
z = 0
for ch in s:
i = ord(ch) - ord('0')
if 0 <= i and i <= 9:
z = 10*z + i
else:
raise Exception('Bad decimal digit in string: %s' % s)
return z
def Num2Oop(x):
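# e.g. Num2Oop(5) == 11: the low bit tags the word as a small-integer oop.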
z = (x << 1) | 1
if z > 0xFFFF:
raise Exception('Num2Oop too big: %d.' % x)
return z
# Nicknames are just for debugging the compiler.
Nick = 0
def GetNick():
global Nick
Nick+=1
return Nick
# All objects that need copying into Mem.
Here = 0
MemorableList = []
class Memorable(object):
def __init__(self):
self.nick = GetNick()
def Reify(self):
global Here
assert len(getattr(self, 'flexbytes', [])) == getattr(self, 'flexsize', 0)
self.basesize = self.BaseByteSize() # not including flex len and flex bytes.
self.size = self.basesize
fs = getattr(self, 'flexsize', None)
if fs is not None:
self.size += fs
if self.size & 1:
self.padded = 1
self.size += 1 # Final size must be even.
else:
self.padded = 0
if self.size < 4:
self.padded = 4 - self.size
self.size = 4
if self.size > 256:
raise Exception("Object size too big: %d: %s", self.size, vars(self))
self.addr = Here
Here += self.size
MemorableList.append(self)
def Materialize(self):
print 'Materialize:', self
print 'Materialize:', vars(self)
for k,v in vars(self).items():
if k.startswith('b_'):
k2 = 'B_' + k[2:]
v2 = getattr(self, k2)
print self, k, v, k2, v2
Memory[self.addr + v2] = v
if k.startswith('p_'):
k2 = 'P_' + k[2:]
v2 = getattr(self, k2)
print self, k, v, k2, v2
if isinstance(v, Memorable):
Memory[self.addr + v2] = Hi(v.addr)
Memory[self.addr + v2 + 1] = Lo(v.addr)
elif type(v) is int:
Memory[self.addr + v2] = Hi(v)
Memory[self.addr + v2 + 1] = Lo(v)
else:
raise Exception('Weird kind: %s' % type(v))
fb = getattr(self, 'flexbytes', None)
print ':fb:', self.basesize, fb, self
if fb is not None:
i = 0
for b in fb:
Memory[self.addr + self.basesize + i] = b
i += 1
for i in range(self.padded):
Memory[self.addr + self.size - 1 - i] = '^'
def BaseByteSize(self):
self.Bslots = [k for k in dir(self) if k.startswith('B_')]
self.Pslots = [k for k in dir(self) if k.startswith('P_')]
z = len(self.Bslots) + 2*len(self.Pslots)
assert z <= MAX_OBJ_SIZE
return z
def __str__(self):
if hasattr(self, 'addr'):
return '<%s:%s:%04x>' % (self.__class__.__name__, self.nick, self.addr)
else:
return '<%s:%s:????>' % (self.__class__.__name__, self.nick)
def __repr__(self):
return self.__str__()
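# Slot-naming convention for the layouts below: B_* constants are byte-slot
# offsets (1 byte each, numbered from 0), P_* constants are pointer-slot
# offsets (2 bytes each, placed after the byte slots), and FLEX_BYTES/FLEX_PTRS
# mark where a variable-length area begins. FixSlotsOnClass verifies the
# B_/P_ numbering when the class objects are built.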
class Ur(Memorable): # Proto Object (for proxies).
B_gcsize = 0 # Garbage Collection size, for allocator and collector.
B_cls = 1 # Class Number.
class Obj(Ur): # Smalltalk root Object.
pass
class Num(Obj):
pass
class Int(Num): # Small 15-bit signed integer. Encoded in an oop with low bit set.
pass
class Addr(Num): # 16-bit unsigned integer.
B_hi = 2
B_lo = 3
class NilT(Obj): # class of nil
pass
class Bool(Obj):
pass
class TrueT(Bool): # class of true
pass
class FalseT(Bool): # class of false
pass
class Arr(Obj): # LowLevel: Flexible-length abstract object.
pass
class ArrByt(Arr): # LowLevel: Flexible-length bytes storage.
FLEX_BYTES = 2
class ArrPtr(Arr): # LowLevel: Flexible-length oops storage.
FLEX_PTRS = 2
class Tuple(ArrPtr): # Tuple: uses fixed FlexP.
pass
class Slice(Obj): # LowLevel: Slice of a Flx.
B_begin = 2
B_len = 3
B_intern = 4 # used in Sym for intern number.
P_guts = 5
class Vec(Slice): # Vector of Pointers.
pass
class Buf(Slice): # Buffer of Bytes.
pass
class Str(Buf):
pass
class Sym(Str):
pass
class Err(Buf):
pass
class Named(Obj): # Object with interned name.
B_name = 2 # Symbol index.
# The only linked lists we need is methods in a class, so no Link List class.
K_FLEX_BYTES = 1
K_FLEX_PTRS = 2
class Cls(Obj):
B_flags = 2 # e.g. K_FLEX_BYTES, K_FLEX_PTRS
B_bSize = 3 # Base size of instance in bytes.
B_numB = 4 # Number of byte slots.
B_numP = 5 # Number of Oop slots.
B_this = 6 # This class index.
B_partner = 7 # class to meta; meta to class.
P_sup = 8 # Superclass, by Pointer, for faster method dispatch.
P_meths = 10 # Head of linked list of meths.
FLEX_BYTES = 12 # For the name of the class, so it does not have to be interned.
class Metacls(Cls):
pass
class Meth(Named):
B_owner = 3 # Owning Class.
B_numL = 4 # num Locals.
P_next = 5 # Linked list of methods on this class.
FLEX_BYTES = 7
# Since bytecodes are built-in flex bytes, if you recompile, you may have to
# replace (and re-linked-list) the entire Meth object.
# Limit of about 250 bytecodes.
class Demo(Obj):
B_one = 2 # two byte fields: one, two.
B_two = 3
P_three = 4 # two oop fields: three, four.
P_four = 6
#### Stack layout
## [
## args
## 10 ]
## 8 Receiver
## 6 Selector Sym|Method, debug.
## 4 DE, argcount, debug.
## 2 ReturnPC
## 0 ReturnFP <--- fp
## -2 [
## locals
## ] <--- sp?
# Offsets from the frame pointer.
K_ARG4 = 16
K_ARG3 = 14
K_ARG2 = 12
K_ARG1 = 10
K_RCVR = 8 # conceptually, RCVR is like ARG0.
K_MSG = 6 # Needed for a debugger interpreting a stack.
K_DE = 4 # Could omit this. It makes viewing stacks easier.
K_RET_PC = 2
K_RET_FP = 0
K_LCL0 = -2
K_LCL1 = -4
K_LCL2 = -6
K_LCL3 = -8
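# Example: right after a 'call1_b', the callee's frame looks like (offsets
# from the new fp): fp+10 arg1, fp+8 receiver, fp+6 selector, fp+4 0xDE01,
# fp+2 return pc, fp+0 saved fp, then fp-2, fp-4, ... one nil per local.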
Method['URCLS']['new'] = '''C
word rcvr = W(fp + K_RCVR);
fprintf(stderr, "URCLS::new -- rcvr=%04x\\n", rcvr);
word z = MakeInstance(rcvr, 0);
Inspect(z, "URCLS new.");
PUSH(z);
'''
Method['URCLS']['new:'] = '''C
word rcvr = W(fp + K_RCVR);
word n = W(fp + K_ARG1);
fprintf(stderr, "URCLS::new:a -- rcvr=$%04x a==$%x=%d.\\n", rcvr, n, n);
word z = MakeInstance(rcvr, OOP2NUM(n));
Hex20("URCLS new: -->", n, z);
Inspect(z, "URCLS new:.");
PUSH(z);
'''
Method['UR']['same'] = 'B self a same'
Method['UR']['=='] = 'B self a same'
Method['UR']['must'] = 'B self must self' # Most objects are true.
Method['UR']['not'] = 'B self not' # Most objects are true.
Method['UR']['bytlen'] = 'B self bytlen'
Method['UR']['ptrlen'] = 'B self ptrlen'
Method['UR']['bytat:'] = 'B a self bytat'
Method['UR']['ptrat:'] = 'B a self ptrat'
Method['UR']['bytat:put:'] = 'B b a self bytatput nil'
Method['UR']['ptrat:put:'] = 'B b a self ptratput nil'
Method['ARRBYT']['len'] = 'B self bytlen'
Method['ARRBYT']['at:'] = 'B a self bytat'
Method['ARRBYT']['at:put:'] = 'B b a self bytatput nil' # value, index, receiver (receiver on top).
Method['ARRPTR']['len'] = 'B self ptrlen'
Method['ARRPTR']['at:'] = 'B a self ptrat'
Method['ARRPTR']['at:put:'] = 'B b a self ptratput nil' # value, index, receiver (receiver on top).
Method['DEMO']['run'] = '''B
lit_b 51 self #double: lit_w %d %d call dup show
lit_b 51 self #twice: lit_w %d %d call dup show
add dup show
''' % (0xDE, 0x01, 0xDE, 0x01)
Method['DEMO']['run2setup'] = '''T
acct = Demo new init.
acct balance show.
acct deposit: 10.
acct balance show.
acct deposit: 100.
acct balance show.
acct withdraw: 20.
acct balance show.
'''
Method['DEMO']['RUN2'] = '''T
self run2setup.
IF( 5 )THEN( 5 show ).
IF( true )THEN( 42 show )ELSE( 666 show ).
n = 3.
WHILE( n )DO( n show. n = n - 1. ).
FOR( i : 5 )DO( i show ).
p = ArrByt new: 5.
FOR( i : 5 )DO( p bytAt: i put: 10 + i ).
FOR( i : 5 )DO( (p bytAt: i) show ).
FOR( i : 5 )DO( ((p bytAt: i) == i) must ).
p bytLen show.
'''
Method['DEMO']['double:'] = 'B arg1 arg1 add ' # Using Bytecodes.
Method['DEMO']['twice:'] = 'T a + a ' # Using TerseTalk.
Method['DEMO']['init'] = 'T one = 0. self'
Method['DEMO']['deposit:'] = 'T one = one + a. nil'
Method['DEMO']['withdraw:'] = 'T one = one - a. nil'
Method['DEMO']['balance'] = 'T one'
Method['INT']['+'] = 'B self arg1 add '
Method['INT']['-'] = 'B self arg1 subtract '
Method['INT']['show'] = 'B self show self'
Op['stop'] = ' goto STOP; '
Op['self'] = ' PUSH(W(fp+K_RCVR));'
Op['arg1'] = ' PUSH(W(fp+K_ARG1));'
Op['arg2'] = ' PUSH(W(fp+K_ARG2));'
Op['arg3'] = ' PUSH(W(fp+K_ARG3));'
Op['a'] = ' PUSH(W(fp+K_ARG1));'
Op['b'] = ' PUSH(W(fp+K_ARG2));'
Op['c'] = ' PUSH(W(fp+K_ARG3));'
Op['d'] = ' PUSH(W(fp+K_ARG4));'
Op['cls_b'] = ' byte n = BYTE(pc); pc += 1; PUSH(ClassVec[n]); '
Op['clsof'] = ' word x = PEEK(0); POKE(0, CLASSOF(x)); '
Op['same'] = ' word x = POP(); word y = PEEK(0); POKE(0, (x==y));'
Op['bytlen'] = ''' // p -> len
word p = PEEK(0);
POKE(0, NUM2OOP(BytLen(p)));
'''
Op['ptrlen'] = ''' // p -> len
word p = PEEK(0);
POKE(0, NUM2OOP(PtrLen(p)));
'''
Op['bytat'] = ''' // p i -> b
word p = POP();
word i = OOP2NUM(PEEK(0));
POKE(0, NUM2OOP(BytAt(p, i)));
'''
Op['ptrat'] = '''
word p = POP();
word i = OOP2NUM(PEEK(0));
POKE(0, PtrAt(p, i));
'''
Op['bytatput'] = '''
word p = PEEK(0);
word i = OOP2NUM(PEEK(2));
word v = PEEK(4);
BytAtPut(p, i, v);
sp += 6;
'''
Op['ptratput'] = '''
word p = PEEK(0);
word i = OOP2NUM(PEEK(2));
word v = PEEK(4);
PtrAtPut(p, i, v);
sp += 6;
'''
Op['forward_jump_b'] = '''
byte n = BYTE(pc); pc += 1;
pc += n;
'''
Op['reverse_jump_b'] = '''
byte n = BYTE(pc); pc += 1;
pc -= n;
'''
Op['forward_bfalse_b'] = '''
byte n = BYTE(pc); pc += 1;
word x = POP();
if (!Truth(x)) {
pc += n;
}
'''
Op['reverse_bfalse_b'] = '''
byte n = BYTE(pc); pc += 1;
word x = POP();
if (!Truth(x)) {
pc -= n;
}
'''
# Get/Put pointer slots.
Op['getp_b'] = '''
byte n = BYTE(pc); pc += 1;
word obj = PEEK(0);
word x = W(obj + n);
POKE(0, x);
'''
Op['putp_b'] = '''
byte n = BYTE(pc); pc += 1;
word obj = POP();
word x = POP();
PUT_WORD(obj+n, x);
'''
# Get/Put byte slots: values are unsigned integers in 0..255.
Op['getb_b'] = '''
byte n = BYTE(pc); pc += 1;
word obj = PEEK(0);
byte x = B(obj + n);
POKE(0, NUM2OOP(x));
'''
Op['putb_b'] = '''
byte n = BYTE(pc); pc += 1;
word obj = POP();
word x = POP();
CHECK3(x&1, 1, x);
CHECK3(x&0xFE00, 0, x);
PUT_BYTE(obj+n, OOP2BYTE(x));
'''
# Store/Recall local variables.
Op['sto0'] = ' word w = POP(); PUT_WORD(fp-2, w);'
Op['sto1'] = ' word w = POP(); PUT_WORD(fp-4, w);'
Op['sto2'] = ' word w = POP(); PUT_WORD(fp-6, w);'
Op['sto3'] = ' word w = POP(); PUT_WORD(fp-8, w);'
Op['sto_b'] = ' byte n = BYTE(pc); pc += 1; word w = POP(); PUT_WORD(fp-2*(n+1), w);'
Op['rcl0'] = ' word w = W(fp-2); PUSH(w);'
Op['rcl1'] = ' word w = W(fp-4); PUSH(w);'
Op['rcl2'] = ' word w = W(fp-6); PUSH(w);'
Op['rcl3'] = ' word w = W(fp-8); PUSH(w);'
Op['rcl_b'] = ' byte n = BYTE(pc); pc += 1; word w = W(fp-2*(n+1)); PUSH(w);'
Op['incr_local_b'] = 'byte n = BYTE(pc); pc += 1; word p = fp-2*(n+1); word w = W(p); PUT_WORD(p, w+2);'
Op['true'] = ' PUSH(trueAddr);'
Op['false'] = ' PUSH(falseAddr);'
Op['nil'] = ' PUSH(nilAddr);'
Op['show'] = ' word w = POP(); printf(" ==$%04x=%u.==\\n", w, w); fflush(stdout); '
Op['lit2pcr'] = '''
PUSH(WORD(pc) - pc);
pc += 2;
'''
Op['lit1pcr'] = '''
byte n = BYTE(pc);
word w = ((word)n) | ((n & 128) ? 0xFF00U : 0x0000U); // SEX.
PUSH(w - pc);
pc += 1;
'''
Op['lit_w'] = '''
PUSH(WORD(pc));
pc += 2;
'''
Op['lit_b'] = '''
byte n = BYTE(pc);
word w = (0x80&n) ? (0xFF80 | (word)n) : (word)n; // SEX.
PUSH(w);
pc += 1;
'''
Op['sym_b'] = '''
byte n = BYTE(pc);
PUSH(SymVec[n]);
pc += 1;
'''
Op['get'] = '''
word off = PEEK(0);
POKE(0, WORD(fp+off));
'''
Op['drop'] = '''
sp += 2;
'''
Op['dup'] = '''
word top = PEEK(0);
PUSH(top);
'''
Op['lognum'] = '''
word id = POP();
word value = POP();
fprintf(stderr, "%04x:=:%04x ", id, value);
'''
Op['must'] = '''
word x = POP();
CHECK3(Truth(x), 1, pc);
'''
Op['not'] = '''
word x = PEEK(0);
POKE(0, Truth(x) ? trueAddr : falseAddr);
'''
Op['add'] = '''
word a = POP();
CHECK3(a&1, 1, a);
word b = PEEK(0);
CHECK3(b&1, 1, b);
POKE(0, (0xFFFE & a)+b);
'''
Op['subtract'] = '''
word a = POP();
CHECK3(a&1, 1, a);
word b = PEEK(0);
CHECK3(b&1, 1, b);
word nega = (~a) + 2;
fprintf(stderr, "subtract a=%04x b=%04x nega=%04x z=%04x\\n", a, b, nega, b+nega);
POKE(0, b+nega);
'''
Op['call0_b'] = '''
byte msg = BYTE(pc);
pc += 1;
word rcvr = PEEK(0);
PUSH(msg);
PUSH(0xDE00);
PUSH(pc);
PUSH(fp);
fprintf(stderr, "Old FP = $%04x\\n", fp);
fp = sp;
fprintf(stderr, "New FP = $%04x\\n", fp);
Hex20("STACK fp,pc,de,msg,rcvr...", sp, sp);
word meth = FindMethBySymbolNumber(rcvr, msg);
byte i;
byte numL = B(meth + METH_B_numL);
for (i=0; i<numL; i++) {
PUSH(nilAddr);
}
pc = meth + METH_FLEXSIZE;
PrintWhere();
'''
Op['call1_b'] = '''
byte msg = BYTE(pc);
pc += 1;
word rcvr = PEEK(0);
PUSH(msg);
PUSH(0xDE01);
PUSH(pc);
PUSH(fp);
fprintf(stderr, "Old FP = $%04x\\n", fp);
fp = sp;
fprintf(stderr, "New FP = $%04x\\n", fp);
Hex20("STACK fp,pc,de,msg,rcvr...", sp, sp);
word meth = FindMethBySymbolNumber(rcvr, msg);
byte i;
for (i=0; i<B(meth + METH_B_numL); i++) {
PUSH(nilAddr);
}
pc = meth + METH_FLEXSIZE;
PrintWhere();
'''
Op['call2_b'] = '''
byte msg = BYTE(pc);
pc += 1;
word rcvr = PEEK(0);
PUSH(msg);
PUSH(0xDE02); // This is all that changes..... TODO
PUSH(pc);
PUSH(fp);
fprintf(stderr, "Old FP = $%04x\\n", fp);
fp = sp;
fprintf(stderr, "New FP = $%04x\\n", fp);
Hex20("STACK fp,pc,de,msg,rcvr...", sp, sp);
word meth = FindMethBySymbolNumber(rcvr, msg);
byte i;
for (i=0; i<B(meth + METH_B_numL); i++) {
PUSH(nilAddr);
}
pc = meth + METH_FLEXSIZE;
PrintWhere();
'''
Op['call'] = '''
word rcvr = PEEK(4);
//Hex20("call--rcvr", rcvr, rcvr);
Inspect(rcvr, "call--rcvr");
word msg = PEEK(2);
//Hex20("call--msg", msg, -1);
PrintSymNum(msg, "call--msg");
Inspect(SymVec[msg], "call--msg");
word de = PEEK(0);
Hex20("call--de", de, -1);
CHECK3(de & 0xFFF0, 0xDE00, de);
PUSH(pc);
PUSH(fp);
fprintf(stderr, "Old FP = $%04x\\n", fp);
fp = sp;
fprintf(stderr, "New FP = $%04x\\n", fp);
Hex20("STACK fp,pc,de,msg,rcvr...", sp, sp);
word meth = FindMethBySymbolNumber(rcvr, msg);
Inspect(meth, "call--meth");
byte i;
byte num_locals = BF(meth, METH_B_numL);
fprintf(stderr, "Num Locals = %d\\n", num_locals);
for (i=0; i<num_locals; i++) {
PUSH(nilAddr);
}
if (BytLen(meth)) {
pc = FlexAddrAt(meth, 0);
}
PrintWhere();
'''
Op['return'] = '''
word result = PEEK(0);
sp = fp;
fp = POP();
fprintf(stderr, "Popped FP = $%04x\\n", fp);
if (!fp) {
fprintf(stderr, "Finishing with Zero FP.\\n");
goto STOP;
}
pc = POP();
if (!pc) {
fprintf(stderr, "Finishing with Zero PC.\\n");
goto STOP;
}
fprintf(stderr, "Popped PC = $%04x\\n", pc);
word nargs = POP();
fprintf(stderr, "Peeked nargs = $%04x\\n", nargs);
nargs &= 255;
sp += 2 * (nargs + 2 /* msg, rcvr*/ );
PUSH(result);
PrintWhere();
'''
def AllClassesPreorder(start=Ur):
z = [start]
for sub in sorted(start.__subclasses__(), key=lambda c: c.__name__):
z += AllClassesPreorder(sub)
return z
# Gather all Konstants.
Konsts = {}
for k, v in globals().items():
if k.startswith('K_'):
Konsts[k] = v
# First create NIL, FALSE, TRUE instances, in that order.
NIL, FALSE, TRUE = NilT(), FalseT(), TrueT()
NIL.Reify(), FALSE.Reify(), TRUE.Reify()
def FixSlotsOnClass(c, inst):
print 'FNORD', inst.nick, dir(c)
# Check the bslots, pslots, & flexes; compute b_flags, b_bSize, b_numB, b_numP
bslots = [(k, getattr(c, k)) for k in dir(c) if k.startswith('B_')]
pslots = [(k, getattr(c, k)) for k in dir(c) if k.startswith('P_')]
flexes = [(k, getattr(c, k)) for k in dir(c) if k.startswith('FLEX_')]
bslots = sorted(bslots, key=lambda pair: pair[1])
pslots = sorted(pslots, key=lambda pair: pair[1])
print 'cBPF', c, bslots, pslots, flexes
for i, (k, v) in zip(range(len(bslots)), bslots):
if i != v: raise Exception("Bad B_ numbers in class %s: %s" % (c, bslots))
for i, (k, v) in zip(range(len(pslots)), pslots):
if len(bslots)+2*i != v: raise Exception("Bad P_ numbers in class %s: %s" % (c, pslots))
inst.b_numB = len(bslots)
inst.b_numP = len(pslots)
inst.b_bSize = inst.b_numB + 2*inst.b_numP
if flexes:
assert len(flexes) == 1 # ThereCanOnlyBeOne
if flexes[0][0]=='FLEX_BYTES':
inst.b_flags = K_FLEX_BYTES
elif flexes[0][0]=='FLEX_PTRS':
inst.b_flags = K_FLEX_PTRS
else:
raise Exception('Bad FLEX records in class %s: %s' % (c, flexes))
else:
inst.b_flags = 0
inst.bslots = bslots
inst.pslots = pslots
inst.flexes = flexes
# Create class objects.
ClassDict = {}
for c in AllClassesPreorder():
inst = Cls()
inst.pycls = c
inst.nick = c.__name__
inst.name = c.__name__.upper()
inst.flexstring = inst.name
inst.flexsize = len(inst.name)
inst.flexbytes = [ord(s) for s in inst.name]
inst.b_this = len(ClassDict) + 1 # Skip the 0 class, meaning unused memory.
ClassVec[inst.b_this] = inst
inst.sup = None if c is Ur else c.__bases__[0]
inst.p_sup = NIL if c is Ur else ClassDict[c.__bases__[0].__name__.upper()]
inst.p_meths = NIL
inst.Reify()
ClassDict[inst.name] = inst
FixSlotsOnClass(c, inst)
def WriteInspectors():
for cname, c in sorted(ClassDict.items()):
print 'struct FieldInfo FI_%s[] = {' % (
c.name)
for bs in c.bslots:
fname, foff = bs
print ' { "%s", 1, %d }, ' % (fname, foff)
for ps in c.pslots:
fname, foff = ps
print ' { "%s", 2, %d }, ' % (fname, foff)
print ' { NULL, 0, 0 }'
print '};'
print '''
struct ClassInfo CI_%s = {
"%s",
%d,
FI_%s};
''' % (c.name, c.name, c.b_this, c.name)
print 'void InitInfo() {'
for cname, c in sorted(ClassDict.items()):
print ' ClassInfos[%d] = &CI_%s;' % (
c.b_this, c.name)
print '}'
# Create metaclass objects.
METACLS = ClassDict['METACLS']
for c in AllClassesPreorder():
meta = Metacls()
meta.nick = c.__name__ + 'ClS'
meta.name = c.__name__.upper() + 'CLS'
if True:
meta.flexstring = meta.name
meta.flexsize = len(meta.name)
meta.flexbytes = [ord(s) for s in meta.name]
else:
meta.flexstring = ''
meta.flexsize = 0
meta.flexbytes = []
meta.b_this = len(ClassDict) + 1 # Skip the 0 class, meaning unused.
ClassVec[meta.b_this] = meta
meta.p_sup = ClassDict['CLS'] if c is Ur else ClassDict[c.__bases__[0].__name__.upper() + 'CLS']
meta.sup = meta.p_sup
meta.p_meths = NIL
meta.Reify()
ClassDict[meta.name] = meta
FixSlotsOnClass(METACLS, meta)
# Link metaclass class objects.
for c in AllClassesPreorder():
meta = ClassDict[c.__name__.upper() + 'CLS']
meta.b_cls = METACLS.b_this
inst = ClassDict[c.__name__.upper()]
inst.b_cls = meta.b_this
inst.b_partner = meta.b_this
meta.b_partner = inst.b_this
#### Compile methods and intern symbols.
Op = dict([(k.upper(), v) for k,v in Op.items()]) # Normalize keys upper.
Method = dict([(k.upper(), v) for k,v in Method.items()]) # Normalize keys upper.
for k,v in Method.items(): # Also normalize inner keys (method names).
for k2 in v:
Intern(k2.upper())
Method[k] = dict([(k2.upper(), v2) for k2,v2 in v.items()])
print '=== Op:', repr(Op)
print '=== Method:', repr(Method)
for cname, m in sorted(Method.items()):
for mname, v in sorted(m.items()):
Intern(mname.upper())
OpList = ['STOP'] + sorted([k.upper() for k in Op if k != 'STOP'])
for i, op in zip(range(len(OpList)), OpList):
OpNums[op] = i
print '=== OpNums:', repr(OpNums)
def CompileMethod(cname, mname, v):
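# A method source string is dispatched on its first character:
#   'T' - TerseTalk source, compiled to bytecodes via CompileToCodes;
#   'C' - raw C code, wrapped as a brand-new opcode and then called as a 'B';
#   'B' - space-separated bytecode mnemonics, #symbols, @classes and integers.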
numL = 2
if v[0] == 'T':
codes, numL = CompileToCodes(v[1:], ClassDict[cname])
# Change to format 'B' for text bytecode string.
v = 'B ' + ' '.join([str(c) for c in codes])
if v[0] == 'C':
# Create an opcode for the C code.
opname = ('%s_%s_c' % (cname, mname)).upper().replace(':', '_')
Op[opname] = v[1:]
opnum = len(OpList)
OpList.append(opname)
OpNums[opname] = opnum
# Now pretend it was a B definition, to call the new opcode.
v = 'B %s' % opname
if v[0] != 'B':
raise Exception('Only Bytecode (B) supported: %s' % repr(v))
v = v[1:]
explain, codes = [], []
ww = v.split()
print 'Compiling (%s %s): %s' % (cname, mname, ww)
Marks = {}
Fixes = []
for w in ww:
if INT_PATTERN(w):
explain.append(EvalInt(w))
codes.append(EvalInt(w))
elif SYM_PATTERN(w):
num = Intern(SYM_PATTERN(w).group(1).upper())
explain.append('lit_b')
explain.append(num)
codes.append(OpNums['LIT_B'])
codes.append(num)
elif CLASS_PATTERN(w):
cn = CLASS_PATTERN(w).group(1).upper()
c = ClassDict.get(cn)
explain.append('CLASS_B')
explain.append(c.b_this)
codes.append(OpNums['CLASS_B'])
codes.append(c.b_this)
elif MARK_PATTERN(w):
verb, target = MARK_PATTERN(w).groups()
target = int(target)
if verb == 'mark':
Marks[target] = len(codes)
elif verb == 'jump':
Fixes.append((target, len(codes)))
codes.append('jump_b')
codes.append(0)
elif verb == 'bfalse':
Fixes.append((target, len(codes)))
codes.append('bfalse_b')
codes.append(0)
else:
raise Exception('bad mark verb: %s' % verb)
else:
num = OpNums.get(w.upper())
if num is None:
raise Exception('No such opcode: [%s %s]: %s: %s' % (cname, mname, w, repr(v)))
explain.append(w)
codes.append(OpNums[w.upper()])
explain.append('RETURN');
codes.append(OpNums['RETURN']);
for (mark, loc) in Fixes:
target = Marks[mark]
if target < loc:
codes[loc] = OpNums[('reverse_' + codes[loc]).upper()]
codes[loc+1] = loc + 2 - target
else:
codes[loc] = OpNums[('forward_' + codes[loc]).upper()]
codes[loc+1] = target - loc - 2
print 'CompileMethod: %s %s: %s' % (cname, mname, explain)
print 'CompileMethod: %s %s: %s' % (cname, mname, codes)
return explain, codes, numL
CompiledMethods = {}
for cname, m in sorted(Method.items()):
cname = cname.upper();
print 'CNAME: %s METHODS: %s' % (cname, m)
for mname, v in sorted(m.items()):
mname = mname.upper();
explain, codes, numL = CompileMethod(cname, mname, v)
CompiledMethods[(cname, mname)] = (codes, numL)
for (cname,mname),(codes,numL) in sorted(CompiledMethods.items()):
meth = Meth()
cls = ClassDict[cname]
meth.b_cls = ClassDict['METH'].b_this
meth.b_name = Intern(mname.upper())
meth.b_owner = cls.b_this
meth.b_numL = numL
meth.p_next = cls.p_meths # prepend to linked list.
cls.p_meths = meth
meth.flexsize = len(codes)
meth.flexbytes = codes
meth.Reify()
# Prepare packed strings with all interned symbols.
InternLoc = {}
InternSym = {}
PackedStrings = ['']
for (k, v) in InternDict.items():
s = PackedStrings[-1]
if len(s) + len(k) > MAX_FLEX_BYTES:
s = ''
PackedStrings.append(s)
InternLoc[k] = (len(PackedStrings), len(s))
s += k
PackedStrings[-1] = s # Put the new string back.
# Reify the interned symbols.
for (k, v) in InternDict.items():
sym = Sym()
sym.b_intern = v
sym.Reify()
SymVec[v] = sym
InternSym[k] = sym
# Reify the packed strings.
PackedList = []
for ps in PackedStrings:
po = ArrByt()
po.flexstring = ps
po.flexsize = len(ps)
po.flexbytes = [ord(s) for s in ps]
assert len(po.flexbytes) == po.flexsize
po.Reify()
print 'PackedString:', po.nick, po.addr, po.basesize, po.flexsize, po.flexbytes
PackedList.append(po)
# Fill in symbol fields.
for (k, v) in InternSym.items():
v.str = k
packNum, offset = InternLoc[k]
v.b_begin = offset
v.b_len = len(k)
v.p_guts = PackedList[packNum-1].addr
for m in MemorableList:
m.b_gcsize = ((m.size-2)>>1, m.nick, m.addr, m.basesize, m.__class__.__name__)
assert m.b_gcsize[0] > 0, vars(m)
assert m.b_gcsize[0] < 128, vars(m)
m.CLS = ClassDict[m.__class__.__name__.upper()]
if type(m) is not Cls:
# Do not change classes, which are already correct.
m.b_cls = m.CLS.b_this
m.Materialize()
pass;pass;pass
def GenerateH():
print '''/* This is Generated Code */
#ifndef TERSETALK9_GENERATED_H_
#define TERSETALK9_GENERATED_H_
#include "vm.h"
'''
for k,v in globals().items():
if k.startswith('K_'):
print '#define %s %s' % (k, v)
for op in OpList:
print '#define OP_%-17s %d' % (op, OpNums[op])
print
for c in ClassVec[1:]:
if not c: continue
i = 0
for e, off in c.bslots:
assert i == off
print ' #define %s_%s %d' % (c.name, e, off)
i += 1
for e, off in c.pslots:
assert i == off
print ' #define %s_%s %d' % (c.name, e, off)
i += 2
if c.flexes:
print ' #define %s_FLEXSIZE %d' % (c.name, c.flexes[0][1])
i += 1
print
print '#endif'
def GenerateC():
print '''/* This is Generated Code */
#include "vm.h"
#include "_generated.h"
'''
print '''
#ifdef DEBUG
char* OpNames[] = {
'''
for e in OpList:
print ' "%s",' % e
print '''
NULL,
};
#endif
'''
print '''
void Boot() {
'''
print ' nilAddr = 0x%04x;' % NIL.addr
print ' falseAddr = 0x%04x;' % FALSE.addr
print ' trueAddr = 0x%04x;' % TRUE.addr
print ' intAddr = 0x%04x;' % ClassDict['INT'].addr
print ' clsAddr = 0x%04x;' % ClassDict['CLS'].addr
print '''
}
'''
print '''
void Loop() {
while (1) {
#ifdef DEBUG
Hex20("pc", pc, pc);
Hex20("fp", fp, fp);
Hex20("sp", sp, sp);
#endif
byte opcode = BYTE(pc);
++pc;
#ifdef DEBUG
fprintf(stderr, "Step: opcode: $%02x=%d.=%s\\n", opcode, opcode, OpNames[opcode]);
#endif
switch (opcode) {
'''
for op in OpList:
print '\tcase OP_%s: {' % op
if op.endswith('_B'):
print '\t fprintf(stderr, "OP: %s $%%02x=%%d.\\n", B(pc), B(pc));' % op
elif op.endswith('_W'):
print '\t fprintf(stderr, "OP: %s $%%04x=%%d.\\n", W(pc), W(pc));' % op
else:
print '\t fprintf(stderr, "OP: %s\\n");' % op
done = False
for k,v in Op.items():
if k.upper() == op:
if done:
raise Exception('already done Op[%s]' % op)
for s in v.split('\n'):
print '\t\t%s' % s
done = True
print '\t}'
print '\tbreak;'
print
print '''
}
}
STOP:
return;
}
'''
def GenerateImage():
def w(x): sys.stdout.write(x)
def w2(x): w('%c%c' % (Hi(x), Lo(x)))
def w1(x): w('%c' % x)
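# Image layout: a sequence of segments, each a two-character tag ('T/', 'C/',
# 'S/', 'M/') followed by a 2-byte length (high byte first, via w2) and the
# payload bytes; a zero-length '!/' segment terminates the image.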
# Terse talk version number 1
w('T/'); w2(2); w2(1)
# Class vector
n = len(ClassDict)
w('C/'); w2(n*2)
for c in ClassVec[:n]:
w2(0 if c==0 else c.addr) # Initial 0 for unused mem.
# Intern Symbol vector
n = len(InternDict)
w('S/'); w2(n*2)
for y in SymVec[:n]:
w2(y.addr)
# Memory bytes
n = Here
w('M/'); w2(n)
for x in Memory[:n]:
if x == '^': w1(0) # padding
#elif x == '#': raise 'bad' # w1(0) # unused part of page
#elif x == '@': raise 'bad' # w1(0) # last in page
elif type(x) is tuple: w1(x[0])
elif type(x) is int: w1(x)
else: raise Exception('weird memory: %s' % x)
# End with a zero-length segment with '!/' name.
w('!/'); w2(0);
pass
print dir(TRUE)
print vars(TRUE)
print
for m in MemorableList:
print 'M:', m, vars(m)
print
for name, cls in ClassDict.items():
print name, cls, (cls.__class__.__bases__)
print
print 'SymVec:', SymVec
print
print 'InternSym:', len(InternSym), InternSym
print
print 'InternDict:', len(InternDict), InternDict
print
print 'ClassDict:', len(ClassDict), ClassDict
print
print 'ClassVec:', ClassVec
print
print 'OpList:', OpList
print
print 'Memory:', Here, Memory[:Here]
print
for resource, used, maximum in [
('Memory', Here, 65536),
('Classes', len(ClassDict), 255),
('Symbols', len(InternDict), 256),
('Opcodes', len(OpNums), 256),
]:
print '%10s %5.2f%% %6d/%6d full' % (resource, 100.0 * used / maximum, used, maximum)
sys.stdout = open('_generated.h', 'w')
GenerateH()
sys.stdout.close()
sys.stdout = open('_generated.c', 'w')
GenerateC()
WriteInspectors()
sys.stdout.close()
sys.stdout = open('_generated.image', 'wb')
GenerateImage()
sys.stdout.close()
pass
|
python
|
"""
This file contains the full ImageFeaturizer class, which allows users to upload
an image directory, a csv containing a list of image URLs, or a directory with a
csv containing names of images in the directory.
It featurizes the images using pretrained, decapitated InceptionV3 model, and
saves the featurized data to a csv, as well as within the ImageFeaturizer class
itself. This allows data scientists to easily analyze image data using simpler models.
Functionality:
1. Build the featurizer model. The class initializer ImageFeaturizer() takes as input:
depth : int
1, 2, 3, or 4, depending on how far down you want to sample the featurizer layer
autosample : bool
a boolean flag signalling automatic downsampling
downsample_size : int
desired number of features to downsample the final layer to. Must be an
integer divisor of the number of features in the layer.
2. Load the data. The self.load_data() function takes as input:
image_columns : str
the name of the column holding the image data, if a csv exists,
or what the name of the column will be, if generating the csv
from a directory
image_path : str
the path to the folder containing the images. If using URLs, leave blank
csv_path : str
the path to the csv. If just using a directory, leave blank.
If csv exists, this is the path where the featurized csv will be
generated.
scaled_size : tuple
The size that the images get scaled to. Default is (299, 299)
grayscale : bool
Decides if image is grayscale or not. May get deprecated. Don't
think it works on the InceptionV3 model due to input size.
3. Featurize the data. The self.featurize_preloaded_data() function takes no required input, and
featurizes the loaded data, writing the new csvs to the same path as the loaded csv
Also adds a binary "image_missing" column automatically, for any images that are missing
from the image list.
3a. Users can also load and featurize the data in one pass, with the
self.featurize function, which takes the same input as the
load_data function and performs the featurization automatically.
"""
import logging
import os
import math
import time
import numpy as np
import trafaret as t
import pandas as pd
from .build_featurizer import build_featurizer, supported_model_types
from .feature_preprocessing import preprocess_data, _image_paths_finder
from .data_featurizing import featurize_data, create_features
logger = logging.getLogger(__name__)
SIZE_DICT = {'squeezenet': (227, 227), 'vgg16': (224, 224), 'vgg19': (224, 224),
'resnet50': (224, 224), 'inceptionv3': (299, 299), 'xception': (299, 299)}
DEFAULT_NEW_CSV_PATH = '{}{}'.format(os.path.expanduser('~'), '/Downloads/images.csv')
class ImageFeaturizer:
"""
This object can load images, rescale, crop, and vectorize them into a
uniform batch, and then featurize the images for use with custom classifiers.
Methods
------------------
__init__(depth, autosample,
downsample_size):
--------------------------------
Initialize the ImageFeaturizer. Build the featurizer model with the
depth and feature downsampling specified by the inputs.
featurize(image_columns, image_path,
csv_path, new_csv_path, scaled_size, grayscale):
--------------------------------
Loads image directory and/or csv into the model, and
featurizes the images
load_data(image_columns, image_path, csv_path,
scaled_size, grayscale):
--------------------------------
Loads image directory and/or csv into the model, and vectorize the
images for input into the featurizer
featurize_preloaded_data():
--------------------------------
Featurize the loaded data, append the features to the csv, and
return the full dataframe
"""
@t.guard(depth=t.Int(gte=1, lte=4),
autosample=t.Bool,
downsample_size=t.Int(gte=0),
model=t.Enum(*supported_model_types.keys()))
def __init__(self,
depth=1,
autosample=False,
downsample_size=0,
model='squeezenet'
):
"""
Initializer.
Loads the chosen pretrained network (SqueezeNet by default), decapitates it and
downsamples according to user specifications.
Parameters:
----------
depth : int
How deep to decapitate the model. Deeper means less specific but
also less complex
autosample : bool
If True, feature layer is automatically downsampled to the right size.
downsample_size: int
The number of features to downsample the featurizer to
Returns:
--------
None. Initializes and saves the featurizer object attributes.
"""
# BUILDING THE MODEL #
logging.info("Building the featurizer.")
featurizer = build_featurizer(depth, autosample,
downsample_size, model_str=model.lower())
# Saving initializations of model
self.depth = depth
self.autosample = autosample
self.downsample_size = downsample_size
self.num_features = featurizer.layers[-1].output_shape[-1]
# Save the model
self.model_name = model.lower()
self.featurizer = featurizer
self.visualize = featurizer.summary
# Initializing preprocessing variables for after we load and featurize the images
self.data = np.zeros((1))
self.features = pd.DataFrame()
self.df_original = pd.DataFrame()
self.full_dataframe = pd.DataFrame()
self.df_features = pd.DataFrame()
self.csv_path = ''
self.image_dict = {}
self.image_columns = ''
self.image_path = ''
# Image scaling and cropping
self.scaled_size = (0, 0)
self.crop_size = (0, 0)
self.number_crops = 0
self.isotropic_scaling = False
def load_data(self,
image_columns,
image_path='',
image_dict='',
csv_path='',
grayscale=False,
save_data=True,
# crop_size = (299, 299),
# number_crops = 0,
# random_crop = False,
# isotropic_scaling = True
):
"""
Load image directory and/or csv, and vectorize the images for input into the featurizer.
Parameters:
----------
image_columns : str
the name of the column holding the image data, if a csv exists,
or what the name of the column will be, if generating the csv
from a directory
image_path : str
the path to the folder containing the images. If using URLs, leave blank
csv_path : str
the path to the csv. If just using a directory, leave blank.
If csv exists, this is the path where the featurized csv will be
generated.
# These features haven't been implemented yet.
# grayscale : bool
# Flags the image as grayscale
#
# isotropic_scaling : bool
# If True, images are scaled keeping proportions and then cropped
#
# crop_size: tuple
# If the image gets cropped, decides the size of the crop
#
# random_crop: bool
# If False, only take the center crop. If True, take random crop
#
"""
# Fix column headers and image path if they haven't been done, build path for new csv
image_columns, image_path = _input_fixer(image_columns, image_path)
# If there's no dataframe, build it!
if csv_path == '':
if len(image_columns) > 1:
raise ValueError('If building the dataframe from an image directory, the featurizer '
'can only create a single image column. If two image columns are '
'needed, please create a csv to pass in.')
# If the image_dict hasn't been passed in (which only happens in batch processing),
# build the full image dict and save the original dataframe
if not image_dict:
image_dict, df = _build_image_dict(image_path, csv_path,
image_columns)
self.df_original = df
self.full_dataframe = df
self.image_columns = image_columns
self.image_dict = image_dict
scaled_size, full_image_data = \
self._load_data_helper(self.model_name, image_columns,
image_path, image_dict, csv_path, grayscale)
# Save all of the necessary data to the featurizer
if save_data:
self.data = full_image_data
self.csv_path = csv_path
self.image_path = image_path
self.scaled_size = scaled_size
return full_image_data
@t.guard(batch_data=t.Type(np.ndarray),
image_columns=t.List(t.String(allow_blank=True)) | t.String(allow_blank=True),
batch_processing=t.Bool,
features_only=t.Bool,
save_features=t.Bool,
save_csv=t.Bool,
new_csv_path=t.String(allow_blank=True),
omit_model=t.Bool,
omit_depth=t.Bool,
omit_output=t.Bool,
omit_time=t.Bool,
)
def featurize_preloaded_data(self, batch_data=np.zeros((1)), image_columns='',
batch_processing=False, features_only=False,
save_features=False, save_csv=False, new_csv_path='',
omit_model=False, omit_depth=False, omit_output=False,
omit_time=False):
"""
Featurize the loaded data, returning the dataframe and writing the features
and the full combined data to csv
Parameters
----------
batch_data : np.ndarray
The vectorized image data to featurize. Defaults to the data already
loaded onto the featurizer by load_data.
image_columns : str or list of str
Name(s) of the image column(s) being featurized.
batch_processing : bool
Whether this call handles a single batch of a larger batched run.
features_only : bool
If True, return only the features dataframe.
save_features, save_csv, new_csv_path, omit_* :
Control whether features are stored on the featurizer and how the
output csv, if any, is named and written.
Returns
-------
full_dataframe or df_features: pandas.DataFrame
If features_only, this returns a Dataframe containing the features.
Otherwise, it returns a DataFrame containing the features appended to the
original csv. If save_csv is set to True, it also writes csv's
to the same path as the csv containing the list of names.
"""
# If the batch data isn't passed in, then load the full data from the attributes
if np.array_equal(batch_data, np.zeros((1))):
batch_data = self.data
if image_columns == '':
image_columns = self.image_columns
if isinstance(image_columns, str):
image_columns = [image_columns]
# Check data has been loaded, and that the data was vectorized correctly
if np.array_equal(batch_data, np.zeros((1))):
raise IOError('Must load data into the model first. Call load_data.')
# If batch processing, make sure we're only doing a single column at a time.
# Otherwise, make sure the number of columns matches the first dimension of the data
if batch_processing:
assert len(image_columns) == 1 or isinstance(image_columns, str)
else:
assert len(image_columns) == batch_data.shape[0]
logging.info("Trying to featurize data.")
# Initialize featurized data vector with appropriate size
features = np.zeros((batch_data.shape[1],
self.num_features * len(image_columns)))
# Get the image features
df_features = self._featurize_helper(
features, image_columns, batch_data)
# Save features if boolean set to True
if save_features:
self.features = df_features
# If called with features_only, returns only the features
if features_only:
return df_features
# Save the image features with the original dataframe
full_dataframe = pd.concat([self.df_original, df_features], axis=1)
# If batch processing, this is only the batch dataframe. Otherwise, this is the actual
# full dataframe.
if not batch_processing:
self.full_dataframe = full_dataframe
# Save csv if called
if save_csv:
self.save_csv(new_csv_path=new_csv_path, omit_model=omit_model, omit_depth=omit_depth,
omit_output=omit_output, omit_time=omit_time, save_features=save_features)
return full_dataframe
@t.guard(image_columns=t.List(t.String(allow_blank=True)) | t.String(allow_blank=True),
image_path=t.String(allow_blank=True),
csv_path=t.String(allow_blank=True),
new_csv_path=t.String(allow_blank=True),
batch_processing=t.Bool,
batch_size=t.Int,
save_data=t.Bool,
save_features=t.Bool,
save_csv=t.Bool,
omit_time=t.Bool,
omit_model=t.Bool,
omit_depth=t.Bool,
omit_output=t.Bool,
verbose=t.Bool,
grayscale=t.Bool
)
def featurize(self,
image_columns,
image_path='',
csv_path='',
new_csv_path='',
batch_processing=True,
batch_size=1000,
save_data=False,
save_features=False,
save_csv=False,
omit_time=False,
omit_model=False,
omit_depth=False,
omit_output=False,
verbose=True,
grayscale=False
# crop_size = (299, 299),
# number_crops = 0,
# random_crop = False,
# isotropic_scaling = True
):
"""
Load image directory and/or csv, and vectorize the images for input into the featurizer.
Then, featurize the data.
Parameters:
----------
image_columns : list of str
list of the names of the column holding the image data, if a csv exists,
or what the name of the column will be, if generating the csv
from a directory
image_path : str
the path to the folder containing the images. If using URLs, leave blank
csv_path : str
the path to the csv. If just using a directory, leave blank, and
specify the path for the generated csv in new_csv_path.
If csv exists, this is the path where the featurized csv will be
generated.
new_csv_path : str
the path to the new csv, if one is being generated from a directory.
If no csv exists, this is the path where the featurized csv will
be generated
grayscale : bool
Decides if image is grayscale or not. May get deprecated. Don't
think it works on the InceptionV3 model due to input size.
# These features haven't been implemented yet.
# isotropic_scaling : bool
# if True, images are scaled keeping proportions and then cropped
#
# crop_size: tuple
# if the image gets cropped, decides the size of the crop
#
# random_crop: bool
# If False, only take the center crop. If True, take random crop
#
Returns:
--------
full_dataframe :
Dataframe containing the features appended to the original csv.
Also writes csvs containing the features only and the full dataframe
to the same path as the csv containing the list of names
"""
if not image_path and not csv_path:
raise ValueError("Must specify either image_path or csv_path as input.")
# Set logging level
if verbose:
logger.setLevel(logging.INFO)
# Fix column headers and image path if necessary
image_columns, image_path = _input_fixer(image_columns, image_path)
# Find the full image dict and save the original dataframe. This is required early to know
# how many images exist in total, to control batch processing.
full_image_dict, df_original = _build_image_dict(image_path, csv_path,
image_columns)
# Save the fixed inputs and full image dict
self.df_original = df_original
self.image_columns = image_columns
self.image_dict = full_image_dict
# Users can turn off batch processing by either setting batch_processing to false, or
# setting batch_size to 0
if batch_processing and batch_size:
# Perform batch processing, and save the full dataframe and the full features dataframe
features_df = self._batch_processing(full_image_dict, image_columns,
image_path, csv_path,
batch_size, grayscale)
# If batch processing is turned off, load the images in one big batch and featurize them all
else:
logger.info("Loading full data tensor without batch processing. If you "
"experience a memory error, make sure batch processing is enabled.")
full_data = self.load_data(image_columns, image_path, full_image_dict, csv_path,
grayscale, save_data)
features_df = \
self.featurize_preloaded_data(full_data, image_columns=image_columns,
features_only=True)
# Save the full dataframe with the features
full_df = pd.concat([df_original, features_df], axis=1)
self.full_dataframe = full_df
# Save features and csv if flags are enabled
if save_features:
self.features = features_df
if save_csv:
self.save_csv(new_csv_path=new_csv_path, omit_model=omit_model, omit_depth=omit_depth,
omit_output=omit_output, omit_time=omit_time, save_features=save_features)
# Return the full featurized dataframe
return full_df
def save_csv(self, new_csv_path='', omit_model=False, omit_depth=False,
omit_output=False, omit_time=False, save_features=False):
"""
"""
if self.full_dataframe.empty:
raise AttributeError('No dataframe has been featurized.')
# Save the name and extension separately, for robust naming
if not new_csv_path:
new_csv_path = self.csv_path or DEFAULT_NEW_CSV_PATH
csv_name, ext = os.path.splitext(new_csv_path)
name_path = _named_path_finder("{}_featurized".format(csv_name), self.model_name,
self.depth, self.num_features, omit_model, omit_depth,
omit_output, omit_time)
else:
name_path, ext = os.path.splitext(new_csv_path)
_create_csv_path(name_path)
logger.warning("Saving full dataframe to csv as {}{}".format(name_path, ext))
self.full_dataframe.to_csv("{}{}".format(name_path, ext), index=False)
if save_features:
logger.warning("Saving features to csv as {}_features_only{}".format(name_path, ext))
self.features.to_csv("{}_features_only{}".format(name_path, ext),
index=False)
@t.guard(confirm=t.Bool)
def clear_input(self, confirm=False):
"""
Clear all input for the model. Requires the user to confirm with an additional "confirm"
argument in order to run.
Parameters:
----------
confirm : bool
Users are required to modify this to true in order to clear all attributes
from the featurizer
"""
if not confirm:
raise ValueError('If you\'re sure you would like to clear the inputs of this model, '
'rerun the function with the following argument: '
'clear_input(confirm=True). This operation cannot be reversed.')
self.data = np.zeros((1))
self.features = pd.DataFrame()
self.full_dataframe = pd.DataFrame()
self.csv_path = ''
self.image_dict = {}
self.image_columns = ''
self.image_path = ''
# ###################
# Helper Functions! #
# ###################
def _load_data_helper(self,
model_name,
image_columns,
image_path,
image_dict,
csv_path,
grayscale):
"""
This function helps load the image data from the image directory and/or csv.
It can be called by either batch processing, where each column is handled separately in the
parent function and the data is loaded in batches, or it can be called without batch
processing, where the columns must each be loaded and concatenated here.
Parameters:
----------
model_name : str
The name of the model type, which determines scaling size
image_columns : list
A list of the image column headers
image_path : str
Path to the image directory
image_dict : dict
This is a dictionary containing the names of each image column as a key, along with
all of the image paths for that column.
csv_path : str
Path to the csv
grayscale : bool
Whether the images are grayscale or not
"""
# Save size that model scales to
scaled_size = SIZE_DICT[model_name]
# Save the full image tensor, the path to the csv, and the list of image paths
image_data, list_of_image_paths = \
preprocess_data(image_columns[0], model_name,
image_dict[image_columns[0]],
image_path, csv_path, scaled_size, grayscale)
image_data_list = [np.expand_dims(image_data, axis=0)]
# If there is more than one image column, repeat this process for each
if len(image_columns) > 1:
for column in image_columns[1:]:
image_data, list_of_image_paths = \
preprocess_data(column, model_name, image_dict[column], image_path,
csv_path, scaled_size, grayscale)
image_data_list.append(np.expand_dims(image_data, axis=0))
full_image_data = np.concatenate(image_data_list)
return scaled_size, full_image_data
def _featurize_helper(self, features, image_columns, batch_data):
"""
This function featurizes the data for each image column, and creates the features array
from all of the featurized columns
Parameters:
----------
features : array
Array of features already computed
image_columns : list
A list of the image column headers
batch_data : array
The batch loaded image data (which may be the full array if not running with batches)
"""
# Save the initial features list
features_list = []
# For each image column, perform the full featurization and add the features to the df
for column in range(batch_data.shape[0]):
# Featurize the data, and save it to the appropriate columns
partial_features = featurize_data(self.featurizer, batch_data[column])
features[:, self.num_features * column:self.num_features * column + self.num_features]\
= partial_features
# Save the full dataframe
df_features = \
create_features(batch_data[column],
partial_features,
image_columns[column])
features_list.append(df_features)
df_features = pd.concat(features_list, axis=1)
return df_features
def _batch_processing(self,
full_image_dict,
image_columns,
image_path='',
csv_path='',
batch_size=1000,
grayscale=False):
"""
This function handles batch processing. It takes the full list of images that need
to be processed and loads/featurizes the images in batches.
Parameters:
----------
full_image_dict : dict
This is a dictionary containing the names of each image column as a key, along with
all of the image paths for that column.
image_columns : list
A list of the image column headers
image_path : str
Path to the image directory
csv_path : str
Path to the csv
batch_size : int
The number of images processed per batch
grayscale : bool
Whether the images are grayscale or not
"""
features_df = pd.DataFrame()
features_df_columns_list = []
# Iterate through each image column
for column_index in range(len(image_columns)):
# Initialize the batch index and save the column name
index = 0
batch_number = 0
column = image_columns[column_index]
batch_features_df = pd.DataFrame()
# Get the list of image paths and the number of images in this column
list_of_image_paths = full_image_dict[column]
num_images = len(list_of_image_paths)
batch_features_list = []
# Loop through the images, featurizing each batch
if len(image_columns) > 1:
logger.info("Featurizing column #{}".format(column_index + 1))
while index < num_images:
tic = time.clock()
# Cap the batch size against the total number of images left to prevent overflow
if index + batch_size > num_images:
batch_size = num_images - index
# Create a dictionary for just the batch of images
batch_image_dict = {column: full_image_dict[column][index:index + batch_size]}
# Load the images
logger.info("Loading image batch.")
batch_data = self.load_data(column, image_path,
batch_image_dict, csv_path,
grayscale, save_data=False)
logger.info("\nFeaturizing image batch.")
# If this is the first batch, the batch features will be saved alone.
# Otherwise, they are concatenated to the last batch
batch_features_list.append(self.featurize_preloaded_data(batch_data, column,
features_only=True,
batch_processing=True))
# Increment index by batch size
index += batch_size
batch_number += 1
# Give update on time and number of images left in column
remaining_batches = int(math.ceil(float(num_images - index) / batch_size))
logger.info("Featurized batch #{}. Number of images left: {}\n"
"Estimated total time left: {} seconds\n".format(
batch_number, num_images - index,
int((time.clock() - tic) * remaining_batches))
)
# After the full column's features are calculated, concatenate them all and append them
# to the full DataFrame list
batch_features_df = pd.concat(batch_features_list, ignore_index=True)
features_df_columns_list.append(batch_features_df)
# Once all the features are created for each column, concatenate them together for both
# the features dataframe and the full dataframe
features_df = pd.concat(features_df_columns_list, axis=1)
# Return the full dataframe and features dataframe
return features_df
def _build_image_dict(image_path, csv_path, image_columns):
"""
This function creates the image dictionary that maps each image column to the images
in that column
Parameters
----------
image_path : str
Path to the image directory
csv_path : str
Path to the csv
image_columns : list
A list of the image column headers
"""
full_image_dict = {}
for column in image_columns:
list_of_image_paths, df = _image_paths_finder(image_path, csv_path,
column)
full_image_dict[column] = list_of_image_paths
return full_image_dict, df
def _input_fixer(image_columns, image_path):
"""
This function turns image_columns into a list of a single element if there is only
one image column. It also fixes the image path to contain a trailing `/` if the path to the
directory is missing one.
Parameters
----------
image_columns : list
A list of the image column headers
image_path : str
Path to the image directory
"""
# Convert column header to list if it's passed a single string
if isinstance(image_columns, str):
image_columns = [image_columns]
# Add backslash to end of image path if it is not there
if image_path != '' and image_path[-1] != "/":
image_path = '{}/'.format(image_path)
return image_columns, image_path
def _create_csv_path(new_csv_path):
"""
Create the necessary csv along with the appropriate directories
"""
# Create the filepath to the new csv
path_to_new_csv = os.path.dirname(new_csv_path)
if not os.path.isdir(path_to_new_csv) and path_to_new_csv != '':
os.makedirs(path_to_new_csv)
def _named_path_finder(csv_name, model_str, model_depth, model_output,
omit_model, omit_depth, omit_output, omit_time):
"""
Create the named path from the robust naming configuration available.
Parameters:
-----------
omit_model : Bool
Boolean to omit the model name from the CSV name
omit_depth : Bool
Boolean to omit the model depth from the CSV name
omit_output : Bool
Boolean to omit the model output size from the CSV name
omit_time : Bool
Boolean to omit the time of creation from the CSV name
model_str : Str
The model name
model_depth : Str
The model depth
model_output : Str
The model output size
Returns:
--------
named_path : Str
The full name of the CSV file
"""
# Naming switches! Can turn on or off to remove time, model, depth, or output size
# from output filename
if not omit_time:
saved_time = "_({})".format(time.strftime("%d-%b-%Y-%H.%M.%S", time.gmtime()))
else:
saved_time = ""
if not omit_model:
saved_model = "_{}".format(model_str)
else:
saved_model = ""
if not omit_depth:
saved_depth = "_depth-{}".format(model_depth)
else:
saved_depth = ""
if not omit_output:
saved_output = "_output-{}".format(model_output)
else:
saved_output = ""
named_path = "{}{}{}{}{}".format(csv_name, saved_model, saved_depth, saved_output, saved_time)
return named_path
|
python
|
#!/usr/bin/env python3
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Make Cozmo look around for a cube.
Cozmo looks around, reacts, and picks up and puts down a cube if found.
'''
import asyncio
import cozmo
from cozmo.util import degrees
def cozmo_program(robot: cozmo.robot.Robot):
look_around = robot.start_behavior(cozmo.behavior.BehaviorTypes.LookAroundInPlace)
# try to find a block
cube = None
try:
cube = robot.world.wait_for_observed_light_cube(timeout=30)
print("Found cube", cube)
except asyncio.TimeoutError:
print("Didn't find a cube :-(")
finally:
# whether we find it or not, we want to stop the behavior
look_around.stop()
if cube is None:
robot.play_anim_trigger(cozmo.anim.Triggers.MajorFail)
return
print("Yay, found cube")
cube.set_lights(cozmo.lights.green_light.flash())
anim = robot.play_anim_trigger(cozmo.anim.Triggers.BlockReact)
anim.wait_for_completed()
action = robot.pickup_object(cube)
print("got action", action)
result = action.wait_for_completed(timeout=30)
print("got action result", result)
robot.turn_in_place(degrees(90)).wait_for_completed()
action = robot.place_object_on_ground_here(cube)
print("got action", action)
result = action.wait_for_completed(timeout=30)
print("got action result", result)
anim = robot.play_anim_trigger(cozmo.anim.Triggers.MajorWin)
cube.set_light_corners(None, None, None, None)
anim.wait_for_completed()
cozmo.run_program(cozmo_program)
|
python
|
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Code starts here
data = np.genfromtxt(path, delimiter=',', skip_header=1)
print(data, data.shape)
census = np.concatenate((data, np.array(new_record)), axis=0)
print(census, census.shape)
# --------------
#Code starts here
age = census[:,0]
max_age = age.max()
min_age = age.min()
age_mean = age.mean()
age_std = age.std()
print('Max age = {} \n Min age = {} \n mean age = {} \n std deviation = {}'.format(max_age, min_age, age_mean, age_std))
# --------------
#Code starts here
race_0 = census[census[:,2]==0]
len_0 = len(race_0)
race_1 = census[census[:,2]==1]
len_1 = len(race_1)
race_2 = census[census[:,2]==2]
len_2 = len(race_2)
race_3 = census[census[:,2]==3]
len_3 = len(race_3)
race_4 = census[census[:,2]==4]
len_4 = len(race_4)
minority = min(len_0, len_1, len_2, len_3, len_4)
if minority==len_0:
minority_race = 0
elif minority==len_1:
minority_race = 1
elif minority==len_2:
minority_race = 2
elif minority==len_3:
minority_race = 3
elif minority==len_4:
minority_race = 4
print(minority_race)
# --------------
#Code starts here
senior_citizens = census[census[:,0]>60]
working_hours_sum = senior_citizens[:,6].sum()
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum/senior_citizens_len
print(avg_working_hours, avg_working_hours<=25)
# --------------
#Code starts here
high = census[census[:, 1]>10]
low = census[census[:, 1]<=10]
avg_pay_high = high.mean(axis=0)[7]
avg_pay_low = low.mean(axis=0)[7]
print(avg_pay_high, avg_pay_low)
|
python
|
import sys
sys.path.append(r'F:\geostats')
from geostats import Scraping
from get_groupinfo import *
from get_eventsInfo import *
from urllib.error import HTTPError
import urllib.request
import pandas as pd
import time, random, os
def generate_groupdf(groups):
groups_df = []
for j,group in enumerate(groups):
try:
record = get_groupsInfo(group_url[j])
print('Get into %s' %group_url[j])
record.update({'group': group})
groups_df.append(record)
new_path = os.path.join(path, str(group).strip())
os.makedirs(new_path)
events_df = get_eventsInfo(group_url[j])
events_path = os.path.join(new_path,'events.xlsx')
try:
df1 = pd.read_excel(events_path)
df1 = Scraping.clean_index(df1)
events_df_new = Scraping.dfmerge(df1, events_df, ['Name','Hold Date'], 'rbind')
except:
events_df_new = events_df
events_df_new.to_excel(events_path)
print("Updates%s successful" %group)
except(Exception):
continue
groups_df = pd.DataFrame(groups_df)
return groups_df
opener = Scraping.setProxy()
urllib.request.install_opener(opener)
url = "https://www.meetup.com/"
content = Scraping.parseHtml(url)
Categories = content.xpath('//*[@id="mupMain"]/div[3]/div/section[3]/div[2]/ul/li/div/div/a/h4/text()')
Sub_Url_10miles = list(content.xpath('//*[@id="mupMain"]/div[3]/div/section[3]/div[2]/ul/li/div/div/a/@href'))
Sub_Url = [url_10miles + '?allMeetups=false&radius=50&userFreeform=Dallas%2C+TX&mcId=z75201&mcName=Dallas%2C+TX&sort=default' for url_10miles in Sub_Url_10miles]
random_lst = list(range(0,len(Sub_Url)))
random.shuffle(random_lst)
for random_index in random_lst:
url1 = Sub_Url[random_index]
type = str(Categories[random_index])
path = os.path.join("./Data/", type)
content1 = Scraping.parseHtml(url1)
group1 = content1.xpath('//*[@id="simple-view"]/div[1]/ul/li/div/a[2]/div[2]/h3/text()')
groups = Scraping.clean_punctuationList(group1)
group_url = content1.xpath('//*[@id="simple-view"]/div/ul/li/div/a[@itemprop="url"]/@href')
groups_df = generate_groupdf(groups)
group_excel = os.path.join(path, type + ".xlsx")
try:
df1 = pd.read_excel(group_excel)
df1 = Scraping.clean_index(df1)
groups_df_new = Scraping.dfmerge(df1,groups_df,'Name','rbind')
except(HTTPError):
continue
except:
groups_df_new = groups_df
groups_df_new.to_excel(group_excel)
time.sleep(0.1)
|
python
|
# Pulls in images from different sources
# Thomas Lloyd
import numpy as np
import flickrapi
import urllib.request
# make private
api_key = '55d426a59efdae8b630aaa3afbac4000'
api_secret = '72f4bde28a867f41'
keyword1 = 'toulouse'
def initialize(api_key, api_secret):
flickr = flickrapi.FlickrAPI(api_key, api_secret)
return flickr
def pullimages(flickr):
# photos = flickr.photos.search(user_id='60027860@N06', per_page='10')
photos = flickr.walk(text=keyword1,
tag_mode='all',
tags=keyword1,
extras='url_c',
per_page=500,
sort='relevance')
urls = []
for i, photo in enumerate(photos):
url = photo.get('url_c')
urls.append(url)
        # stop once roughly 5000 urls have been collected
if i > 5000:
break
return urls
def fakeurls():
urls = []
urls.append('https://live.staticflickr.com/7858/47443394111_c9b79def1b_c.jpg')
urls.append('https://live.staticflickr.com/4181/34268611090_aa1b6cd86f_c.jpg')
urls.append('https://live.staticflickr.com/4226/33953994894_7213c010f4_c.jpg')
urls.append('https://live.staticflickr.com/4902/44209156090_48c2861574_c.jpg')
urls.append('https://live.staticflickr.com/7328/27511837520_12d32ef9bb_c.jpg')
for n in range(0, len(urls)):
url = urls[n]
if type(url) == str:
print("url" + str(n) + ": " + url)
return urls
def saveimages(urls, keyword1):
print('beginning url download')
for n in range(0, len(urls)):
url = urls[n]
if type(url) == str:
# urllib.request.urlretrieve(url, '/mnt/f/amsterdam/ams' + str(n) + '.jpg')
# urllib.request.urlretrieve(url, '/mnt/f/newyork/ny' + str(n) + '.jpg') # zero indexed
# urllib.request.urlretrieve(url, '/Dropbox/Documents//tko' + str(n) + '.jpg') # zero indexed
urllib.request.urlretrieve(url, '/Volumes/2018_SSD_TL/GlobalColorImages/' + keyword1 + '_flickr/' + keyword1 + '_flickr_' + str(n) + '.jpg')
# urllib.request.urlretrieve(url, '/Users/thomaslloyd/Desktop/colorFinderMultiImages/' + str(n) + '.jpg')
# else raise Exception('url type is not a string')
# main
flickr = initialize(api_key, api_secret)
# urls = fakeurls()
urls = pullimages(flickr)
saveimages(urls, keyword1)
print('number of urls stored: ' + str(len(urls)))
print(keyword1 + ' images downloaded.')
|
python
|
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from sis_provisioner.management.commands import SISProvisionerCommand
from sis_provisioner.models.group import Group
class Command(SISProvisionerCommand):
help = "Prioritize groups for importing"
def handle(self, *args, **options):
Group.objects.update_priority_by_modified_date()
self.update_job()
|
python
|
from usuarios import Usuario
class Admin(Usuario):
def __init__(self, first_name, last_name, username, email):
super().__init__(first_name, last_name, username, email)
# self.priv = []
self.privileges = privileges()
# def show_privileges(self):
# print("\nPrivilegios:")
# for priv in self.priv:
# print(priv)
class privileges():
    def __init__(self, privileges=None):
        # Avoid a shared mutable default argument: each instance gets its own list.
        self.privileges = privileges if privileges is not None else []
def show_privileges(self):
print("\nPrivilegios:")
if self.privileges:
for privilege in self.privileges:
print("- " + privilege)
else:
print("Este usuario no tiene privilegios.")
|
python
|
import FWCore.ParameterSet.Config as cms
from RecoVertex.BeamSpotProducer.BeamSpot_cfi import *
|
python
|
# Copyright (c) 2015-2019 The Botogram Authors (see AUTHORS)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import json
import pytest
from botogram.callbacks import Buttons, parse_callback_data, get_callback_data
from botogram.callbacks import hashed_callback_name
from botogram.components import Component
from botogram.context import Context
from botogram.crypto import TamperedMessageError
from botogram.hooks import Hook
def test_buttons(bot, sample_update):
component = Component("test")
hook = Hook(lambda: None, component)
buttons = Buttons()
buttons[0].url("test 1", "http://example.com")
buttons[0].callback("test 2", "test_callback")
buttons[3].callback("test 3", "another_callback", "data")
buttons[2].switch_inline_query("test 4")
buttons[2].switch_inline_query("test 5", "wow", current_chat=True)
with Context(bot, hook, sample_update):
assert buttons._serialize_attachment(sample_update.chat()) == {
"inline_keyboard": [
[
{"text": "test 1", "url": "http://example.com"},
{
"text": "test 2",
"callback_data": get_callback_data(
bot, sample_update.chat(), "test:test_callback",
),
},
],
[
{"text": "test 4", "switch_inline_query": ""},
{
"text": "test 5",
"switch_inline_query_current_chat": "wow"
},
],
[
{
"text": "test 3",
"callback_data": get_callback_data(
bot, sample_update.chat(), "test:another_callback",
"data",
),
},
],
],
}
def test_parse_callback_data(bot, sample_update):
c = sample_update.chat()
raw = get_callback_data(bot, c, "test_callback", "this is some data!")
assert parse_callback_data(bot, c, raw) == (
hashed_callback_name("test_callback"),
"this is some data!",
)
raw = get_callback_data(bot, c, "test_callback")
assert parse_callback_data(bot, c, raw) == (
hashed_callback_name("test_callback"),
None,
)
with pytest.raises(TamperedMessageError):
raw = get_callback_data(bot, c, "test_callback", "data") + "!"
parse_callback_data(bot, c, raw)
# Now test with disabled signature verification
bot.validate_callback_signatures = False
raw = get_callback_data(bot, c, "test_callback", "data") + "!"
assert parse_callback_data(bot, c, raw) == (
hashed_callback_name("test_callback"),
"data!"
)
|
python
|
#!/usr/bin/python
# Copyright 2022 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import pytest
from utils.common import build_image, latest_build_artifact
class TestBootImg:
@pytest.mark.min_mender_version("1.0.0")
def test_bootimg_creation(
self, request, bitbake_variables, prepared_test_build, bitbake_image
):
"""Test that we can build a bootimg successfully."""
build_image(
prepared_test_build["build_dir"],
prepared_test_build["bitbake_corebase"],
bitbake_image,
['IMAGE_FSTYPES = "bootimg"'],
)
built_img = latest_build_artifact(
request, prepared_test_build["build_dir"], "core-image*.bootimg"
)
distro_features = bitbake_variables["MENDER_FEATURES"].split()
if "mender-grub" in distro_features and "mender-image-uefi" in distro_features:
output = subprocess.check_output(
["mdir", "-i", built_img, "-b", "/grub-mender-grubenv"]
).decode()
assert "mender_grubenv1" in output.split("/")
|
python
|
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sparse_ho.utils_plot import configure_plt, discrete_cmap
save_fig = True
# save_fig = False
configure_plt()
fontsize = 18
current_palette = sns.color_palette("colorblind")
algorithms = ['grid_search10', 'random', 'bayesian', 'grad_search']
dict_title = {}
dict_title['grid_search10'] = 'Grid-search'
dict_title['random'] = 'Random-search'
dict_title['bayesian'] = 'Bayesian'
dict_title['grad_search'] = '1st order method'
plt.close('all')
fig, axarr = plt.subplots(
1, len(algorithms), sharex=True, sharey=True,
figsize=[10.67, 3])
objs_full = np.load("results/objs_grid_search100.npy", allow_pickle=True)
log_alphas_full = np.load(
"results/log_alphas_grid_search100.npy", allow_pickle=True)
cmap = discrete_cmap(10, 'Reds')
c = np.linspace(1, 10, 10)
for i, algorithm in enumerate(algorithms):
objs = np.load("results/objs_%s.npy" % algorithm, allow_pickle=True)
log_alphas = np.load(
"results/log_alphas_%s.npy" % algorithm, allow_pickle=True)
axarr[i].plot(
log_alphas_full, objs_full / objs_full[0], color=current_palette[0],
zorder=1)
pcm = axarr[i].scatter(
log_alphas, objs / objs_full[0], c=c, cmap=cmap, marker='x', zorder=10)
axarr[i].scatter(
log_alphas, np.zeros(len(log_alphas)), c=c, cmap=cmap, marker='x',
# zorder=10)
clip_on=False, zorder=10)
axarr[i].set_title(dict_title[algorithm])
    axarr[i].set_xlabel(r"$\lambda - \lambda_{\max}$", fontsize=fontsize)
axarr[i].set_ylim((0, 1))
print(objs.min())
axarr[0].set_ylabel(r"$\mathcal{C}(\beta^{(\lambda)})$", fontsize=fontsize)
cba = fig.colorbar(pcm, ax=axarr[3], ticks=np.linspace(1, 10, 10))
cba.set_label('Iterations', fontsize=fontsize)
fig.tight_layout()
if save_fig:
fig_dir = "../../../CD_SUGAR/tex/journal/prebuiltimages/"
fig_dir_svg = "../../../CD_SUGAR/tex/journal/images/"
fig.savefig(
fig_dir + "intro_lassoCV.pdf", bbox_inches="tight")
fig.savefig(
fig_dir_svg + "intro_lassoCV.svg", bbox_inches="tight")
plt.show(block=False)
fig.show()
|
python
|
# -*- coding: utf-8 -*-
from rest_framework.permissions import BasePermission
from v1.models.Board import Boards
from v1.models.Permissions import READ, WRITE, DELETE
class BoardPermission(BasePermission):
def has_object_permission(self, request, view, obj):
if view.action in ['get_states', 'retrieve']:
permissions_name = [READ]
elif view.action == 'destroy':
permissions_name = [READ, DELETE]
elif view.action in [
'change_name',
'create'
]:
permissions_name = [READ, WRITE]
else:
permissions_name = [READ, WRITE, DELETE]
user = request.user
return Boards.permissions.has_boards_access(user, obj, permissions_name)
|
python
|
import socket
import struct
import time
import _thread as thread  # Python 3 name of the low-level 'thread' module
from nc_config import *
NC_PORT = 8888
CLIENT_IP = "10.0.0.1"
SERVER_IP = "10.0.0.2"
CONTROLLER_IP = "10.0.0.3"
path_reply = "reply.txt"
len_key = 16
counter = 0
def counting():
last_counter = 0
while True:
        print(counter - last_counter, counter)  # packets in the last second, total packets
last_counter = counter
time.sleep(1)
thread.start_new_thread(counting, ())
#f = open(path_reply, "w")
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((CLIENT_IP, NC_PORT))
while True:
packet, addr = s.recvfrom(1024)
counter = counter + 1
#op = struct.unpack("B", packet[0])
#key_header = struct.unpack(">I", packet[1:5])[0]
#f.write(str(op) + ' ')
#f.write(str(key_header) + '\n')
#f.flush()
#print counter
#f.close()
|
python
|
import logging
import numpy as np
import kubric as kb
from kubric.renderer.blender import Blender as KubricBlender
logging.basicConfig(level="INFO") # < CRITICAL, ERROR, WARNING, INFO, DEBUG
world_matrix = {
"bunny": np.array(
(
(-1.0, 3.2584136988589307e-07, 0.0, 0.7087775468826294),
(-3.2584136988589307e-07, -1.0, 0.0, -1.2878063917160034),
(0.0, 0.0, 1.0, 0.0),
(0.0, 0.0, 0.0, 1.0),
),
),
"suzanne": np.array(
(
(1.0, 0.0, 0.0, -0.8567398190498352),
(0.0, 1.0, 0.0, 0.0),
(0.0, 0.0, 1.0, 0.0),
(0.0, 0.0, 0.0, 1.0),
)
),
"teapot": np.array(
(
(1.0, 0.0, 0.0, -0.9078792333602905),
(0.0, 1.0, 0.0, 1.2115877866744995),
(0.0, 0.0, 1.0, 0.0),
(0.0, 0.0, 0.0, 1.0),
)
),
}
points = {
"bunny": np.array(
(
(
0.044713765382766724,
-1.0193415880203247,
0.8044384121894836,
1.0,
),
(
0.056191492825746536,
-0.31232786178588867,
0.8044384121894836,
1.0,
),
(0.0, 0.0, 0.0, 1.0),
(1.0, 0.0, 0.0, 1.0),
),
),
"suzanne": np.array(
(
(-1.0, 0.0, 0.0, 1.0),
(-0.2928931713104248, 2.9802322387695312e-08, 0.0, 1.0),
(0.0, 0.0, 0.0, 1.0),
(1.0, 0.0, 0.0, 1.0),
)
),
"teapot": np.array(
(
(
0.044713765382766724,
-1.0193415880203247,
0.8044384121894836,
1.0,
),
(
0.056191492825746536,
-0.31232786178588867,
0.8044384121894836,
1.0,
),
(0.0, 0.0, 0.0, 1.0),
(1.0, 0.0, 0.0, 1.0),
),
),
}
def interpolate_position(
t: np.ndarray, handles: np.ndarray, world_matrix: np.ndarray
) -> np.ndarray:
p0, p1, p2, p3 = handles[:, np.newaxis]
t = t[..., np.newaxis]
r = 1 - t
out = r ** 3 * p0 + 3 * r ** 2 * t * p1 + 3 * r * t ** 2 * p2 + t ** 3 * p3
out = out / out[..., [-1]]
return (world_matrix @ out.T).T[..., :-1]
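# Quick sanity check of the Bezier interpolation above (illustrative values, not
# part of the original scene setup): with an identity world matrix and t = 0 the
# curve must start at the xyz coordinates of the first handle.
_check_handles = np.array([[1.0, 2.0, 3.0, 1.0]] + [[0.0, 0.0, 0.0, 1.0]] * 3)
assert np.allclose(
    interpolate_position(np.array([0.0]), _check_handles, np.eye(4)),
    [[1.0, 2.0, 3.0]],
)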
# --- create scene and attach a renderer and simulator
num_frames = 480
scene = kb.Scene(resolution=(256, 256), background=kb.get_color("white"))
scene.frame_end = num_frames # < numbers of frames to render
scene.frame_rate = 24 # < rendering framerate
scene.ambient_illumination = kb.Color(0.05, 0.05, 0.05)
renderer = KubricBlender(scene)
# --- populate the scene with objects, lights, cameras
rng = np.random.RandomState(0)
wall_material = kb.FlatMaterial(
color=kb.get_color("white"), indirect_visibility=True
)
bunny = kb.FileBasedObject(
render_filename="objects/bunny.obj",
name="bunny",
scale=(4.89, 4.89, 4.89),
position=(0, -1, -0.47044),
quaternion=(0.0, 0.0, 0.707, 0.707),
material=kb.PrincipledBSDFMaterial(color=kb.random_hue_color(rng=rng)),
)
suzanne = kb.FileBasedObject(
render_filename="objects/suzanne.obj",
name="suzanne",
scale=(0.316, 0.316, 0.316),
position=(0, 0, 0.001821),
quaternion=(0.5, 0.5, 0.5, 0.5),
material=kb.PrincipledBSDFMaterial(color=kb.random_hue_color(rng=rng)),
)
teapot = kb.FileBasedObject(
render_filename="objects/teapot.obj",
name="teapot",
scale=(0.19, 0.19, 0.19),
position=(0, 1, -0.28363),
quaternion=(0.707, 0.70, 0.0, 0.0),
material=kb.PrincipledBSDFMaterial(color=kb.random_hue_color(rng=rng)),
)
scene += bunny
scene += suzanne
scene += teapot
scene += kb.Cube(
scale=(0.1, 100, 100),
position=(-4, 0, 0),
material=wall_material,
static=True,
background=True,
)
scene += kb.DirectionalLight(
name="sun", position=(4, 0, 3), look_at=(0, 0, 0), intensity=1.5
)
camera = kb.PerspectiveCamera(
name="camera",
position=(0, 0.0, 6.0),
quaternion=(1.0, 0.0, 0.0, 1.0),
)
scene.camera = camera
xs = np.linspace(-np.pi / 2, np.pi / 2, num_frames)
positions = {
"bunny": interpolate_position(
np.abs(np.cos(xs * 8.33)), points["bunny"], world_matrix["bunny"]
),
"teapot": interpolate_position(
np.abs(np.cos(xs * 5.13)), points["teapot"], world_matrix["teapot"]
),
"suzanne": interpolate_position(
np.abs(np.cos(xs * 7.11)), points["suzanne"], world_matrix["suzanne"]
),
}
for frame in range(1, num_frames + 1):
bunny.position = positions["bunny"][frame - 1]
bunny.keyframe_insert("position", frame)
teapot.position = positions["teapot"][frame - 1]
teapot.keyframe_insert("position", frame)
suzanne.position = positions["suzanne"][frame - 1]
suzanne.keyframe_insert("position", frame)
# --- renders the output
kb.as_path("output_top").mkdir(exist_ok=True)
np.save("output_top/suzanne.npy", positions["suzanne"])
np.save("output_top/teapot.npy", positions["teapot"])
np.save("output_top/bunny.npy", positions["bunny"])
np.save("output_top/camera_pos.npy", np.array(camera.position))
renderer.save_state("output_top/trio_top.blend")
frames_dict = renderer.render()
kb.write_image_dict(frames_dict, "output_top")
|
python
|
import pygame
def compare_surfaces(surf_a: pygame.Surface, surf_b: pygame.Surface):
if surf_a.get_size() != surf_b.get_size():
return False
for x in range(surf_a.get_size()[0]):
for y in range(surf_a.get_size()[1]):
if surf_a.get_at((x, y)) != surf_b.get_at((x, y)):
return False
return True
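# A minimal usage sketch (not part of the original module): two surfaces filled
# with the same colour compare equal, and changing a single pixel breaks equality.
if __name__ == "__main__":
    surf_a = pygame.Surface((2, 2))
    surf_b = pygame.Surface((2, 2))
    surf_a.fill((255, 0, 0))
    surf_b.fill((255, 0, 0))
    assert compare_surfaces(surf_a, surf_b)
    surf_b.set_at((0, 0), (0, 255, 0))
    assert not compare_surfaces(surf_a, surf_b)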
|
python
|
from django import forms
from django.contrib import admin
from emoji_picker.widgets import EmojiPickerTextarea
from .attachment import DisplayImageWidgetStackedInline
class TranslationModelForm(forms.ModelForm):
text = forms.CharField(
required=True,
label="Text übersetzt",
help_text="Hier nur den Meldungstext in der ausgewählten Sprache eintragen.",
widget=EmojiPickerTextarea,
max_length=305)
delivered = forms.BooleanField(
label='Versendet',
help_text="Wurde diese Meldung bereits vom Bot versendet?",
disabled=True,
required=False)
class TranslationAdminInline(DisplayImageWidgetStackedInline):
image_display_fields = ['media']
extra = 1
|
python
|
# coding: utf-8
import os
import Config
from Function import Function
class File(object):
def __init__(self, srcFileName, isKeep = False, dstFileName = None):
self.srcFileName = srcFileName
self.isKeep = isKeep
self.dstFileName = dstFileName
self.testFuncs = []
self.codeLines = None
self.__readCode()
def __readCode(self):
if not os.path.exists(self.srcFileName):
            raise Exception('Invalid file path\n')
with open(self.srcFileName, 'r') as f:
self.codeLines = f.readlines()
def __generateSourceCode(self):
if not self.isKeep:
os.remove(self.srcFileName)
self.dstFileName = self.srcFileName
elif self.dstFileName == None:
self.dstFileName = Config.COMPILING_FILE_PREFIX + self.srcFileName
with open(self.dstFileName, 'w') as f:
#header and function
codeStr = Config.COMPILING_FILE_HEADER + '\n' + ''.join(self.codeLines)
#test code
for testFunc in self.testFuncs:
codeStr += '\n' + testFunc[1]
#driver
codeStr += Config.COMPILING_FILE_DRIVER_PREFIX
for testFunc in self.testFuncs:
codeStr += '\t' + testFunc[0] + '();\n\n'
codeStr += Config.COMPILING_FILE_DRIVER_POSTFIX
f.write(codeStr)
def parse(self):
isFunc = False
funcStr = ""
for line in self.codeLines:
if(line.strip().startswith(Config.FUNCTION) ):
#begin one function
isFunc = True
funcStr = ""
elif isFunc:
funcStr += line
if line.strip().startswith('*/'):
isFunc = False
with Function(funcStr) as func:
func.parse()
self.testFuncs.append( (func.testFuncName, func.testCode) )
self.__generateSourceCode()
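# A minimal usage sketch (not part of this module; 'example.c' is a hypothetical
# annotated source file that must exist in the working directory):
if __name__ == '__main__':
    test_file = File('example.c', isKeep=True)
    test_file.parse()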
|
python
|
from django.db import models
from django.contrib.auth.models import AbstractUser
import uuid
# This class defines the user profile and extends AbstractUser,
# since only the first_name and last_name fields had to be removed;
# the rest of the inherited content could be kept.
class profile(AbstractUser):
    """Defines the user model; inherits from AbstractUser and removes some fields"""
first_name = None
last_name = None
nombre = models.CharField(max_length=100,blank=True)
a_paterno = models.CharField(max_length=100, blank=True)
a_materno = models.CharField(max_length=100,blank=True)
img = models.ImageField(upload_to = 'user/', blank= True, null = True)
clave_confirmacion = models.UUIDField(default=uuid.uuid4,editable=False)
genero = models.CharField(max_length=20, default='Prefiero no decirlo',blank=True)
comentario = models.TextField(blank= True, null = True)
# class Meta(AbstractUser.Meta):
# swappable = 'AUTH_USER_MODEL'
|
python
|
# -*- coding: utf-8 -*-
import subprocess
import pytest
# Use an empty temporary HOME and unset CASA_BASE_DIRECTORY (see
# conftest.py)
pytestmark = pytest.mark.usefixtures("isolate_from_home")
def test_help():
retval = subprocess.call(['casa_distro', '--help'])
assert retval == 0
def test_help_subcommand():
retval = subprocess.call(['casa_distro', 'help'])
assert retval == 0
@pytest.mark.parametrize("subcommand", [
'help',
'distro',
'list',
'run',
'pull_image',
'list_images',
'shell',
'mrun',
'bv_maker',
'clean_images',
])
def test_help_of_subcommands(subcommand):
p = subprocess.Popen(['casa_distro', 'help', subcommand],
stdout=subprocess.PIPE, bufsize=-1,
universal_newlines=True)
stdoutdata, _ = p.communicate()
assert p.returncode == 0
assert subcommand in stdoutdata
def test_list():
retval = subprocess.call(['casa_distro', 'list'])
assert retval == 0
def test_distro_subcommand():
p = subprocess.Popen(['casa_distro', 'distro'],
stdout=subprocess.PIPE, bufsize=-1,
universal_newlines=True)
stdoutdata, _ = p.communicate()
assert p.returncode == 0
assert 'brainvisa' in stdoutdata
|
python
|
import yaml
from typing import Any, Dict, List, Tuple, Union
import typing
import enum
import attr
import attrs_strict
from attrs_strict import type_validator
class MissingAttribute(Exception):
pass
def yaml_dump(d):
return yaml.dump(d, Dumper=yaml.Dumper)
def self_attributes(self, attrs):
return {attr.name: getattr(self, attr.name) for attr in attrs}
class Base:
def _semantic_check(self):
pass
@attr.s
class Ref:
name: str = attr.ib(default=None, validator=type_validator())
@attr.s
class FileObject(Base):
name: Union[str, None] = attr.ib(default=None, validator=type_validator())
description: Union[str, None] = attr.ib(default=None, validator=type_validator())
apiVersion: str = attr.ib(default="tekton.dev/v1beta1", validator=type_validator())
def asdict(self):
def get_delete(d, key):
v = d[key]
del d[key]
return v
def rewrite_if_fileobject(d):
if "apiVersion" in d:
# If there is an apiVersion it is a file object. Rearrange attributes
# Move all keys to the spec
spec = {}
for (key, val) in d.items():
spec[key] = val
for (key, val) in spec.items():
del d[key]
# create the file level attributes
d.update(
{
"metadata": {"name": get_delete(spec, "name")},
"kind": get_delete(spec, "kind"),
"apiVersion": get_delete(spec, "apiVersion"),
}
)
if len(spec) > 0:
d["spec"] = spec
if "description" in spec:
d["metadata"]["description"] = get_delete(spec, "description")
def rewrite_fileobjects(d):
if isinstance(d, dict):
rewrite_if_fileobject(d)
for (key, val) in d.items():
rewrite_fileobjects(val)
if isinstance(d, list):
for i in d:
rewrite_fileobjects(i)
        root = attr.asdict(self, filter=lambda attr, value: value is not None)
# asdict returned a dictionary that is specified correctly except the Fileobjects
rewrite_fileobjects(root)
return root
def to_yaml(self, **kwargs):
if kwargs.get("check", True):
self._semantic_check()
return yaml_dump(self.asdict())
def _semantic_check(self):
        if self.name is None:
raise MissingAttribute(f"{str(self.__class__)} must have a name")
def ref(self) -> Ref:
return Ref(self.name)
@attr.s
class FileObjectAlpha(FileObject):
apiVersion: str = attr.ib(default="tekton.dev/v1alpha1", validator=type_validator())
@attr.s
class EnvVar:
name: str = attr.ib(default=None, validator=type_validator())
value: Union[str, None] = attr.ib(default=None, validator=type_validator())
@attr.s
class Step:
image: Union[str, None] = attr.ib(default=None, validator=type_validator())
name: Union[str, None] = attr.ib(default=None, validator=type_validator())
workingDir: Union[str, None] = attr.ib(default=None, validator=type_validator())
args: Union[List[str], None] = attr.ib(default=None, validator=type_validator())
command: Union[List[str], None] = attr.ib(default=None, validator=type_validator())
# EnvFrom []EnvFromSource
env: Union[List[EnvVar], None] = attr.ib(default=None, validator=type_validator())
# VolumeMounts []VolumeMount
class ParamEnum(enum.Enum):
str = enum.auto()
list = enum.auto()
@attr.s
class Param:
name: Union[str, None] = attr.ib(default=None, validator=type_validator())
value: Union[str, None] = attr.ib(default=None, validator=type_validator())
@attr.s
class ParamSpec:
name: Union[str, None] = attr.ib(default=None, validator=type_validator())
description: Union[str, None] = attr.ib(default=None, validator=type_validator())
default: Union[str, None] = attr.ib(default=None, validator=type_validator())
type: Union[ParamEnum, None] = attr.ib(default=None, validator=type_validator())
def ref(self) -> str:
return f"$(params.{self.name})"
class Inputs:
pass
class Resources:
pass
@attr.s
class TaskSpec:
steps: Union[None, List[Step]] = attr.ib(default=None, validator=type_validator())
params: Union[None, List[ParamSpec]] = attr.ib(
default=None, validator=type_validator()
)
resources: Union[None, List[Resources]] = attr.ib(
default=None, validator=type_validator()
)
class TaskRun(FileObject):
pass
@attr.s
class Task(FileObject, TaskSpec):
kind: str = attr.ib(default="Task", validator=type_validator())
def _semantic_check(self):
        if self.steps is None or len(self.steps) == 0:
raise MissingAttribute("Task object must have at least one step")
@attr.s
class PipelineTask:
name: Union[str, None] = attr.ib(default=None, validator=type_validator())
taskRef: Union[None, Ref] = attr.ib(default=None, validator=type_validator())
params: Union[None, List[Param]] = attr.ib(default=None, validator=type_validator())
@attr.s
class PipelineSpec:
tasks: Union[None, List[PipelineTask]] = attr.ib(
default=None, validator=type_validator()
)
params: Union[None, List[ParamSpec]] = attr.ib(
default=None, validator=type_validator()
)
@attr.s
class Pipeline(FileObject, PipelineSpec):
kind: str = attr.ib(default="Pipeline", validator=type_validator())
@attr.s
class PipelineRunSpec:
params: Union[None, List[ParamSpec]] = attr.ib(
default=None, validator=type_validator()
)
pipelineRef: Union[None, Ref] = attr.ib(default=None, validator=type_validator())
pipelineSpec: Union[None, PipelineSpec] = attr.ib(
default=None, validator=type_validator()
)
serviceAccountName: Union[None, str] = attr.ib(
default=None, validator=type_validator()
)
@attr.s
class PipelineRun(FileObject, PipelineRunSpec):
kind: str = attr.ib(default="PipelineRun", validator=type_validator())
# TriggerResourceTemplates = Union[PipelineRun, ...]
TriggerResourceTemplates = PipelineRun
@attr.s
class TriggerTemplateSpec:
resourcetemplates: Union[None, List[TriggerResourceTemplates]] = attr.ib(
default=None, validator=type_validator()
)
params: Union[None, List[ParamSpec]] = attr.ib(
default=None, validator=type_validator()
)
@attr.s
class TriggerTemplate(FileObjectAlpha, TriggerTemplateSpec):
kind: str = attr.ib(default="TriggerTemplate", validator=type_validator())
@attr.s
class TriggerBindingSpec:
params: Union[None, List[ParamSpec]] = attr.ib(
default=None, validator=type_validator()
)
@attr.s
class TriggerBinding(FileObjectAlpha, TriggerBindingSpec):
kind: str = attr.ib(default="TriggerBinding", validator=type_validator())
@attr.s
class EventListenerTrigger:
binding: Union[None, Ref] = attr.ib(default=None, validator=type_validator())
template: Union[None, Ref] = attr.ib(default=None, validator=type_validator())
@attr.s
class EventListenerSpec:
triggers: Union[None, List[EventListenerTrigger]] = attr.ib(
default=None, validator=type_validator()
)
@attr.s
class EventListener(FileObjectAlpha, EventListenerSpec):
kind: str = attr.ib(default="EventListener", validator=type_validator())
|
python
|
import requests
from requests.exceptions import HTTPError, ConnectionError
from .resources.functions import validate_protocol
class WordPressAPI:
""" WP API Object Definition """
def __init__(self, domain, username, password, protocol="https", namespace="wp-json"):
""" Object constructor """
self.domain = domain
self.username = username
self.password = password
self.protocol = protocol
self.namespace = namespace
self.connected = False
self.headers = {
"Content-Type": "application/json"
}
def __repr__(self):
""" Object representation (for developers) """
return f"WordPressAPI({self.domain}, {self.username}, {self.password})"
def __str__(self):
""" String representation """
return f"WordPressAPI Object : {self.url}"
@property
def url(self):
""" URL Builder for the API """
return f"{self.protocol}://{self.domain}/{self.namespace}"
@property
def protocol(self):
""" Getter for the protocol so that it's read only """
return self.__protocol
@protocol.setter
def protocol(self, proto):
""" Setter for the protocol verifying it's correct (either http or https) """
self.__protocol = validate_protocol(proto)
@staticmethod
def parse_json(response):
return response.json()
@staticmethod
def parse_wp_error(response):
data = response.json()
print(f"STATUS={data['data']['status']}\nCODE={data['code']}\nMESSAGE={data['message']}")
def build_authentication_url(self):
return f"{self.url}/jwt-auth/v1/token?username={self.username}&password={self.password}"
def connect(self):
""" Connect to the actual WP API. Returns None if connection wasn't successful """
try:
response = requests.post(self.build_authentication_url(), headers=self.headers)
response.raise_for_status()
self.connected = True
self.headers.update({"Authorization": f"Bearer {self.parse_json(response)['token']}"})
return response
except HTTPError as error:
self.parse_wp_error(error.response)
except ConnectionError as error:
print(error)
def get(self, endpoint, data=None, get_response=False):
""" Attempt a GET action. Returns None if request wasn't successful or raise Exception if attempted to GET when API is not connected """
try:
if self.connected:
response = requests.get(self.url + endpoint, params=data, headers=self.headers)
response.raise_for_status()
return response if get_response else self.parse_json(response)
else:
raise Exception("API is not connected!")
except HTTPError as error:
self.parse_wp_error(error.response)
except ConnectionError as error:
print(error)
def post(self, endpoint, data, get_response=False):
""" Attempt a POST action. Returns None if request wasn't successful or raise Exception if attempted to GET when API is not connected """
try:
if self.connected:
response = requests.post(self.url + endpoint, data=data, headers=self.headers)
response.raise_for_status()
return response if get_response else self.parse_json(response)
else:
raise Exception("API is not connected!")
except HTTPError as error:
self.parse_wp_error(error.response)
except ConnectionError as error:
print(error)
# TODO: Need to implement other methods (PUT, DELETE, etc.)
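# A minimal usage sketch (not part of this module; the domain, credentials and
# endpoint are placeholders, and the GET route assumes the default wp/v2 API):
if __name__ == "__main__":
    wp = WordPressAPI("example.com", "admin", "secret")
    if wp.connect() is not None:
        posts = wp.get("/wp/v2/posts", data={"per_page": 10})
        print(posts)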
|
python
|
# The MIT License (MIT)
#
# Copyright 2020 Barbara Barros Carlos, Tommaso Sartor
#
# This file is part of crazyflie_nmpc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from acados_template import *
import acados_template as at
from export_ode_model import *
import numpy as np
import scipy.linalg
from ctypes import *
from os.path import dirname, join, abspath
ACADOS_PATH = join(dirname(abspath(__file__)), "../../../acados")
# create render arguments
ra = acados_ocp_nlp()
# export model
model = export_ode_model()
Tf = 0.75
N = 50
nx = model.x.size()[0]
nu = model.u.size()[0]
ny = nx + nu
ny_e = nx
# set ocp_nlp_dimensions
nlp_dims = ra.dims
nlp_dims.nx = nx
nlp_dims.ny = ny
nlp_dims.ny_e = ny_e
nlp_dims.nbx = 0
nlp_dims.nbu = nu
nlp_dims.nbx_e = 0
nlp_dims.nu = model.u.size()[0]
nlp_dims.N = N
# parameters
g0 = 9.8066 # [m/s^2] acceleration of gravity
mq = 33e-3 # [kg] total mass (with one marker)
Ct = 3.25e-4 # [N/krpm^2] Thrust coef
# bounds
hov_w = np.sqrt((mq*g0)/(4*Ct))
max_thrust = 22
# set weighting matrices
nlp_cost = ra.cost
Q = np.eye(nx)
Q[0,0] = 120.0 # x
Q[1,1] = 100.0 # y
Q[2,2] = 100.0 # z
Q[3,3] = 1.0e-3 # qw
Q[4,4] = 1.0e-3 # qx
Q[5,5] = 1.0e-3 # qy
Q[6,6] = 1.0e-3 # qz
Q[7,7] = 7e-1 # vbx
Q[8,8] = 1.0 # vby
Q[9,9] = 4.0 # vbz
Q[10,10] = 1e-5 # wx
Q[11,11] = 1e-5 # wy
Q[12,12] = 10.0 # wz
R = np.eye(nu)
R[0,0] = 0.06 # w1
R[1,1] = 0.06 # w2
R[2,2] = 0.06 # w3
R[3,3] = 0.06 # w4
nlp_cost.W = scipy.linalg.block_diag(Q, R)
Vx = np.zeros((ny, nx))
Vx[0,0] = 1.0
Vx[1,1] = 1.0
Vx[2,2] = 1.0
Vx[3,3] = 1.0
Vx[4,4] = 1.0
Vx[5,5] = 1.0
Vx[6,6] = 1.0
Vx[7,7] = 1.0
Vx[8,8] = 1.0
Vx[9,9] = 1.0
Vx[10,10] = 1.0
Vx[11,11] = 1.0
Vx[12,12] = 1.0
nlp_cost.Vx = Vx
Vu = np.zeros((ny, nu))
Vu[13,0] = 1.0
Vu[14,1] = 1.0
Vu[15,2] = 1.0
Vu[16,3] = 1.0
nlp_cost.Vu = Vu
nlp_cost.W_e = 50*Q
Vx_e = np.zeros((ny_e, nx))
Vx_e[0,0] = 1.0
Vx_e[1,1] = 1.0
Vx_e[2,2] = 1.0
Vx_e[3,3] = 1.0
Vx_e[4,4] = 1.0
Vx_e[5,5] = 1.0
Vx_e[6,6] = 1.0
Vx_e[7,7] = 1.0
Vx_e[8,8] = 1.0
Vx_e[9,9] = 1.0
Vx_e[10,10] = 1.0
Vx_e[11,11] = 1.0
Vx_e[12,12] = 1.0
nlp_cost.Vx_e = Vx_e
nlp_cost.yref = np.array([0, 0, 0.5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, hov_w, hov_w, hov_w, hov_w])
nlp_cost.yref_e = np.array([0, 0, 0.5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
nlp_con = ra.constraints
nlp_con.lbu = np.array([0,0,0,0])
nlp_con.ubu = np.array([+max_thrust,+max_thrust,+max_thrust,+max_thrust])
nlp_con.x0 = np.array([0,0,0,1,0,0,0,0,0,0,0,0,0])
nlp_con.idxbu = np.array([0, 1, 2, 3])
## set QP solver
#ra.solver_options.qp_solver = 'FULL_CONDENSING_QPOASES'
ra.solver_options.qp_solver = 'PARTIAL_CONDENSING_HPIPM'
ra.solver_options.hessian_approx = 'GAUSS_NEWTON'
ra.solver_options.integrator_type = 'ERK'
# set prediction horizon
ra.solver_options.tf = Tf
ra.solver_options.nlp_solver_type = 'SQP_RTI'
#ra.solver_options.nlp_solver_type = 'SQP'
# set header path
ra.acados_include_path = f'{ACADOS_PATH}/include'
ra.acados_lib_path = f'{ACADOS_PATH}/lib'
ra.model = model
acados_solver = generate_solver(ra, json_file = 'acados_ocp.json')
print('>> NMPC exported')
|
python
|
#!/usr/bin/env python3
import sys
# read input file
with open(sys.argv[1], 'r') as fd:
partitions = fd.readlines()
# init
part1 = 0
part2 = 0
seat_ids = []
# part1
for partition in partitions:
# row
left = 128
row = 0
for letter in partition[:7]:
left = int(left / 2)
if letter == "B":
row += left
# column
column = 0
left = 8
for letter in partition[7:]:
left = int(left / 2)
if letter == "R":
column += left
# seat_id
seat_ids.append(row * 8 + column)
part1 = max(seat_ids)
# part2
index = 0
seat_ids.sort()
for seat_id in seat_ids[0:len(seat_ids) - 1]:
if seat_ids[index + 1] - seat_ids[index] > 1:
part2 = seat_id + 1
break
index += 1
# done
print(f"part1: {part1}")
print(f"part2: {part2}")
|
python
|
#!/usr/bin/env python
# coding: utf-8
import logging
def get_logger(obj, level=None):
logging.basicConfig()
logger = logging.getLogger(obj.__name__)
if level is not None:
logger.setLevel(level)
return logger
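# A minimal usage sketch (not part of the original module): build a DEBUG-level
# logger named after a class and emit one message through it.
if __name__ == "__main__":
    class Demo:
        pass
    log = get_logger(Demo, level=logging.DEBUG)
    log.debug("logger '%s' is ready", log.name)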
|
python
|
from django.test import TestCase
from .mixins import TwoUserMixin, ProposalMixin
from consensus_engine.models import ConsensusHistory
from django.utils import timezone
# models test
class ConsensusHistoryTest(TwoUserMixin, ProposalMixin, TestCase):
def test_snapshot(self):
p = self.create_proposal_with_two_proposal_choices()
dt = timezone.now()
ss = ConsensusHistory.build_snapshot(p)
ss.save()
self.assertTrue(ss is not None)
self.assertTrue(ss.snapshot_date >= dt and ss.snapshot_date <= timezone.now())
self.assertTrue(ss.proposal.id == p.id)
self.assertTrue(ss.consensus is None)
no_votes_data = [
{'choice_id': 1, 'text': "Yes", 'count': 0},
{'choice_id': 2, 'text': "No", 'count': 0}
]
self.assertTrue(ss.get_consensus_data() == no_votes_data)
pc = p.proposalchoice_set.first()
p.publish()
pc.vote(self.user)
ss2 = ConsensusHistory.objects.at_date(proposal=p, at_date=timezone.now())
one_vote_data = [
{'choice_id': 1, 'text': "Yes", 'count': 1},
{'choice_id': 2, 'text': "No", 'count': 0}
]
self.assertTrue(ss2.get_consensus_data() == one_vote_data)
all_history = ConsensusHistory.objects.all_history_for_proposal(p)
self.assertTrue(all_history.count() == 2)
|
python
|
"""
Script for performing a fit to a histogramm of recorded
time differences for the use with QNet
"""
from __future__ import print_function
import sys
from matplotlib import pylab
import numpy
import scipy.optimize as optimize
def fit(bincontent=None, binning=(0, 10, 21), fitrange=None):
"""
Fit function
:param bincontent:
:param binning:
:param fitrange:
:returns:
"""
def decay(p, x):
return p[0] * numpy.exp(-x / p[1]) + p[2]
def error(p, x, y):
return decay(p, x) - y
if bincontent is None:
nbins = 10
xmin = 1.0
xmax = 20.0
times = [float(l) for l in open(sys.argv[1]).readlines()
if xmin < float(l) < xmax]
print(len(times), "decay times")
# nbins = optimalbins.optbinsize(times, 1, 80)
# print(nbins, "Optimalbins selects nbins")
# nbins = optimalbins.optbinsize(times, 1, 30)
print("Nbins:", nbins)
bin_edges = numpy.linspace(binning[0], binning[1], binning[2])
bin_centers = bin_edges[:-1] + 0.5 * (bin_edges[1] - bin_edges[0])
hist, edges = numpy.histogram(times, bin_edges)
# hist = hist[:-1]
p0 = numpy.array([200, 2.0, 5])
output = optimize.leastsq(error, p0, args=(bin_centers, hist),
full_output=1)
p = output[0]
covar = output[1]
print("Fit parameters:", p)
print("Covariance matrix:", covar)
chisquare=0.
deviations=error(p, bin_centers, hist)
for i, d in enumerate(deviations):
chisquare += d * d / decay(p, bin_centers[i])
params = {"legend.fontsize": 13}
pylab.rcParams.update(params)
fitx = numpy.linspace(xmin, xmax, 100)
pylab.plot(bin_centers, hist, "b^", fitx, decay(p, fitx), "b-")
pylab.ylim(0, max(hist) + 100)
pylab.xlabel("Decay time in microseconds")
pylab.ylabel("Events in time bin")
# pylab.legend(("Data","Fit: (%4.2f +- %4.2f) microsec," +
# "chisq / ndf=%4.2f" % (p[1], numpy.sqrt(covar[1][1]),
# chisquare / (nbins - len(p)))))
pylab.legend(("Data","Fit: (%4.2f) microsec," +
"chisq/ndf=%4.2f" % p[1], chisquare / (nbins - len(p))))
pylab.grid()
pylab.savefig("fit.png")
else:
# this is then used for the mudecay window in muonic.
# we have to adjust the bins to the values of the used histogram.
if len(bincontent) == 0:
print("WARNING: Empty bins.")
return None
bins = numpy.linspace(binning[0], binning[1], binning[2])
bin_centers = bins[:-1] + 0.5 * (bins[1] - bins[0])
if fitrange is not None:
if fitrange[0] < binning[0]:
fitrange = (binning[0], fitrange[1])
if fitrange[1] > binning[1]:
fitrange = (fitrange[0], binning[1])
bin_mask = [(bin_centers <= fitrange[1]) &
(bin_centers >= fitrange[0])]
bin_centers_ = numpy.asarray([x for x in bin_centers
if fitrange[0] <= x <= fitrange[1]])
if len(bin_centers_) < 3:
print("WARNING: fit range too small. " +
"Skipping fitting. Try with larger fit range.")
return None
else:
bin_centers = bin_centers_
bincontent = bincontent[bin_mask]
# we cut the leading edge of the distribution away for the fit
glob_max = max(bincontent)
cut = 0
for i in enumerate(bincontent):
if i[1] == glob_max:
cut = i[0]
cut_bincontent = bincontent[cut:]
# cut_bincenter = bin_centers[cut]
cut_bincenters = bin_centers[cut:]
# # maybe something for the future..
# nbins = optimalbins.optbinsize(cut_bincontent, 1, 20)
# fit_bins = numpy.linspace(cut_bincenter, 20, nbins)
# fit_bin_centers = fit_bins[:-1] + 0.5 * (fit_bins[1] - fit_bins[0])
# fit_bincontent = numpy.zeros(len(fit_bin_centers))
# # the bincontent must be redistributed to fit_bincontent
# for binindex_fit in xrange(len(fit_bincontent)):
# for binindex,content in enumerate(bincontent):
# if bin_centers[binindex] <= fit_bin_centers[binindex_fit]:
# fit_bincontent[binindex_fit] += content
p0 = numpy.array([200, 2.0, 5])
# output = optimize.leastsq(error, p0,
# args=(fit_bin_centers,fitbincontent),
# full_output=1)
output = optimize.leastsq(error, p0,
args=(cut_bincenters, cut_bincontent),
full_output=1)
p = output[0]
covar = output[1]
print("Fit parameters:", p)
print("Covariance matrix:", covar)
chisquare = 0.
deviations = error(p, cut_bincenters, cut_bincontent)
for i, d in enumerate(deviations):
chisquare += d * d / decay(p, cut_bincenters[i])
params = {"legend.fontsize": 13}
pylab.rcParams.update(params)
# nbins = 84
nbins = len(bins)
xmin = cut_bincenters[0]
xmax = cut_bincenters[-1]
fitx = numpy.linspace(xmin, xmax, 100)
# return (bin_centers, bincontent, fitx,
# decay, p, covar, chisquare, nbins)
return (cut_bincenters, cut_bincontent, fitx,
decay, p, covar, chisquare, nbins)
def gaussian_fit(bincontent, binning=(0, 2, 10), fitrange=None):
"""
    Gaussian fit function
:param bincontent:
:param binning:
:param fitrange:
:returns:
"""
def gauss(p, x):
return (p[0] * (1 / (p[1] * numpy.sqrt(2 * numpy.pi))) *
numpy.exp(-0.5 * (((x - p[2]) / p[1]) ** 2)))
def error(p, x, y):
return gauss(p, x) - y
if len(bincontent) == 0:
print("WARNING: Empty bins.")
return None
# this is then used for the mudecay window in muonic.
# we have to adjust the bins to the values of the used histogram.
bins = numpy.linspace(binning[0], binning[1], binning[2])
bin_centers = bins[:-1] + 0.5 * (bins[1] - bins[0])
if fitrange is not None:
if fitrange[0] < binning[0]:
fitrange = (binning[0], fitrange[1])
if fitrange[1] > binning[1]:
fitrange = (fitrange[0], binning[1])
bin_mask = [(bin_centers <= fitrange[1]) &
(bin_centers >= fitrange[0])]
bin_centers_ = numpy.asarray([x for x in bin_centers
if fitrange[0] <= x <= fitrange[1]])
if len(bin_centers_) < 3:
print("WARNING: fit range too small. " +
"Skipping fitting. Try with larger fit range.")
return None
else:
bin_centers = bin_centers_
bincontent = bincontent[bin_mask]
# # we cut the leading edge of the distribution away for the fit
# glob_max = max(bincontent)
# cut = 0
# for i in enumerate(bincontent):
# if i[1] == glob_max:
# cut = i[0]
cut_bincontent = bincontent # [cut:]
# cut_bincenter = bin_centers # [cut]
cut_bincenters = bin_centers # [cut:]
# # maybe something for the future..
# nbins = optimalbins.optbinsize(cut_bincontent, 1, 20)
# fit_bins = numpy.linspace(cut_bincenter, 20, nbins)
# fit_bin_centers = fit_bins[:-1] + 0.5 * (fit_bins[1] - fit_bins[0])
# fit_bincontent = numpy.zeros(len(fit_bin_centers))
# # the bincontent must be redistributed to fit_bincontent
# for binindex_fit in xrange(len(fit_bincontent)):
# for binindex, content in enumerate(bincontent):
# if bin_centers[binindex] <= fit_bin_centers[binindex_fit]:
# fit_bincontent[binindex_fit] += content
wsum = cut_bincontent.sum()
mean = (cut_bincontent * cut_bincenters).sum() / wsum
meansquared = (cut_bincontent * cut_bincenters ** 2).sum() / wsum
var = meansquared - mean ** 2
# p0 = numpy.array([20, 1.0, 5])
p0 = numpy.array([max(cut_bincontent), var, mean])
# output = optimize.leastsq(error, p0,
# args=(fit_bin_centers, fitbincontent),
# full_output=1)
output = optimize.leastsq(error, p0,
args=(cut_bincenters, cut_bincontent),
full_output=1)
p = output[0]
covar = output[1]
print("Fit parameters:", p)
print("Covariance matrix:", covar)
chisquare = 0.
deviations = error(p, cut_bincenters, cut_bincontent)
for i, d in enumerate(deviations):
        chisquare += d * d / gauss(p, cut_bincenters[i])
params = {"legend.fontsize": 13}
pylab.rcParams.update(params)
# nbins = 84
nbins = len(bins)
xmin = cut_bincenters[0]
xmax = cut_bincenters[-1]
fitx = numpy.linspace(xmin, xmax, 100)
# return (bin_centers, bincontent, fitx, decay, p, covar, chisquare, nbins)
return (cut_bincenters, cut_bincontent, fitx,
gauss, p, covar, chisquare, nbins)
if __name__ == '__main__':
fit()
|
python
|
from easydict import EasyDict
pong_dqn_gail_config = dict(
exp_name='pong_gail_dqn_seed0',
env=dict(
collector_env_num=8,
evaluator_env_num=8,
n_evaluator_episode=8,
stop_value=20,
env_id='PongNoFrameskip-v4',
frame_stack=4,
),
reward_model=dict(
type='gail',
input_size=[4, 84, 84],
hidden_size=128,
batch_size=64,
learning_rate=1e-3,
update_per_collect=100,
collect_count=1000,
action_size=6,
# Users should add their own model path here. Model path should lead to a model.
# Absolute path is recommended.
# In DI-engine, it is ``exp_name/ckpt/ckpt_best.pth.tar``.
expert_model_path='model_path_placeholder',
# Path where to store the reward model
reward_model_path='data_path_placeholder+/reward_model/ckpt/ckpt_best.pth.tar',
# Users should add their own data path here. Data path should lead to a file to store data or load the stored data.
# Absolute path is recommended.
# In DI-engine, it is usually located in ``exp_name`` directory
# e.g. 'exp_name/expert_data.pkl'
data_path='data_path_placeholder',
),
policy=dict(
cuda=True,
priority=False,
model=dict(
obs_shape=[4, 84, 84],
action_shape=6,
encoder_hidden_size_list=[128, 128, 512],
),
nstep=1,
discount_factor=0.99,
learn=dict(
update_per_collect=10,
batch_size=32,
learning_rate=0.0001,
target_update_freq=500,
),
collect=dict(n_sample=96, ),
eval=dict(evaluator=dict(eval_freq=4000, )),
other=dict(
eps=dict(
type='exp',
start=1.,
end=0.05,
decay=250000,
),
replay_buffer=dict(replay_buffer_size=100000, ),
),
),
)
pong_dqn_gail_config = EasyDict(pong_dqn_gail_config)
main_config = pong_dqn_gail_config
pong_dqn_gail_create_config = dict(
env=dict(
type='atari',
import_names=['dizoo.atari.envs.atari_env'],
),
env_manager=dict(type='subprocess'),
policy=dict(type='dqn'),
)
pong_dqn_gail_create_config = EasyDict(pong_dqn_gail_create_config)
create_config = pong_dqn_gail_create_config
if __name__ == '__main__':
# or you can enter `ding -m serial_gail -c pong_gail_dqn_config.py -s 0`
# then input the config you used to generate your expert model in the path mentioned above
# e.g. pong_dqn_config.py
from ding.entry import serial_pipeline_gail
from dizoo.atari.config.serial.pong import pong_dqn_config, pong_dqn_create_config
expert_main_config = pong_dqn_config
expert_create_config = pong_dqn_create_config
serial_pipeline_gail(
(main_config, create_config), (expert_main_config, expert_create_config),
max_env_step=1000000,
seed=0,
collect_data=True
)
|
python
|
class MyClass:
def method1(self):
print('myClass method1')
def method2(self, someString):
print("myclass method2 " + someString)
class MyOtherClass(MyClass):
def method1(self):
MyClass.method1(self)
print("anotherClass method1")
def main():
c = MyClass()
c.method1()
c.method2("hello python")
c2 = MyOtherClass()
c2.method1()
c2.method2("hello python")
if __name__ == "__main__":
main()
|
python
|
# WikiBot
#
# Made by Aryan Takalkar
import speech_recognition as speech
import wikipedia
import pyttsx3
engine = pyttsx3.init()
running = True
def speech_init():
engine.setProperty('rate', 175)
engine.setProperty('volume' , 2)
    voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
def speech_output(x):
engine.say(x)
engine.runAndWait()
def start_listening():
global command
try:
r = speech.Recognizer()
with speech.Microphone() as source:
r.adjust_for_ambient_noise(source)
speech_output("Say something")
audio = r.listen(source)
command = r.recognize_google(audio)
speech_output("You said: " + command)
except speech.UnknownValueError:
speech_output("Sorry, I didn't get that. Try again.")
start_listening()
return str(command)
### Main Code ###
speech_init()
speech_output("I am Wikibot. I can give you information on just about anything, as long as it's in a Wikipedia article.")
userinp = start_listening()
while running:
    try:
        # wikipedia.summary raises DisambiguationError for ambiguous queries,
        # so it must live inside the try block for the handler to catch it.
        y = wikipedia.summary(userinp, sentences=5)
        speech_output(y)
        userinp = start_listening()
    except wikipedia.DisambiguationError:
        speech_output("Sorry, I didn't get that. Try again.")
        userinp = start_listening()
|
python
|
"""
Provides utility functions for creating plots in exercise 8.
"""
from typing import Union
def organize_kwargs(
user_kwargs: Union[dict, None], default_kwargs: dict = None
) -> dict:
"""
Update default keyword argument configuration with user provided
configuration.
Parameters
----------
user_kwargs: Union[dict, None]
Dictionary of user provided keyword argument configurations, or
None
default_kwargs: dict
Default keyword argument configuration to be updated with user
configuration
Returns
-------
dict
Complete keyword argument configuration
"""
kwargs = user_kwargs or {}
default_kwargs = default_kwargs or {}
return {**default_kwargs, **kwargs}
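# A minimal usage sketch (not part of the original module; the keyword names are
# illustrative): user-provided values override the defaults, missing keys keep theirs.
if __name__ == "__main__":
    defaults = {"color": "C0", "linewidth": 1.0}
    assert organize_kwargs({"color": "k"}, defaults) == {"color": "k", "linewidth": 1.0}
    assert organize_kwargs(None, defaults) == defaults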
|
python
|
import s3fs
import numpy as np
import pandas as pd
import xarray as xr
from pyproj import Proj
def isin_merra_cell(lat, lon, latm, lonm):
dlat, dlon = 0.5, 0.625
lat1, lat2 = latm - dlat/2, latm + dlat/2
lon1, lon2 = lonm - dlon/2, lonm + dlon/2
lon_slices = [(lon1, lon2)]
if lon2 > 180:
lon_slices.append((lon1, 180))
lon_slices.append((-180, lon2 - 360))
elif lon1 <= -180:
lon_slices.append((-180, lon2))
lon_slices.append((lon1 + 360, 180))
for slc in lon_slices:
lon1, lon2 = slc
isin_cell = (lat1 <= lat <= lat2) & (lon1 <= lon <= lon2)
if isin_cell:
return True
return False
def merra2_idx2(lat, lon, latmg, lonmg):
dlat, dlon = 0.25, 0.3125
lat1, lat2 = lat - dlat, lat + dlat
lon1, lon2 = lon - dlon, lon + dlon
lonmask = (lonmg >= lon1) & (lonmg <= lon2)
if lon2 > 180:
lonmask |= (lonmg <= lon2 + dlon - 360)
mask = lonmask & (latmg >= lat1) & (latmg <= lat2)
iidx = np.arange(latmg.size).reshape(latmg.shape)
for i in iidx[mask]:
if isin_merra_cell(lat, lon, latmg.flat[i], lonmg.flat[i]):
return i
return np.nan
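# Quick sanity check (illustrative coordinates, not part of the original script):
# a point well inside the MERRA-2 cell centred at (0, 0) is accepted, while one
# more than half a cell away in longitude is rejected.
assert isin_merra_cell(0.1, 0.2, 0.0, 0.0)
assert not isin_merra_cell(0.1, 0.5, 0.0, 0.0)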
fs = s3fs.S3FileSystem(anon=True)
sats = [16, 17]
domains = ['C', 'F']
for sat in sats:
for domain in domains:
f = fs.open(fs.ls(f'noaa-goes{sat}/ABI-L2-DSI{domain}/2020/001/01')[0])
ds = xr.open_dataset(f)
h = ds.goes_imager_projection.perspective_point_height[0]
lon_0 = ds.goes_imager_projection.longitude_of_projection_origin[0]
sweep = ds.goes_imager_projection.sweep_angle_axis
p = Proj(proj='geos', h=h, lon_0=lon_0, sweep=sweep)
x, y = np.meshgrid(h*ds.x, h*ds.y)
lon, lat = p(x, y, inverse=True)
lon[lon == 1e30] = np.nan
lat[lat == 1e30] = np.nan
ds = ds.assign_coords(lat=(('y', 'x'), lat), lon=(('y', 'x'), lon))
npts = ds.x.size * ds.y.size
latm = np.arange(-90, 90.5, 0.5)
lonm = np.arange(-180, 180, 0.625)
lonmg, latmg = np.meshgrid(lonm, latm)
m2i = [merra2_idx2(ds.lat.values.flat[i], ds.lon.values.flat[i], latmg, lonmg) for i in range(npts)]
groups = {}
for i, v in enumerate(m2i):
if np.isnan(v):
continue
v = int(v)
if v not in groups:
groups[v] = []
groups[v].append(i)
group_idx = np.asarray(list(groups.keys())).astype(int)
pixel_count = np.zeros((latmg.size), dtype=int)
pixel_count[group_idx] = np.asarray([len(g) for g in groups.values()])
merra_grid = np.zeros((pixel_count.max(), latmg.size))
for i, g in groups.items():
merra_grid[:len(g), int(i)] = g
space = pd.MultiIndex.from_product([latm, lonm], names=['lat', 'lon'])
idx = xr.Dataset(coords=dict(space=space))
idx['pixel_index'] = ('pix', 'space'), merra_grid.astype(int)
idx['pixel_count'] = ('space'), pixel_count
idx.unstack('space').to_netcdf(f'idx_{sat}_{domain}.nc')
print(f'Saved: idx_{sat}_{domain}.nc')
|
python
|
"""Sets (expansions) information
"""
import datetime
from typing import Hashable
from . import utils
class Set(utils.i18nMixin, utils.NamedMixin):
"""A class representing a V:tES Set (expansion)."""
def __init__(self, **kwargs):
super().__init__()
self.id = 0
self.abbrev = kwargs.get("abbrev", None)
self.release_date = kwargs.get("release_date", None)
self.name = kwargs.get("name", None)
self.company = kwargs.get("abbrev", None)
def from_vekn(self, data: dict):
"""Load info from VEKN CSV dict."""
self.id = int(data["Id"])
self.abbrev = data["Abbrev"]
self.release_date = (
datetime.datetime.strptime(data["Release Date"], "%Y%m%d")
.date()
.isoformat()
)
self.name = data["Full Name"]
self.company = data["Company"]
class SetMap(dict):
"""A dict of all sets, index by Abbreviation and English name."""
PROMOS = {
"Promo-20210709": ["2021 Kickstarter Promo", "2021-07-09"],
"Promo-20210701": ["2021 Kickstarter Promo", "2021-07-01"],
"Promo-20210331": ["2021 Mind’s Eye Theatre Promo", "2021-03-31"],
"Promo-20210310": ["2021 Resellers Promo", "2021-03-31"],
"Promo-20191123": ["2020 GP Promo", "2020-11-23"],
"Promo-20201030": ["V5 Polish Edition promo", "2020-10-30"],
"Promo-20201123": ["2020 GP Promo", "2020-11-23"],
"Promo-20200511": ["2020 Promo Pack 2", "2020-05-11"],
"Promo-20191027": ["2019 ACC Promo", "2019-10-27"],
"Promo-20191005": ["2019 AC Promo", "2019-10-05"],
"Promo-20190818": ["2019 EC Promo", "2019-08-18"],
"Promo-20190816": ["2019 DriveThruCards Promo", "2019-08-16"],
"Promo-20190614": ["2019 Promo", "2019-06-14"],
"Promo-20190601": ["2019 SAC Promo", "2019-06-01"],
"Promo-20190615": ["2019 NAC Promo", "2019-06-15"],
"Promo-20190629": ["2019 Grand Prix Promo", "2019-06-15"],
"Promo-20190408": ["2019 Promo Pack 1", "2019-04-08"],
"Promo-20181004": ["2018 Humble Bundle", "2018-10-04"],
"Promo-20150219": ["2015 Storyline Rewards", "2015-02-19"],
"Promo-20150221": ["2015 Storyline Rewards", "2015-02-21"],
"Promo-20150215": ["2015 Storyline Rewards", "2015-02-15"],
"Promo-20150214": ["2015 Storyline Rewards", "2015-02-14"],
"Promo-20150211": ["2015 Storyline Rewards", "2015-02-11"],
"Promo-20150216": ["2015 Storyline Rewards", "2015-02-16"],
"Promo-20150220": ["2015 Storyline Rewards", "2015-02-20"],
"Promo-20150218": ["2015 Storyline Rewards", "2015-02-18"],
"Promo-20150217": ["2015 Storyline Rewards", "2015-02-17"],
"Promo-20150213": ["2015 Storyline Rewards", "2015-02-13"],
"Promo-20150212": ["2015 Storyline Rewards", "2015-02-12"],
"Promo-20100510": ["2010 Storyline promo", "2010-05-10"],
"Promo-20090929": ["2009 Tournament / Storyline promo", "2009-09-29"],
"Promo-20090401": ["2009 Tournament / Storyline promo", "2009-04-01"],
"Promo-20081119": ["2008 Tournament promo", "2008-11-19"],
"Promo-20081023": ["2008 Tournament promo", "2008-10-23"],
"Promo-20080810": ["2008 Storyline promo", "2008-08-10"],
"Promo-20080203": ["2008 Tournament promo", "2008-08-10"],
"Promo-20070601": ["2007 Promo", "2007-06-01"],
"Promo-20070101": ["Sword of Caine promo", "2007-01-01"],
"Promo-20061126": ["2006 EC Tournament promo", "2006-11-26"],
"Promo-20061101": ["2006 Storyline promo", "2006-11-01"],
"Promo-20061026": ["2006 Tournament promo", "2006-10-26"],
"Promo-20060902": ["2006 Tournament promo", "2006-09-02"],
"Promo-20060710": ["Third Edition promo", "2006-07-10"],
"Promo-20060417": ["2006 Championship promo", "2006-04-17"],
"Promo-20060213": ["2006 Tournament promo", "2006-02-13"],
"Promo-20060123": ["2006 Tournament promo", "2006-01-23"],
"Promo-20051026": ["Legacies of Blood promo", "2005-10-26"],
"Promo-20051001": ["2005 Storyline promo", "2005-10-01"],
"Promo-20050914": ["Legacies of Blood promo", "2005-09-14"],
"Promo-20050611": ["2005 Tournament promo", "2005-06-11"],
"Promo-20050122": ["2005 Tournament promo", "2005-01-22"],
"Promo-20050115": ["Kindred Most Wanted promo", "2005-01-15"],
"Promo-20041015": ["Fall 2004 Storyline promo", "2004-10-15"],
"Promo-20040411": ["Gehenna promo", "2004-04-11"],
"Promo-20040409": ["2004 promo", "2004-04-09"],
"Promo-20040301": ["Prophecies league promo", "2004-03-01"],
"Promo-20031105": ["Black Hand promo", "2003-11-05"],
"Promo-20030901": ["Summer 2003 Storyline promo", "2003-09-01"],
"Promo-20030307": ["Anarchs promo", "2003-03-07"],
"Promo-20021201": ["2003 Tournament promo", "2002-12-01"],
"Promo-20021101": ["Fall 2002 Storyline promo", "2002-11-01"],
"Promo-20020811": ["Sabbat War promo", "2002-08-11"],
"Promo-20020704": ["Camarilla Edition promo", "2002-07-04"],
"Promo-20020201": ["Winter 2002 Storyline promo", "2002-02-01"],
"Promo-20011201": ["Bloodlines promo", "2001-12-01"],
"Promo-20010428": ["Final Nights promo", "2001-04-28"],
"Promo-20010302": ["Final Nights promo", "2001-03-02"],
"Promo-19960101": ["1996 Promo", "1996-01-01"],
}
def __init__(self):
super().__init__()
self.add(Set(abbrev="POD", name="Print on Demand"))
for abbrev, (name, release_date) in self.PROMOS.items():
self.add(Set(abbrev=abbrev, name=name, release_date=release_date))
def add(self, set_: Set) -> None:
"""Add a set to the map."""
self[set_.abbrev] = set_
self[set_.name] = set_
def i18n_set(self, set_: Set) -> None:
"""Add a translation for a set."""
self[set_.abbrev].i18n_set()
class DefaultSetMap(dict):
"""A default map with no information other than the set abbreviation.
Can be used to enable card information parsing when no set info is available.
"""
def __getitem__(self, k: Hashable) -> Set:
return Set(id=1, abbrev=k, name=k)
#: Use the default set map to parse cards information with no set information available
DEFAULT_SET_MAP = DefaultSetMap()
|
python
|
from SimPEG import *
import simpegEM as EM
from scipy.constants import mu_0
import matplotlib.pyplot as plt
plotIt = False
cs, ncx, ncz, npad = 5., 25, 15, 15
hx = [(cs,ncx), (cs,npad,1.3)]
hz = [(cs,npad,-1.3), (cs,ncz), (cs,npad,1.3)]
mesh = Mesh.CylMesh([hx,1,hz], '00C')
active = mesh.vectorCCz<0.
layer = (mesh.vectorCCz<0.) & (mesh.vectorCCz>=-100.)
actMap = Maps.ActiveCells(mesh, active, np.log(1e-8), nC=mesh.nCz)
mapping = Maps.ExpMap(mesh) * Maps.Vertical1DMap(mesh) * actMap
sig_half = 2e-3
sig_air = 1e-8
sig_layer = 1e-3
sigma = np.ones(mesh.nCz)*sig_air
sigma[active] = sig_half
sigma[layer] = sig_layer
mtrue = np.log(sigma[active])
if plotIt:
fig, ax = plt.subplots(1,1, figsize = (3, 6))
plt.semilogx(sigma[active], mesh.vectorCCz[active])
ax.set_ylim(-600, 0)
ax.set_xlim(1e-4, 1e-2)
ax.set_xlabel('Conductivity (S/m)', fontsize = 14)
ax.set_ylabel('Depth (m)', fontsize = 14)
ax.grid(color='k', alpha=0.5, linestyle='dashed', linewidth=0.5)
plt.show()
rxOffset=1e-3
rx = EM.TDEM.RxTDEM(np.array([[rxOffset, 0., 30]]), np.logspace(-5,-3, 31), 'bz')
src = EM.TDEM.SrcTDEM_VMD_MVP([rx], np.array([0., 0., 80]))
survey = EM.TDEM.SurveyTDEM([src])
prb = EM.TDEM.ProblemTDEM_b(mesh, mapping=mapping)
prb.Solver = SolverLU
prb.timeSteps = [(1e-06, 20),(1e-05, 20), (0.0001, 20)]
prb.pair(survey)
dtrue = survey.dpred(mtrue)
survey.dtrue = dtrue
std = 0.05
noise = std*abs(survey.dtrue)*np.random.randn(*survey.dtrue.shape)
survey.dobs = survey.dtrue+noise
survey.std = survey.dobs*0 + std
survey.Wd = 1/(abs(survey.dobs)*std)
if plotIt:
fig, ax = plt.subplots(1,1, figsize = (10, 6))
ax.loglog(rx.times, dtrue, 'b.-')
ax.loglog(rx.times, survey.dobs, 'r.-')
ax.legend(('Noisefree', '$d^{obs}$'), fontsize = 16)
ax.set_xlabel('Time (s)', fontsize = 14)
ax.set_ylabel('$B_z$ (T)', fontsize = 16)
ax.grid(color='k', alpha=0.5, linestyle='dashed', linewidth=0.5)
plt.show()
dmisfit = DataMisfit.l2_DataMisfit(survey)
regMesh = Mesh.TensorMesh([mesh.hz[mapping.maps[-1].indActive]])
reg = Regularization.Tikhonov(regMesh)
opt = Optimization.InexactGaussNewton(maxIter = 5)
invProb = InvProblem.BaseInvProblem(dmisfit, reg, opt)
# Create an inversion object
beta = Directives.BetaSchedule(coolingFactor=5, coolingRate=2)
betaest = Directives.BetaEstimate_ByEig(beta0_ratio=1e0)
inv = Inversion.BaseInversion(invProb, directiveList=[beta,betaest])
m0 = np.log(np.ones(mtrue.size)*sig_half)
reg.alpha_s = 1e-2
reg.alpha_x = 1.
prb.counter = opt.counter = Utils.Counter()
opt.LSshorten = 0.5
opt.remember('xc')
mopt = inv.run(m0)
if plotIt:
fig, ax = plt.subplots(1,1, figsize = (3, 6))
plt.semilogx(sigma[active], mesh.vectorCCz[active])
plt.semilogx(np.exp(mopt), mesh.vectorCCz[active])
ax.set_ylim(-600, 0)
ax.set_xlim(1e-4, 1e-2)
ax.set_xlabel('Conductivity (S/m)', fontsize = 14)
ax.set_ylabel('Depth (m)', fontsize = 14)
ax.grid(color='k', alpha=0.5, linestyle='dashed', linewidth=0.5)
    plt.legend([r'$\sigma_{true}$', r'$\sigma_{pred}$'])
plt.show()
|
python
|
"""
Support for EnOcean sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.enocean/
"""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_NAME, CONF_ID)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.components import enocean
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'EnOcean sensor'
DEPENDENCIES = ['enocean']
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_ID): vol.All(cv.ensure_list, [vol.Coerce(int)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up an EnOcean sensor device."""
dev_id = config.get(CONF_ID)
devname = config.get(CONF_NAME)
add_devices([EnOceanSensor(dev_id, devname)])
class EnOceanSensor(enocean.EnOceanDevice, Entity):
"""Representation of an EnOcean sensor device such as a power meter."""
def __init__(self, dev_id, devname):
"""Initialize the EnOcean sensor device."""
enocean.EnOceanDevice.__init__(self)
self.stype = "powersensor"
self.power = None
self.dev_id = dev_id
self.which = -1
self.onoff = -1
self.devname = devname
@property
def name(self):
"""Return the name of the device."""
return 'Power %s' % self.devname
def value_changed(self, value):
"""Update the internal state of the device."""
self.power = value
self.schedule_update_ha_state()
@property
def state(self):
"""Return the state of the device."""
return self.power
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return 'W'
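# Example configuration.yaml entry for this platform (a hedged sketch -- the
# id bytes and the name are illustrative, not taken from a real device):
#
# sensor:
#   - platform: enocean
#     id: [0x01, 0x90, 0x84, 0x3C]
#     name: Living room power meter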
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Yanis Guenane <[email protected]>
# Copyright (c) 2019, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_os_info
short_description: Get information about the Vultr OSes available.
description:
- Get infos about OSes available to boot servers.
author:
- "Yanis Guenane (@Spredzy)"
- "René Moser (@resmo)"
extends_documentation_fragment:
- community.general.vultr
'''
EXAMPLES = r'''
- name: Get Vultr OSes infos
vultr_os_info:
register: results
- name: Print the gathered infos
debug:
var: results.vultr_os_info
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_retry_max_delay:
description: Exponential backoff delay in seconds between retries up to this max delay value.
returned: success
type: int
sample: 12
version_added: '2.9'
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_os_info:
description: Response from Vultr API as list
returned: available
type: complex
contains:
arch:
description: OS Architecture
returned: success
type: str
sample: x64
family:
description: OS family
returned: success
type: str
sample: openbsd
name:
description: OS name
returned: success
type: str
sample: OpenBSD 6 x64
windows:
description: OS is a MS Windows
returned: success
type: bool
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.general.plugins.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrOSInfo(Vultr):
def __init__(self, module):
super(AnsibleVultrOSInfo, self).__init__(module, "vultr_os_info")
self.returns = {
"OSID": dict(key='id', convert_to='int'),
"arch": dict(),
"family": dict(),
"name": dict(),
"windows": dict(convert_to='bool')
}
def get_oses(self):
return self.api_query(path="/v1/os/list")
def parse_oses_list(oses_list):
return [os for id, os in oses_list.items()]
def main():
argument_spec = vultr_argument_spec()
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
os_info = AnsibleVultrOSInfo(module)
result = os_info.get_result(parse_oses_list(os_info.get_oses()))
module.exit_json(**result)
if __name__ == '__main__':
main()
|
python
|
# Ensomniac 2022 Ryan Martin, [email protected]
# Andrew Stet, [email protected]
import os
import sys
class GlobalSpacing:
group: bool
ignore: str
source_code: list
iter_limit_range: range
starts_with_keyword: str
line_break_quantity: int
GetIndentSpaceCount: callable
line_end_keyword_strings: list
def __init__(self):
super().__init__()
def RemoveExtraLinesAtStartOfFile(self):
for _ in self.iter_limit_range:
try:
if not len(self.source_code[0]):
self.source_code.pop(0)
else:
break
except IndexError:
break
def RemoveExtraLinesAtEndOfFile(self):
for _ in self.iter_limit_range:
try:
if not len(self.source_code[-1]) and not len(self.source_code[-2]):
self.source_code.pop()
else:
break
except IndexError:
break
def CheckSpecificSpacing(self, starts_with_keyword, line_break_quantity=1, group=False, ignore=""):
finished = False
for _ in self.iter_limit_range:
if finished:
break
self.starts_with_keyword = starts_with_keyword
self.line_break_quantity = line_break_quantity
self.group = group
self.ignore = ignore
finished = self.fix_specific_spacing()
self.fix_comments_separated_from_top_of_blocks()
def AddNeededLineBreaks(self):
finished = False
for keyword in self.line_end_keyword_strings:
for _ in self.iter_limit_range:
if finished:
break
for index, line in enumerate(self.source_code):
if index == len(self.source_code) - 1:
finished = True
break
if not line.strip().endswith(keyword) or not len(self.source_code[index + 1]):
continue
if self.GetIndentSpaceCount(line) != self.GetIndentSpaceCount(self.source_code[index - 1]):
continue
self.source_code.insert(index + 1, "")
def RemoveExtraLinesBetweenStatements(self, exception_strings=[]):
finished = False
for _ in self.iter_limit_range:
if finished:
break
for index, line in enumerate(self.source_code):
altered = False
stripped = line.strip()
next_statement_index = None
for exception in exception_strings:
if stripped.startswith(exception):
continue
if not len(stripped):
continue
for num in self.iter_limit_range:
if num < 1:
continue
try:
if len(self.source_code[index + num].strip()):
next_statement_index = index + num
break
except:
break
if not type(next_statement_index) == int:
continue
spaces = (next_statement_index - 1) - index
for _ in self.iter_limit_range:
if spaces > 1:
try:
self.source_code.pop(index + 1)
altered = True
spaces -= 1
except:
break
else:
break
if altered:
break
if index == len(self.source_code) - 1:
finished = True
def fix_comments_separated_from_top_of_blocks(self):
finished = False
for _ in self.iter_limit_range:
if finished:
break
for index, line in enumerate(self.source_code):
if index == len(self.source_code) - 1:
finished = True
break
try:
two_strip = self.source_code[index + 2].strip()
except:
finished = True
break
if line.strip().startswith("#") \
and not self.source_code[index - 1].strip().startswith("#") \
and not self.source_code[index + 1].strip().startswith("#"):
if two_strip.startswith("def ") or two_strip.startswith("class "):
self.source_code.pop(index + 1)
break
elif not len(two_strip):
for num in self.iter_limit_range:
if num <= 2:
continue
try:
next_strip = self.source_code[index + num].strip()
except:
break
if not len(next_strip):
continue
elif next_strip.startswith("def ") or next_strip.startswith("class "):
for n in range(0, num - 1):
self.source_code.pop(index + 1)
break
else:
break
if line.strip().startswith("#"):
next_line_strip = self.source_code[index + 1].strip()
prev1_line_strip = self.source_code[index - 1].strip()
prev2_line_strip = self.source_code[index - 2].strip()
if (next_line_strip.startswith("class ") or (next_line_strip.startswith("def ") and self.GetIndentSpaceCount(line) == 0)) and (len(prev1_line_strip) or len(prev2_line_strip)):
self.source_code.insert(index, "")
break
def fix_specific_spacing(self):
last_index_before_line_breaks = 0
altered = False
occurrence = 0
indented_keyword = ""
for index, line in enumerate(self.source_code):
indent = self.GetIndentSpaceCount(line)
if self.starts_with_keyword == "def ":
if indent == 0:
self.line_break_quantity = 2
else:
self.line_break_quantity = 1
if line.startswith(" ") and self.starts_with_keyword in line:
indented_keyword = f"{indent * ' '}{self.starts_with_keyword}"
if line.startswith(self.starts_with_keyword) or \
(len(indented_keyword) and line.startswith(indented_keyword)):
if len(self.ignore) and self.ignore in line:
continue
line_break_count = index - (last_index_before_line_breaks + 1)
occurrence += 1
if occurrence > 1 and self.group:
self.line_break_quantity = 0
if line_break_count == self.line_break_quantity:
last_index_before_line_breaks = index
continue
for _ in self.iter_limit_range:
if line_break_count != self.line_break_quantity:
if line_break_count > self.line_break_quantity:
self.source_code.pop(index - 1)
index -= 1
line_break_count -= 1
altered = True
elif line_break_count < self.line_break_quantity:
self.source_code.insert(index, "")
index += 1
line_break_count += 1
altered = True
else:
if altered:
return False
break
elif len(line):
last_index_before_line_breaks = index
if index == len(self.source_code) - 1:
return True
|
python
|
import cvxpy as cp
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import check_X_y
from sklearn.metrics.pairwise import euclidean_distances
from wdwd.utils import pm1
from wdwd.linear_model import LinearClassifierMixin
class DWD(BaseEstimator, LinearClassifierMixin):
def __init__(self, C=1.0, solver_kws={}):
self.C = C
self.solver_kws = solver_kws
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
sample_weight : array-like, shape = [n_samples], optional
Array of weights that are assigned to individual
samples. If not provided,
then each sample is given unit weight.
Returns
-------
self : object
"""
# TODO: what to do about multi-class
self.classes_ = np.unique(y)
if self.C == 'auto':
self.C = auto_dwd_C(X, y)
# fit DWD
self.coef_, self.intercept_, self.eta_, self.d_, self.problem_ = \
solve_dwd_socp(X, y, C=self.C,
sample_weight=sample_weight,
solver_kws=self.solver_kws)
self.coef_ = self.coef_.reshape(1, -1)
self.intercept_ = self.intercept_.reshape(-1)
return self
def solve_dwd_socp(X, y, C=1.0, sample_weight=None, solver_kws={}):
"""
Solves distance weighted discrimination optimization problem.
Solves problem (2.7) from https://arxiv.org/pdf/1508.05913.pdf
Parameters
----------
X: (n_samples, n_features)
y: (n_samples, )
C: float
Strictly positive tuning parameter.
sample_weight: None, (n_samples, )
Weights for samples.
solver_kws: dict
Keyword arguments to cp.solve
Returns
------
beta: (n_features, )
DWD normal vector.
intercept: float
DWD intercept.
eta, d: float
Optimization variables.
problem: cp.Problem
"""
if C < 0:
raise ValueError("Penalty term must be positive; got (C={})".format(C))
# TODO: add sample weights
if sample_weight is not None:
raise NotImplementedError
X, y = check_X_y(X, y,
accept_sparse='csr',
dtype='numeric')
# convert y to +/- 1
y = pm1(y)
n_samples, n_features = X.shape
# problem data
X = cp.Parameter(shape=X.shape, value=X)
y = cp.Parameter(shape=y.shape, value=y)
C = cp.Parameter(value=C, nonneg=True)
# optimization variables
beta = cp.Variable(shape=n_features)
intercept = cp.Variable()
eta = cp.Variable(shape=n_samples, nonneg=True)
rho = cp.Variable(shape=n_samples)
sigma = cp.Variable(shape=n_samples)
    # objective function
# TODO: check this is correct way to do sample weighting
if sample_weight is None:
v = np.ones(n_samples)
else:
v = np.array(sample_weight).reshape(-1)
assert len(v) == n_samples
objective = v.T @ (rho + sigma + C * eta)
# setup constraints
# TODO: do we need explicit SOCP constraints?
Y_tilde = cp.diag(y) # TODO: make sparse
constraints = [rho - sigma == Y_tilde @ X @ beta + intercept * y + eta,
cp.SOC(cp.Parameter(value=1), beta)] # ||beta||_2^2 <= 1
# rho^2 - sigma^2 >= 1
constraints.extend([cp.SOC(rho[i], cp.vstack([sigma[i], 1]))
for i in range(n_samples)])
# solve problem
problem = cp.Problem(cp.Minimize(objective),
constraints=constraints)
problem.solve(**solver_kws)
# d = rho - sigma
    # rho = (1/d + d)/2, sigma = (1/d - d)/2
d = rho.value - sigma.value
return beta.value, intercept.value, eta.value, d, problem
def auto_dwd_C(X, y, const=100):
"""
    Automatic choice of C from Distance-Weighted Discrimination by Marron et al., 2007. Note this is only for the SOCP formulation of DWD.
C = 100 / d ** 2
Where d is the median distance between points in either class.
Parameters
----------
X: array-like, (n_samples, n_features)
The input data.
y: array-like, (n_samples, )
The vector of binary class labels.
const: float
        The constant used to determine C. Originally suggested to be 100.
"""
labels = np.unique(y)
assert len(labels) == 2
    # pairwise distances between points of the two classes
D = euclidean_distances(X[y == labels[0], :],
X[y == labels[1], :])
d = np.median(D.ravel())
return const / d ** 2
def dwd_obj(X, y, C, beta, offset, eta):
"""
Objective function for DWD.
"""
d = y * (X.dot(beta) + offset) + eta
return sum(1.0 / d) + C * sum(eta)
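# Minimal usage sketch (illustrative only): fit the SOCP-based DWD on a small
# synthetic two-class problem. The data below is random and purely for
# demonstration; it assumes a cvxpy-compatible SOCP solver is installed.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = np.vstack([rng.randn(20, 3) + 1.0, rng.randn(20, 3) - 1.0])
    y_demo = np.array([1] * 20 + [-1] * 20)
    clf = DWD(C='auto')  # 'auto' picks C via auto_dwd_C() above
    clf.fit(X_demo, y_demo)
    print("normal vector:", clf.coef_, "intercept:", clf.intercept_)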
|
python
|
from psyrun import Param
pspace = Param()
python = 'true'
def execute():
return {}
|
python
|
import numpy as np
def PlotPlanetXZ(fig,R=1.0,Center=[0.0,0.0,0.0],zorder=10,NoBlack=False,NoonTop=True):
a = 2*np.pi*np.arange(361,dtype='float32')/360
x = R*np.sin(a) + Center[0]
z = R*np.cos(a) + Center[2]
if NoonTop:
fig.fill(z,x,color=[1.0,1.0,1.0],zorder=zorder)
fig.plot(z,x,color=[0,0,0],zorder=zorder+1)
if NoBlack == False:
fig.fill(z[180:360],x[180:360],color=[0.0,0.0,0.0],zorder=zorder+1)
else:
fig.fill(x,z,color=[1.0,1.0,1.0],zorder=zorder)
fig.plot(x,z,color=[0,0,0],zorder=zorder+1)
if NoBlack == False:
fig.fill(x[180:360],z[180:360],color=[0.0,0.0,0.0],zorder=zorder+1)
def PlotPlanetXY(fig,R=1.0,Center=[0.0,0.0,0.0],zorder=10,NoBlack=False,NoonTop=True):
a = 2*np.pi*np.arange(361,dtype='float32')/360
x = R*np.sin(a) + Center[0]
y = R*np.cos(a) + Center[1]
if NoonTop:
fig.fill(y,x,color=[1.0,1.0,1.0],zorder=zorder)
fig.plot(y,x,color=[0,0,0],zorder=zorder+1)
if NoBlack == False:
fig.fill(y[180:360],x[180:360],color=[0.0,0.0,0.0],zorder=zorder+1)
else:
fig.fill(x,y,color=[1.0,1.0,1.0],zorder=zorder)
fig.plot(x,y,color=[0,0,0],zorder=zorder+1)
if NoBlack == False:
fig.fill(x[180:360],y[180:360],color=[0.0,0.0,0.0],zorder=zorder+1)
def PlotPlanetYZ(fig,R=1.0,Center=[0.0,0.0,0.0],Side='day',zorder=10,NoFill=False,Color=[0.0,0.0,0.0],linestyle='-'):
a = 2*np.pi*np.arange(361,dtype='float32')/360
y = R*np.sin(a) + Center[1]
z = R*np.cos(a) + Center[2]
if NoFill == False:
if Side == 'day':
fig.fill(y,z,color=[1.0,1.0,1.0],zorder=zorder)
else:
fig.fill(y,z,color=[0.0,0.0,0.0],zorder=zorder)
    fig.plot(y,z,color=Color,zorder=zorder+1,linestyle=linestyle)
def PlotPlanetCoreXZ(ax,R=1.0,Center=[0.0,0.0,0.0],Colors=([1.0,0.7,0.0,0.5],[1.0,0.2,0.0,0.5],[0.5,0.5,0.5,0.5]),Layers=(0.0,0.832,1.0),zorder=1.0,NoFill=False,linestyle='-',linewidth=2.0):
'''
    Plots the different layers of the planet
'''
a = 2*np.pi*np.arange(361,dtype='float32')/360
nl = len(Layers)
if NoFill:
for i in range(0,nl):
x = Layers[i]*R*np.sin(a) + Center[0]
z = Layers[i]*R*np.cos(a) + Center[2]
ax.plot(x,z,color=Colors[i],zorder=zorder,linestyle=linestyle,linewidth=linewidth)
else:
for i in range(0,nl-1):
l0 = Layers[i]
l1 = Layers[i+1]
x0 = l0*R*np.sin(a) + Center[0]
z0 = l0*R*np.cos(a) + Center[2]
x1 = l1*R*np.sin(a) + Center[0]
z1 = l1*R*np.cos(a) + Center[2]
if l0 == 0.0:
ax.fill(x1,z1,color=Colors[i],zorder=zorder,linewidth=0.0)
else:
x = np.append(x0,x1[::-1])
z = np.append(z0,z1[::-1])
ax.fill(x,z,color=Colors[i],zorder=zorder,linewidth=0.0)
x = R*np.sin(a) + Center[0]
z = R*np.cos(a) + Center[2]
ax.plot(x,z,color=Colors[-1],linestyle=linestyle,linewidth=linewidth,zorder=zorder)
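# Minimal usage sketch (illustrative): draw a planet outline with a shaded
# night side on a matplotlib Axes. The matplotlib import is an assumption and
# is not part of the original module.
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(figsize=(5, 5))
    PlotPlanetXZ(ax, R=1.0, Center=[0.0, 0.0, 0.0])
    ax.set_aspect('equal')
    plt.show()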
|
python
|
def text(result):
output = [
f"active rolls: {result['active_rolls']}",
f"cycles: {result['cycles']}",
"",
]
for key in ["bakes", "endorsements", "total"]:
output.append(key)
col1 = "mean"
col2 = "max"
header = " " * 10 + f"{col1:>10} {col2:>10}"
output.append("-" * len(header))
output.append(header)
d = result[key]
for key2 in ["count", "deposits", "rewards"]:
mean = d["mean"][key2]
max = d["max"][key2]
output.append(f"{key2:>8}: {mean:10.2f} {max:10.2f}")
output.append("\n")
return "\n".join(output)
|
python
|
from app.runner.setup import setup
app = setup()
|
python
|
import os
from datetime import datetime, timezone
from typing import Dict
import click
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
class Config:
@property
def repository(self) -> str:
return os.getenv("GITHUB_REPOSITORY")
@property
def actor(self) -> str:
return os.getenv("GITHUB_ACTOR")
@property
def run_id(self) -> str:
return os.getenv("GITHUB_RUN_ID")
@property
def workflow(self) -> str:
return os.getenv("GITHUB_WORKFLOW")
@property
def job_id(self) -> str:
return os.getenv("GITHUB_JOB")
@property
def ref(self) -> str:
return os.getenv("GITHUB_REF", "")
@property
def branch(self) -> str:
try:
return self.ref.split("/", 2)[2]
except IndexError:
return "unknown branch"
config = Config()
def build_status_block(
job_status: str, actor: str, flow: str, branch: str, run_id: str, repository: str
) -> Dict:
if job_status.lower() == "success":
message = ":white_check_mark: *Success*"
elif job_status.lower() == "cancelled":
message = ":large_blue_circle: *Cancelled*"
else:
message = ":x: *Failed*"
message = (
message
+ f" *{repository}* <https://github.com/{repository}/actions/runs/{run_id}|View Job>\n"
+ f"[ {flow} ] [ {branch} ]\n"
)
message = message + f"Triggered by {actor}"
return {"type": "section", "text": {"type": "mrkdwn", "text": message}}
@click.command(context_settings=dict(ignore_unknown_options=True))
@click.option("--token", required=True, help="Slack token")
@click.option("--channel", required=True, help="Channel id")
@click.option("--job-status", required=True, help="Job status")
def send_to_slack(token: str, channel: str, job_status: str):
client = WebClient(token=token)
blocks = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": datetime.now(tz=timezone.utc).strftime(
"%a %b %d %Y %H:%M:%S %Z"
),
},
},
build_status_block(
job_status,
config.actor,
config.job_id,
config.branch,
config.run_id,
config.repository,
),
]
try:
result = client.chat_postMessage(
channel=channel, text="Some text", blocks=blocks
)
# Print result, which includes information about the message (like TS)
print(result)
except SlackApiError as e:
print(f"Error: {e}")
if __name__ == "__main__":
send_to_slack()
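# Example invocation (hedged sketch; the script name, token, channel id and
# job status are placeholders, and the GITHUB_* variables are normally set by
# the Actions runner):
#
#   GITHUB_REPOSITORY=org/repo GITHUB_ACTOR=octocat GITHUB_RUN_ID=1 \
#   GITHUB_WORKFLOW=ci GITHUB_JOB=build GITHUB_REF=refs/heads/main \
#   python notify_slack.py --token xoxb-... --channel C0123456789 --job-status success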
|
python
|
import peewee
db = peewee.SqliteDatabase('./apidocs/api_doc.db')
class ApiDoc(peewee.Model):
title = peewee.CharField(default='')
url = peewee.CharField()
method = peewee.CharField()
description = peewee.CharField(default='')
class Meta:
database = db
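# Minimal usage sketch (illustrative): create the table and insert one record.
# It assumes the ./apidocs directory already exists for the SQLite file.
if __name__ == "__main__":
    db.connect(reuse_if_open=True)
    db.create_tables([ApiDoc])
    ApiDoc.create(title="List users", url="/api/users", method="GET",
                  description="Returns all users")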
|
python
|
# -*- coding:utf-8 -*-
import peewee
from torcms.core import tools
from torcms.model.core_tab import TabPost
from torcms.model.core_tab import TabRel
from torcms.model.core_tab import TabPost2Tag
from torcms.model.post2catalog_model import MPost2Catalog as MInfor2Catalog
from torcms.model.abc_model import Mabc
class MRelation(Mabc):
# def __init__(self):
# super(MRelation, self).__init__()
@staticmethod
def add_relation(app_f, app_t, weight=1):
recs = TabRel.select().where(
(TabRel.post_f_id == app_f) &
(TabRel.post_t_id == app_t)
)
if recs.count() > 1:
for record in recs:
MRelation.delete(record.uid)
if recs.count() == 0:
uid = tools.get_uuid()
entry = TabRel.create(
uid=uid,
post_f_id=app_f,
post_t_id=app_t,
count=1,
)
return entry.uid
elif recs.count() == 1:
MRelation.update_relation(app_f, app_t, weight)
else:
return False
@staticmethod
def delete(uid):
entry = TabRel.delete().where(
TabRel.uid == uid
)
entry.execute()
@staticmethod
def update_relation(app_f, app_t, weight=1):
try:
postinfo = TabRel.get(
(TabRel.post_f_id == app_f) &
(TabRel.post_t_id == app_t)
)
except:
return False
entry = TabRel.update(
count=postinfo.count + weight
).where(
(TabRel.post_f_id == app_f) &
(TabRel.post_t_id == app_t)
)
entry.execute()
@staticmethod
def get_app_relations(app_id, num=20, kind='1'):
'''
        Get the related infos.
'''
info_tag = MInfor2Catalog.get_first_category(app_id)
if info_tag:
return TabPost2Tag.select(
TabPost2Tag, TabPost.title.alias('post_title'), TabPost.valid.alias('post_valid')
).join(
TabPost, on=(TabPost2Tag.post_id == TabPost.uid)
).where(
(TabPost2Tag.tag_id == info_tag.tag_id) &
(TabPost.kind == kind)
).order_by(
peewee.fn.Random()
).limit(num)
else:
return TabPost2Tag.select(
TabPost2Tag, TabPost.title.alias('post_title'), TabPost.valid.alias('post_valid')
).join(TabPost, on=(TabPost2Tag.post_id == TabPost.uid)).where(
TabPost.kind == kind
).order_by(peewee.fn.Random()).limit(num)
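# Illustrative usage sketch (the torcms tables are assumed to exist and the
# two post UIDs below are placeholders):
#
#   MRelation.add_relation('post_uid_a', 'post_uid_b', weight=1)
#   related = MRelation.get_app_relations('post_uid_a', num=10, kind='1')
#   for rec in related:
#       print(rec.post_id)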
|
python
|
#!/usr/bin/env python3
import torch
from ..distributions import MultivariateNormal
from ..lazy import InterpolatedLazyTensor
from ..utils.broadcasting import _mul_broadcast_shape
from ..utils.interpolation import Interpolation, left_interp
from ..utils.memoize import cached
from ._variational_strategy import _VariationalStrategy
class GridInterpolationVariationalStrategy(_VariationalStrategy):
"""
This strategy constrains the inducing points to a grid and applies a deterministic
relationship between :math:`\mathbf f` and :math:`\mathbf u`.
It was introduced by `Wilson et al. (2016)`_.
Here, the inducing points are not learned. Instead, the strategy
automatically creates inducing points based on a set of grid sizes and grid
bounds.
.. _Wilson et al. (2016):
https://arxiv.org/abs/1611.00336
:param ~gpytorch.models.ApproximateGP model: Model this strategy is applied to.
Typically passed in when the VariationalStrategy is created in the
__init__ method of the user defined model.
:param int grid_size: Size of the grid
:param list grid_bounds: Bounds of each dimension of the grid (should be a list of (float, float) tuples)
:param ~gpytorch.variational.VariationalDistribution variational_distribution: A
VariationalDistribution object that represents the form of the variational distribution :math:`q(\mathbf u)`
"""
def __init__(self, model, grid_size, grid_bounds, variational_distribution):
grid = torch.zeros(grid_size, len(grid_bounds))
for i in range(len(grid_bounds)):
grid_diff = float(grid_bounds[i][1] - grid_bounds[i][0]) / (grid_size - 2)
grid[:, i] = torch.linspace(grid_bounds[i][0] - grid_diff, grid_bounds[i][1] + grid_diff, grid_size)
inducing_points = torch.zeros(int(pow(grid_size, len(grid_bounds))), len(grid_bounds))
prev_points = None
for i in range(len(grid_bounds)):
for j in range(grid_size):
inducing_points[j * grid_size ** i : (j + 1) * grid_size ** i, i].fill_(grid[j, i])
if prev_points is not None:
inducing_points[j * grid_size ** i : (j + 1) * grid_size ** i, :i].copy_(prev_points)
prev_points = inducing_points[: grid_size ** (i + 1), : (i + 1)]
super(GridInterpolationVariationalStrategy, self).__init__(
model, inducing_points, variational_distribution, learn_inducing_locations=False
)
object.__setattr__(self, "model", model)
self.register_buffer("grid", grid)
def _compute_grid(self, inputs):
n_data, n_dimensions = inputs.size(-2), inputs.size(-1)
batch_shape = inputs.shape[:-2]
inputs = inputs.reshape(-1, n_dimensions)
interp_indices, interp_values = Interpolation().interpolate(self.grid, inputs)
interp_indices = interp_indices.view(*batch_shape, n_data, -1)
interp_values = interp_values.view(*batch_shape, n_data, -1)
if (interp_indices.dim() - 2) != len(self._variational_distribution.batch_shape):
batch_shape = _mul_broadcast_shape(interp_indices.shape[:-2], self._variational_distribution.batch_shape)
interp_indices = interp_indices.expand(*batch_shape, *interp_indices.shape[-2:])
interp_values = interp_values.expand(*batch_shape, *interp_values.shape[-2:])
return interp_indices, interp_values
@property
@cached(name="prior_distribution_memo")
def prior_distribution(self):
out = self.model.forward(self.inducing_points)
res = MultivariateNormal(out.mean, out.lazy_covariance_matrix.add_jitter())
return res
def forward(self, x, inducing_points, inducing_values, variational_inducing_covar=None):
if variational_inducing_covar is None:
raise RuntimeError(
"GridInterpolationVariationalStrategy is only compatible with Gaussian variational "
f"distributions. Got ({self.variational_distribution.__class__.__name__}."
)
variational_distribution = self.variational_distribution
# Get interpolations
interp_indices, interp_values = self._compute_grid(x)
# Compute test mean
# Left multiply samples by interpolation matrix
predictive_mean = left_interp(interp_indices, interp_values, inducing_values.unsqueeze(-1))
predictive_mean = predictive_mean.squeeze(-1)
# Compute test covar
predictive_covar = InterpolatedLazyTensor(
variational_distribution.lazy_covariance_matrix,
interp_indices,
interp_values,
interp_indices,
interp_values,
)
output = MultivariateNormal(predictive_mean, predictive_covar)
return output
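# Minimal usage sketch (illustrative; it assumes a working gpytorch install and
# uses public gpytorch classes that are not defined in this file):
#
#   import gpytorch
#
#   class GridGPModel(gpytorch.models.ApproximateGP):
#       def __init__(self, grid_size=64, grid_bounds=[(-1.0, 1.0)]):
#           variational_distribution = gpytorch.variational.CholeskyVariationalDistribution(
#               int(pow(grid_size, len(grid_bounds)))
#           )
#           variational_strategy = GridInterpolationVariationalStrategy(
#               self, grid_size, grid_bounds, variational_distribution
#           )
#           super().__init__(variational_strategy)
#           self.mean_module = gpytorch.means.ConstantMean()
#           self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
#
#       def forward(self, x):
#           mean_x = self.mean_module(x)
#           covar_x = self.covar_module(x)
#           return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)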
|