#!/usr/bin/env python3
import os
import ssl
from base64 import b64decode, b64encode
from hashlib import sha256
from time import time
from urllib import parse
from hmac import HMAC
from azure.keyvault.secrets import SecretClient
from azure.identity import DefaultAzureCredential
from paho.mqtt import client as mqtt
def generate_device_sas_token(uri, device, key, policy_name, expiry=3600):
    ttl = time() + expiry
    # Azure IoT Hub signs the URL-encoded resource URI together with the expiry time.
    resource_uri = uri + '/devices/' + device
    sign_key = '%s\n%d' % (parse.quote_plus(resource_uri), int(ttl))
    print(sign_key)
    signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest())
    raw_token = {
        'sr': resource_uri,
        'sig': signature,
        'se': str(int(ttl))
    }
    if policy_name is not None:
        raw_token['skn'] = policy_name
    return 'SharedAccessSignature ' + parse.urlencode(raw_token)
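# Illustrative only: the returned token has the form
# "SharedAccessSignature sr=<hub>.azure-devices.net%2Fdevices%2F<device>&sig=<url-encoded signature>&se=<unix expiry>[&skn=<policy>]"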
kv_name = 'kvDeepNet'
kv_uri = f'https://{kv_name}.vault.azure.net'
credentials = DefaultAzureCredential()
secret_client = SecretClient(vault_url=kv_uri, credential=credentials)
device_id = "simDevice"
key = secret_client.get_secret(f'{device_id}PK')
iot_hub_name = "rtpos"
sas_token = generate_device_sas_token(iot_hub_name + '.azure-devices.net', device_id, key.value, None)
def on_connect(client, userdata, flags, rc):
print("Device connected with result code: " + str(rc))
def on_disconnect(client, userdata, rc):
print("Device disconnected with result code: " + str(rc))
def on_publish(client, userdata, mid):
print("Device sent message")
client = mqtt.Client(client_id=device_id, protocol=mqtt.MQTTv311, clean_session=0)
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.on_publish = on_publish
client.username_pw_set(
username=iot_hub_name
+ ".azure-devices.net/"
+ device_id
+ "/?api-version=2018-06-30",
password=sas_token,
)
client.tls_set(
certfile=None,
keyfile=None,
cert_reqs=ssl.CERT_REQUIRED,
tls_version=ssl.PROTOCOL_TLSv1_2,
ciphers=None,
)
client.tls_insecure_set(False)
client.reconnect_delay_set(5)
client.connect(iot_hub_name + ".azure-devices.net", port=8883)
message = 'ÆØÅ~#%2Fñ'
client.publish("devices/" + device_id + "/messages/events/", bytes(message, 'utf-8'), qos=1)
client.loop_forever()
|
python
|
objConstructors = {'flow_emap.get_key' : {'constructor' : 'FlowIdc',
'type' : 'FlowIdi',
'fields' : ['sp', 'dp', 'sip',
'dip', 'prot']}}
typeConstructors = {'FlowIdc' : 'FlowIdi',
'emap' : 'emap<FlowIdi>'}
stateObjects = {'flow_emap' : emap,
'int_devices' : vector}
|
python
|
from django.conf.urls import patterns, include, url
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView, RedirectView
from django.core.urlresolvers import reverse_lazy as reverse
from .views import reroute
home = TemplateView.as_view(template_name='home.html')
#: IHM URL: language sensitive patterns.
ihm_patterns = patterns(
'',
url(_(r'^home/'), home, name='home'),
)
urlpatterns = patterns(
'',
url(r'', include('i18nurl.urls')),
url(r'^$', reroute),
url(_(r'^en/'), include(ihm_patterns)),
)
|
python
|
from math import ceil
from utils import hrsize
from json import dumps
from . import Sample
class SampleSet:
round = 5
def __init__(self, samples) -> None:
        if not (isinstance(samples, list) and all(isinstance(sample, Sample) for sample in samples)):
            raise TypeError("samples must be a list of Sample instances.")
self.samples = samples
def _average(self, samples, rnd=None):
if rnd:
return round(sum(samples) / len(samples), rnd)
return sum(samples) / len(samples)
def _max(self, samples, rnd=None):
if rnd:
return round(max(samples), rnd)
return max(samples)
    def _percentile(self, samples, percentile, rnd=0):
        # clamp the percentile to the supported 1-99 range
        percentile = 1 if percentile < 1 else percentile
        percentile = 99 if percentile > 99 else percentile
        samples.sort()
        index = (percentile / 100.0) * len(samples)
        res = -1
        if index % 1 == 0:
            # index is a whole number: average the two neighbouring samples
            index = int(index)  # index must be strictly int
            res = 1.0 / 2 * (samples[index - 1] + samples[index])
        else:
            index = ceil(index)
            res = samples[index - 1]
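        # Example: for samples [1, 2, 3, 4] and percentile 50, index = 0.5 * 4 = 2.0,
        # which is a whole number, so the result is (samples[1] + samples[2]) / 2 = 2.5.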
return round(res, rnd)
def __str__(self) -> str:
data = self.export()
return (
f"CPU (max/avg/80%) {data['cpu']['max'] * 100.0}% / {data['cpu']['average'] * 100.0}% / {data['cpu']['80%'] * 100.0}% \n"
f"MEM (max/avg/80%) {hrsize(data['memory']['max'])} / {hrsize(data['memory']['average'])} / {hrsize(data['memory']['80%'])}"
)
def export(self):
cpu_samples = [sample.cpu for sample in self.samples]
mem_samples = [sample.memory for sample in self.samples]
time_samples = [sample.timestamp for sample in self.samples]
return {
"timestamp" : {
"duration" : time_samples[-1] - time_samples[0] if len(time_samples) >= 2 else 0,
"delta" : time_samples[1] - time_samples[0] if len(time_samples) >= 2 else 0, # adjust for dynamic approach
"all" : time_samples
},
"cpu" : {
"max" : self._max(cpu_samples, self.round),
"80%" : self._percentile(cpu_samples, 80, self.round),
"average" : self._average(cpu_samples, self.round),
"samples" : cpu_samples,
},
"memory" : {
"max" : self._max(mem_samples, self.round),
"80%" : ceil(self._percentile(mem_samples, 80)),
"average" : ceil(self._average(mem_samples)),
"samples" : mem_samples,
},
}
def json(self):
return dumps(self.export(), separators=(',', ':'))
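# Hypothetical usage sketch (Sample's constructor is assumed here, it is not defined in this file):
# samples = [Sample(cpu=0.42, memory=512 * 1024**2, timestamp=t) for t in range(10)]
# stats = SampleSet(samples)
# print(stats)         # human-readable CPU/memory summary
# print(stats.json())  # compact JSON export of all statistics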
|
python
|
import pytest
from django.core.exceptions import PermissionDenied
from datahub.admin_report.report import get_report_by_id, get_reports_by_model
from datahub.core.test_utils import create_test_user
pytestmark = pytest.mark.django_db
@pytest.mark.parametrize(
'permission_codenames,expected_result',
(
((), {}),
(('change_metadatamodel',), {'MetadataModel': ['MetadataReport']}),
),
)
def test_get_reports_by_model(permission_codenames, expected_result):
"""Test get_reports_by_model() for various cases."""
user = create_test_user(permission_codenames=permission_codenames)
result = get_reports_by_model(user)
assert {
model.__name__: [report.__class__.__name__ for report in reports]
for model, reports in result.items()
} == expected_result
@pytest.mark.parametrize(
'permission_codenames,should_raise',
(
((), True),
(('change_metadatamodel',), False),
),
)
def test_get_report_by_id(permission_codenames, should_raise):
"""Test get_report_by_id() for various cases."""
user = create_test_user(permission_codenames=permission_codenames)
if should_raise:
with pytest.raises(PermissionDenied):
get_report_by_id('test-report', user)
else:
assert get_report_by_id('test-report', user)
|
python
|
def factorial(n):
    if n < 1:
        return 1
    else:
        return n * factorial(n - 1)

print(factorial(5))  # should output 120
|
python
|
"""
@author: acfromspace
"""
# Make a function aardvark that, given a string, returns 'aardvark'
# if the string starts with an a. Otherwise, return 'zebra'.
#
# >>>> aardvark("arg")
# aardvark
# >>>> aardvark("Trinket")
# zebra
def aardvark(string):
# Add code here that returns the answer.
if string[0] == "a" or string[0] == "A":
return "aardvark"
else:
return "zebra"
# Add print statements here to test what your code does:
print(aardvark("arg"))
print(aardvark("Trinket"))
|
python
|
import datetime
from django.test import TestCase
from trojsten.contests.models import Competition, Round, Semester, Task
from trojsten.people.models import User, UserProperty, UserPropertyKey
from .model_sanitizers import (
GeneratorFieldSanitizer,
TaskSanitizer,
UserPropertySanitizer,
UserSanitizer,
)
class GeneratorFieldSanitizerTest(TestCase):
def test_data_replaced_by_generated_data(self):
def fake_generator():
return "generated_data"
sanitized_data = GeneratorFieldSanitizer(fake_generator).sanitize("original_data")
self.assertEquals(sanitized_data, "generated_data")
class TaskSanitizerTest(TestCase):
def test_task_data_sanitized(self):
c = Competition.objects.create(name="ABCD")
s = Semester.objects.create(year=47, competition=c, number=1)
r = Round.objects.create(number=3, semester=s, visible=True, solutions_visible=True)
Task.objects.create(number=2, name="foo", round=r)
TaskSanitizer().sanitize()
sanitized_task = Task.objects.get()
self.assertNotEquals(sanitized_task.name, "foo")
class UserSanitizerTest(TestCase):
def test_user_data_sanitized(self):
User.objects.create(
username="foo",
password="pwd",
first_name="Ferko",
last_name="Mrkvicka",
birth_date=datetime.date(year=2000, month=1, day=1),
email="[email protected]",
)
UserSanitizer().sanitize()
sanitized_user = User.objects.get()
self.assertNotEquals(sanitized_user.username, "foo")
self.assertEquals(sanitized_user.password, "")
self.assertNotEquals(sanitized_user.first_name, "Ferko")
self.assertNotEquals(sanitized_user.last_name, "Mrkvicka")
self.assertNotEquals(sanitized_user.birth_date, datetime.date(year=2000, month=1, day=1))
        self.assertNotEquals(sanitized_user.email, "[email protected]")
class UserPropertySanitizerTest(TestCase):
def test_userproperty_data_sanitized(self):
key = UserPropertyKey.objects.create(key_name="foo")
user = User.objects.create(username="user")
UserProperty.objects.create(user=user, key=key, value="bar")
UserPropertySanitizer().sanitize()
sanitized_userproperty = UserProperty.objects.get()
self.assertEquals(sanitized_userproperty.key, key)
self.assertNotEquals(sanitized_userproperty.value, "bar")
self.assertEquals(len(sanitized_userproperty.value), 3)
|
python
|
#!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_voip_profile
short_description: Configure VoIP profiles.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
voip_profile:
description: the top level parameters set
required: false
type: dict
suboptions:
comment:
type: str
description: 'Comment.'
name:
type: str
description: 'Profile name.'
sccp:
description: no description
type: dict
required: false
suboptions:
block-mcast:
type: str
description: 'Enable/disable block multicast RTP connections.'
choices:
- 'disable'
- 'enable'
log-call-summary:
type: str
description: 'Enable/disable log summary of SCCP calls.'
choices:
- 'disable'
- 'enable'
log-violations:
type: str
description: 'Enable/disable logging of SCCP violations.'
choices:
- 'disable'
- 'enable'
max-calls:
type: int
description: 'Maximum calls per minute per SCCP client (max 65535).'
status:
type: str
description: 'Enable/disable SCCP.'
choices:
- 'disable'
- 'enable'
verify-header:
type: str
description: 'Enable/disable verify SCCP header content.'
choices:
- 'disable'
- 'enable'
sip:
description: no description
type: dict
required: false
suboptions:
ack-rate:
type: int
description: 'ACK request rate limit (per second, per policy).'
block-ack:
type: str
description: 'Enable/disable block ACK requests.'
choices:
- 'disable'
- 'enable'
block-bye:
type: str
description: 'Enable/disable block BYE requests.'
choices:
- 'disable'
- 'enable'
block-cancel:
type: str
description: 'Enable/disable block CANCEL requests.'
choices:
- 'disable'
- 'enable'
block-geo-red-options:
type: str
description: 'Enable/disable block OPTIONS requests, but OPTIONS requests still notify for redundancy.'
choices:
- 'disable'
- 'enable'
block-info:
type: str
description: 'Enable/disable block INFO requests.'
choices:
- 'disable'
- 'enable'
block-invite:
type: str
description: 'Enable/disable block INVITE requests.'
choices:
- 'disable'
- 'enable'
block-long-lines:
type: str
description: 'Enable/disable block requests with headers exceeding max-line-length.'
choices:
- 'disable'
- 'enable'
block-message:
type: str
description: 'Enable/disable block MESSAGE requests.'
choices:
- 'disable'
- 'enable'
block-notify:
type: str
description: 'Enable/disable block NOTIFY requests.'
choices:
- 'disable'
- 'enable'
block-options:
type: str
description: 'Enable/disable block OPTIONS requests and no OPTIONS as notifying message for redundancy either.'
choices:
- 'disable'
- 'enable'
block-prack:
type: str
description: 'Enable/disable block prack requests.'
choices:
- 'disable'
- 'enable'
block-publish:
type: str
description: 'Enable/disable block PUBLISH requests.'
choices:
- 'disable'
- 'enable'
block-refer:
type: str
description: 'Enable/disable block REFER requests.'
choices:
- 'disable'
- 'enable'
block-register:
type: str
description: 'Enable/disable block REGISTER requests.'
choices:
- 'disable'
- 'enable'
block-subscribe:
type: str
description: 'Enable/disable block SUBSCRIBE requests.'
choices:
- 'disable'
- 'enable'
block-unknown:
type: str
description: 'Block unrecognized SIP requests (enabled by default).'
choices:
- 'disable'
- 'enable'
block-update:
type: str
description: 'Enable/disable block UPDATE requests.'
choices:
- 'disable'
- 'enable'
bye-rate:
type: int
description: 'BYE request rate limit (per second, per policy).'
call-keepalive:
type: int
description: 'Continue tracking calls with no RTP for this many minutes.'
cancel-rate:
type: int
description: 'CANCEL request rate limit (per second, per policy).'
contact-fixup:
type: str
description: 'Fixup contact anyway even if contacts IP:port doesnt match sessions IP:port.'
choices:
- 'disable'
- 'enable'
hnt-restrict-source-ip:
type: str
description: 'Enable/disable restrict RTP source IP to be the same as SIP source IP when HNT is enabled.'
choices:
- 'disable'
- 'enable'
hosted-nat-traversal:
type: str
description: 'Hosted NAT Traversal (HNT).'
choices:
- 'disable'
- 'enable'
info-rate:
type: int
description: 'INFO request rate limit (per second, per policy).'
invite-rate:
type: int
description: 'INVITE request rate limit (per second, per policy).'
ips-rtp:
type: str
description: 'Enable/disable allow IPS on RTP.'
choices:
- 'disable'
- 'enable'
log-call-summary:
type: str
description: 'Enable/disable logging of SIP call summary.'
choices:
- 'disable'
- 'enable'
log-violations:
type: str
description: 'Enable/disable logging of SIP violations.'
choices:
- 'disable'
- 'enable'
malformed-header-allow:
type: str
description: 'Action for malformed Allow header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-call-id:
type: str
description: 'Action for malformed Call-ID header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-contact:
type: str
description: 'Action for malformed Contact header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-content-length:
type: str
description: 'Action for malformed Content-Length header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-content-type:
type: str
description: 'Action for malformed Content-Type header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-cseq:
type: str
description: 'Action for malformed CSeq header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-expires:
type: str
description: 'Action for malformed Expires header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-from:
type: str
description: 'Action for malformed From header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-max-forwards:
type: str
description: 'Action for malformed Max-Forwards header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-p-asserted-identity:
type: str
description: 'Action for malformed P-Asserted-Identity header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-rack:
type: str
description: 'Action for malformed RAck header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-record-route:
type: str
description: 'Action for malformed Record-Route header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-route:
type: str
description: 'Action for malformed Route header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-rseq:
type: str
description: 'Action for malformed RSeq header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-sdp-a:
type: str
description: 'Action for malformed SDP a line.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-sdp-b:
type: str
description: 'Action for malformed SDP b line.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-sdp-c:
type: str
description: 'Action for malformed SDP c line.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-sdp-i:
type: str
description: 'Action for malformed SDP i line.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-sdp-k:
type: str
description: 'Action for malformed SDP k line.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-sdp-m:
type: str
description: 'Action for malformed SDP m line.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-sdp-o:
type: str
description: 'Action for malformed SDP o line.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-sdp-r:
type: str
description: 'Action for malformed SDP r line.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-sdp-s:
type: str
description: 'Action for malformed SDP s line.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-sdp-t:
type: str
description: 'Action for malformed SDP t line.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-sdp-v:
type: str
description: 'Action for malformed SDP v line.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-sdp-z:
type: str
description: 'Action for malformed SDP z line.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-to:
type: str
description: 'Action for malformed To header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-via:
type: str
description: 'Action for malformed VIA header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-request-line:
type: str
description: 'Action for malformed request line.'
choices:
- 'pass'
- 'discard'
- 'respond'
max-body-length:
type: int
description: 'Maximum SIP message body length (0 meaning no limit).'
max-dialogs:
type: int
description: 'Maximum number of concurrent calls/dialogs (per policy).'
max-idle-dialogs:
type: int
description: 'Maximum number established but idle dialogs to retain (per policy).'
max-line-length:
type: int
description: 'Maximum SIP header line length (78-4096).'
message-rate:
type: int
description: 'MESSAGE request rate limit (per second, per policy).'
nat-port-range:
type: str
description: 'RTP NAT port range.'
nat-trace:
type: str
description: 'Enable/disable preservation of original IP in SDP i line.'
choices:
- 'disable'
- 'enable'
no-sdp-fixup:
type: str
description: 'Enable/disable no SDP fix-up.'
choices:
- 'disable'
- 'enable'
notify-rate:
type: int
description: 'NOTIFY request rate limit (per second, per policy).'
open-contact-pinhole:
type: str
description: 'Enable/disable open pinhole for non-REGISTER Contact port.'
choices:
- 'disable'
- 'enable'
open-record-route-pinhole:
type: str
description: 'Enable/disable open pinhole for Record-Route port.'
choices:
- 'disable'
- 'enable'
open-register-pinhole:
type: str
description: 'Enable/disable open pinhole for REGISTER Contact port.'
choices:
- 'disable'
- 'enable'
open-via-pinhole:
type: str
description: 'Enable/disable open pinhole for Via port.'
choices:
- 'disable'
- 'enable'
options-rate:
type: int
description: 'OPTIONS request rate limit (per second, per policy).'
prack-rate:
type: int
description: 'PRACK request rate limit (per second, per policy).'
preserve-override:
type: str
description: 'Override i line to preserve original IPS (default: append).'
choices:
- 'disable'
- 'enable'
provisional-invite-expiry-time:
type: int
description: 'Expiry time for provisional INVITE (10 - 3600 sec).'
publish-rate:
type: int
description: 'PUBLISH request rate limit (per second, per policy).'
refer-rate:
type: int
description: 'REFER request rate limit (per second, per policy).'
register-contact-trace:
type: str
description: 'Enable/disable trace original IP/port within the contact header of REGISTER requests.'
choices:
- 'disable'
- 'enable'
register-rate:
type: int
description: 'REGISTER request rate limit (per second, per policy).'
rfc2543-branch:
type: str
description: 'Enable/disable support via branch compliant with RFC 2543.'
choices:
- 'disable'
- 'enable'
rtp:
type: str
description: 'Enable/disable create pinholes for RTP traffic to traverse firewall.'
choices:
- 'disable'
- 'enable'
ssl-algorithm:
type: str
description: 'Relative strength of encryption algorithms accepted in negotiation.'
choices:
- 'high'
- 'medium'
- 'low'
ssl-auth-client:
type: str
description: 'Require a client certificate and authenticate it with the peer/peergrp.'
ssl-auth-server:
type: str
description: 'Authenticate the servers certificate with the peer/peergrp.'
ssl-client-certificate:
type: str
description: 'Name of Certificate to offer to server if requested.'
ssl-client-renegotiation:
type: str
description: 'Allow/block client renegotiation by server.'
choices:
- 'allow'
- 'deny'
- 'secure'
ssl-max-version:
type: str
description: 'Highest SSL/TLS version to negotiate.'
choices:
- 'ssl-3.0'
- 'tls-1.0'
- 'tls-1.1'
- 'tls-1.2'
- 'tls-1.3'
ssl-min-version:
type: str
description: 'Lowest SSL/TLS version to negotiate.'
choices:
- 'ssl-3.0'
- 'tls-1.0'
- 'tls-1.1'
- 'tls-1.2'
- 'tls-1.3'
ssl-mode:
type: str
description: 'SSL/TLS mode for encryption & decryption of traffic.'
choices:
- 'off'
- 'full'
ssl-pfs:
type: str
description: 'SSL Perfect Forward Secrecy.'
choices:
- 'require'
- 'deny'
- 'allow'
ssl-send-empty-frags:
type: str
description: 'Send empty fragments to avoid attack on CBC IV (SSL 3.0 & TLS 1.0 only).'
choices:
- 'disable'
- 'enable'
ssl-server-certificate:
type: str
description: 'Name of Certificate return to the client in every SSL connection.'
status:
type: str
description: 'Enable/disable SIP.'
choices:
- 'disable'
- 'enable'
strict-register:
type: str
description: 'Enable/disable only allow the registrar to connect.'
choices:
- 'disable'
- 'enable'
subscribe-rate:
type: int
description: 'SUBSCRIBE request rate limit (per second, per policy).'
unknown-header:
type: str
description: 'Action for unknown SIP header.'
choices:
- 'pass'
- 'discard'
- 'respond'
update-rate:
type: int
description: 'UPDATE request rate limit (per second, per policy).'
ack-rate-track:
type: str
description: 'Track the packet protocol field.'
choices:
- 'none'
- 'src-ip'
- 'dest-ip'
bye-rate-track:
type: str
description: 'Track the packet protocol field.'
choices:
- 'none'
- 'src-ip'
- 'dest-ip'
cancel-rate-track:
type: str
description: 'Track the packet protocol field.'
choices:
- 'none'
- 'src-ip'
- 'dest-ip'
info-rate-track:
type: str
description: 'Track the packet protocol field.'
choices:
- 'none'
- 'src-ip'
- 'dest-ip'
invite-rate-track:
type: str
description: 'Track the packet protocol field.'
choices:
- 'none'
- 'src-ip'
- 'dest-ip'
malformed-header-no-proxy-require:
type: str
description: 'Action for malformed SIP messages without Proxy-Require header.'
choices:
- 'pass'
- 'discard'
- 'respond'
malformed-header-no-require:
type: str
description: 'Action for malformed SIP messages without Require header.'
choices:
- 'pass'
- 'discard'
- 'respond'
message-rate-track:
type: str
description: 'Track the packet protocol field.'
choices:
- 'none'
- 'src-ip'
- 'dest-ip'
notify-rate-track:
type: str
description: 'Track the packet protocol field.'
choices:
- 'none'
- 'src-ip'
- 'dest-ip'
options-rate-track:
type: str
description: 'Track the packet protocol field.'
choices:
- 'none'
- 'src-ip'
- 'dest-ip'
prack-rate-track:
type: str
description: 'Track the packet protocol field.'
choices:
- 'none'
- 'src-ip'
- 'dest-ip'
publish-rate-track:
type: str
description: 'Track the packet protocol field.'
choices:
- 'none'
- 'src-ip'
- 'dest-ip'
refer-rate-track:
type: str
description: 'Track the packet protocol field.'
choices:
- 'none'
- 'src-ip'
- 'dest-ip'
register-rate-track:
type: str
description: 'Track the packet protocol field.'
choices:
- 'none'
- 'src-ip'
- 'dest-ip'
subscribe-rate-track:
type: str
description: 'Track the packet protocol field.'
choices:
- 'none'
- 'src-ip'
- 'dest-ip'
update-rate-track:
type: str
description: 'Track the packet protocol field.'
choices:
- 'none'
- 'src-ip'
- 'dest-ip'
feature-set:
type: str
description: 'Flow or proxy inspection feature set.'
choices:
- 'flow'
- 'proxy'
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Configure VoIP profiles.
fmgr_voip_profile:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
state: <value in [present, absent]>
voip_profile:
comment: <value of string>
name: <value of string>
sccp:
block-mcast: <value in [disable, enable]>
log-call-summary: <value in [disable, enable]>
log-violations: <value in [disable, enable]>
max-calls: <value of integer>
status: <value in [disable, enable]>
verify-header: <value in [disable, enable]>
sip:
ack-rate: <value of integer>
block-ack: <value in [disable, enable]>
block-bye: <value in [disable, enable]>
block-cancel: <value in [disable, enable]>
block-geo-red-options: <value in [disable, enable]>
block-info: <value in [disable, enable]>
block-invite: <value in [disable, enable]>
block-long-lines: <value in [disable, enable]>
block-message: <value in [disable, enable]>
block-notify: <value in [disable, enable]>
block-options: <value in [disable, enable]>
block-prack: <value in [disable, enable]>
block-publish: <value in [disable, enable]>
block-refer: <value in [disable, enable]>
block-register: <value in [disable, enable]>
block-subscribe: <value in [disable, enable]>
block-unknown: <value in [disable, enable]>
block-update: <value in [disable, enable]>
bye-rate: <value of integer>
call-keepalive: <value of integer>
cancel-rate: <value of integer>
contact-fixup: <value in [disable, enable]>
hnt-restrict-source-ip: <value in [disable, enable]>
hosted-nat-traversal: <value in [disable, enable]>
info-rate: <value of integer>
invite-rate: <value of integer>
ips-rtp: <value in [disable, enable]>
log-call-summary: <value in [disable, enable]>
log-violations: <value in [disable, enable]>
malformed-header-allow: <value in [pass, discard, respond]>
malformed-header-call-id: <value in [pass, discard, respond]>
malformed-header-contact: <value in [pass, discard, respond]>
malformed-header-content-length: <value in [pass, discard, respond]>
malformed-header-content-type: <value in [pass, discard, respond]>
malformed-header-cseq: <value in [pass, discard, respond]>
malformed-header-expires: <value in [pass, discard, respond]>
malformed-header-from: <value in [pass, discard, respond]>
malformed-header-max-forwards: <value in [pass, discard, respond]>
malformed-header-p-asserted-identity: <value in [pass, discard, respond]>
malformed-header-rack: <value in [pass, discard, respond]>
malformed-header-record-route: <value in [pass, discard, respond]>
malformed-header-route: <value in [pass, discard, respond]>
malformed-header-rseq: <value in [pass, discard, respond]>
malformed-header-sdp-a: <value in [pass, discard, respond]>
malformed-header-sdp-b: <value in [pass, discard, respond]>
malformed-header-sdp-c: <value in [pass, discard, respond]>
malformed-header-sdp-i: <value in [pass, discard, respond]>
malformed-header-sdp-k: <value in [pass, discard, respond]>
malformed-header-sdp-m: <value in [pass, discard, respond]>
malformed-header-sdp-o: <value in [pass, discard, respond]>
malformed-header-sdp-r: <value in [pass, discard, respond]>
malformed-header-sdp-s: <value in [pass, discard, respond]>
malformed-header-sdp-t: <value in [pass, discard, respond]>
malformed-header-sdp-v: <value in [pass, discard, respond]>
malformed-header-sdp-z: <value in [pass, discard, respond]>
malformed-header-to: <value in [pass, discard, respond]>
malformed-header-via: <value in [pass, discard, respond]>
malformed-request-line: <value in [pass, discard, respond]>
max-body-length: <value of integer>
max-dialogs: <value of integer>
max-idle-dialogs: <value of integer>
max-line-length: <value of integer>
message-rate: <value of integer>
nat-port-range: <value of string>
nat-trace: <value in [disable, enable]>
no-sdp-fixup: <value in [disable, enable]>
notify-rate: <value of integer>
open-contact-pinhole: <value in [disable, enable]>
open-record-route-pinhole: <value in [disable, enable]>
open-register-pinhole: <value in [disable, enable]>
open-via-pinhole: <value in [disable, enable]>
options-rate: <value of integer>
prack-rate: <value of integer>
preserve-override: <value in [disable, enable]>
provisional-invite-expiry-time: <value of integer>
publish-rate: <value of integer>
refer-rate: <value of integer>
register-contact-trace: <value in [disable, enable]>
register-rate: <value of integer>
rfc2543-branch: <value in [disable, enable]>
rtp: <value in [disable, enable]>
ssl-algorithm: <value in [high, medium, low]>
ssl-auth-client: <value of string>
ssl-auth-server: <value of string>
ssl-client-certificate: <value of string>
ssl-client-renegotiation: <value in [allow, deny, secure]>
ssl-max-version: <value in [ssl-3.0, tls-1.0, tls-1.1, ...]>
ssl-min-version: <value in [ssl-3.0, tls-1.0, tls-1.1, ...]>
ssl-mode: <value in [off, full]>
ssl-pfs: <value in [require, deny, allow]>
ssl-send-empty-frags: <value in [disable, enable]>
ssl-server-certificate: <value of string>
status: <value in [disable, enable]>
strict-register: <value in [disable, enable]>
subscribe-rate: <value of integer>
unknown-header: <value in [pass, discard, respond]>
update-rate: <value of integer>
ack-rate-track: <value in [none, src-ip, dest-ip]>
bye-rate-track: <value in [none, src-ip, dest-ip]>
cancel-rate-track: <value in [none, src-ip, dest-ip]>
info-rate-track: <value in [none, src-ip, dest-ip]>
invite-rate-track: <value in [none, src-ip, dest-ip]>
malformed-header-no-proxy-require: <value in [pass, discard, respond]>
malformed-header-no-require: <value in [pass, discard, respond]>
message-rate-track: <value in [none, src-ip, dest-ip]>
notify-rate-track: <value in [none, src-ip, dest-ip]>
options-rate-track: <value in [none, src-ip, dest-ip]>
prack-rate-track: <value in [none, src-ip, dest-ip]>
publish-rate-track: <value in [none, src-ip, dest-ip]>
refer-rate-track: <value in [none, src-ip, dest-ip]>
register-rate-track: <value in [none, src-ip, dest-ip]>
subscribe-rate-track: <value in [none, src-ip, dest-ip]>
update-rate-track: <value in [none, src-ip, dest-ip]>
feature-set: <value in [flow, proxy]>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/voip/profile',
'/pm/config/global/obj/voip/profile'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/voip/profile/{profile}',
'/pm/config/global/obj/voip/profile/{profile}'
]
url_params = ['adom']
module_primary_key = 'name'
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'voip_profile': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'options': {
'comment': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'name': {
'required': True,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'sccp': {
'required': False,
'type': 'dict',
'options': {
'block-mcast': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'log-call-summary': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'log-violations': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'max-calls': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'status': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'verify-header': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
}
}
},
'sip': {
'required': False,
'type': 'dict',
'options': {
'ack-rate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'block-ack': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-bye': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-cancel': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-geo-red-options': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-info': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-invite': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-long-lines': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-message': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-notify': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-options': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-prack': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-publish': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-refer': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-register': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-subscribe': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-unknown': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'block-update': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'bye-rate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'call-keepalive': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'cancel-rate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'contact-fixup': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'hnt-restrict-source-ip': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'hosted-nat-traversal': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'info-rate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'invite-rate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'ips-rtp': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'log-call-summary': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'log-violations': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'malformed-header-allow': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-call-id': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-contact': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-content-length': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-content-type': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-cseq': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-expires': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-from': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-max-forwards': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-p-asserted-identity': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-rack': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-record-route': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-route': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-rseq': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-sdp-a': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-sdp-b': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-sdp-c': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-sdp-i': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-sdp-k': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-sdp-m': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-sdp-o': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-sdp-r': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-sdp-s': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-sdp-t': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-sdp-v': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-sdp-z': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-to': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-via': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-request-line': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'max-body-length': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'max-dialogs': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'max-idle-dialogs': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'max-line-length': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'message-rate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'nat-port-range': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'nat-trace': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'no-sdp-fixup': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'notify-rate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'open-contact-pinhole': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'open-record-route-pinhole': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'open-register-pinhole': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'open-via-pinhole': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'options-rate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'prack-rate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'preserve-override': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'provisional-invite-expiry-time': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'publish-rate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'refer-rate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'register-contact-trace': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'register-rate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'rfc2543-branch': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'rtp': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ssl-algorithm': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'high',
'medium',
'low'
],
'type': 'str'
},
'ssl-auth-client': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'ssl-auth-server': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'ssl-client-certificate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'ssl-client-renegotiation': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'allow',
'deny',
'secure'
],
'type': 'str'
},
'ssl-max-version': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'ssl-3.0',
'tls-1.0',
'tls-1.1',
'tls-1.2',
'tls-1.3'
],
'type': 'str'
},
'ssl-min-version': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'ssl-3.0',
'tls-1.0',
'tls-1.1',
'tls-1.2',
'tls-1.3'
],
'type': 'str'
},
'ssl-mode': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'off',
'full'
],
'type': 'str'
},
'ssl-pfs': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'require',
'deny',
'allow'
],
'type': 'str'
},
'ssl-send-empty-frags': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'ssl-server-certificate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'status': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'strict-register': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'disable',
'enable'
],
'type': 'str'
},
'subscribe-rate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'unknown-header': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'update-rate': {
'required': False,
'revision': {
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'ack-rate-track': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'none',
'src-ip',
'dest-ip'
],
'type': 'str'
},
'bye-rate-track': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'none',
'src-ip',
'dest-ip'
],
'type': 'str'
},
'cancel-rate-track': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'none',
'src-ip',
'dest-ip'
],
'type': 'str'
},
'info-rate-track': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'none',
'src-ip',
'dest-ip'
],
'type': 'str'
},
'invite-rate-track': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'none',
'src-ip',
'dest-ip'
],
'type': 'str'
},
'malformed-header-no-proxy-require': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'malformed-header-no-require': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'pass',
'discard',
'respond'
],
'type': 'str'
},
'message-rate-track': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'none',
'src-ip',
'dest-ip'
],
'type': 'str'
},
'notify-rate-track': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'none',
'src-ip',
'dest-ip'
],
'type': 'str'
},
'options-rate-track': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'none',
'src-ip',
'dest-ip'
],
'type': 'str'
},
'prack-rate-track': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'none',
'src-ip',
'dest-ip'
],
'type': 'str'
},
'publish-rate-track': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'none',
'src-ip',
'dest-ip'
],
'type': 'str'
},
'refer-rate-track': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'none',
'src-ip',
'dest-ip'
],
'type': 'str'
},
'register-rate-track': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'none',
'src-ip',
'dest-ip'
],
'type': 'str'
},
'subscribe-rate-track': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'none',
'src-ip',
'dest-ip'
],
'type': 'str'
},
'update-rate-track': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'none',
'src-ip',
'dest-ip'
],
'type': 'str'
}
}
},
'feature-set': {
'required': False,
'revision': {
'7.0.0': True
},
'choices': [
'flow',
'proxy'
],
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'voip_profile'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
|
python
|
import torch.nn
from gatelfpytorchjson.CustomModule import CustomModule
import sys
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
streamhandler = logging.StreamHandler(stream=sys.stderr)
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
streamhandler.setFormatter(formatter)
logger.addHandler(streamhandler)
class LayerLSTM(CustomModule):
"""
LayerCNN handles a single input of shape (batchsize, maxseqlen, embdims)
and creates everything to get a final output of hidden units
(including batch normalization, dropout and non-linearity)
The number of output units is in self.dim_outputs after initialisation.
"""
def __init__(self, emb_dims, config={}):
super(LayerLSTM, self).__init__(config=config)
logger.debug("Building LayerLSTM module, config=%s" % (config, ))
self.emb_dims = emb_dims
self.channels_out = config.get("channels_out", 100)
self.batch_first = config.get("batch_first", True)
self.bidirectional = config.get("bidirectional", True)
self.dropout_prob = config.get("dropout", 0.6)
self.use_batchnorm = config.get("use_batchnorm", True)
self.lstm = torch.nn.LSTM(self.emb_dims, self.channels_out, batch_first=self.batch_first,
bidirectional=self.bidirectional)
# Note: the log-softmax function is used directly in forward, we do not define a layer for that
# logger.info("Layer created: %s" % (self, ))
if self.bidirectional:
self.out_dims = self.channels_out*2
else:
self.out_dims = self.channels_out
def forward(self, batch):
# batch is assumed to already be a tensor of the correct shape
# batchsize, maxseq, embdims
        if self.on_cuda():
            batch = batch.cuda()
lstmed, hidden = self.lstm(batch)
# logger.debug("output tensor is if size %s: %s" % (out.size(), out, ))
return lstmed
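# Hypothetical usage sketch (shapes only; CustomModule's config handling is assumed):
# layer = LayerLSTM(emb_dims=50, config={"channels_out": 64, "bidirectional": True})
# x = torch.randn(8, 20, 50)   # (batchsize, maxseqlen, embdims)
# out = layer(x)               # -> shape (8, 20, 128): bidirectional doubles channels_out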
|
python
|
class TimeFrame(object):
_string_repr = [[" day", " hour", " min", " sec"],[" d", " h", " m", " s"]]
def __init__(self, seconds = 0.0):
self._seconds = 0.0
self._minutes = 0
self._hours = 0
self._days = 0
self._use_short = int(False)
self._total_seconds = seconds
self.set_with_seconds(seconds)
def use_short_repr(self, use_short = True):
self._use_short = int(use_short)
def set_with_seconds(self, seconds):
self._total_seconds = seconds
seconds = float(seconds)
if seconds >= 0.0:
            if seconds >= 86400:
self._days = int(seconds / 86400)
self._hours = int((seconds - self._days * 86400) / 3600)
self._minutes = int((seconds - self._days * 86400 - self._hours * 3600) / 60)
self._seconds = seconds % 60.0
            elif seconds >= 3600.0:
self._hours = int(seconds / 3600)
self._minutes = int((seconds - self._hours * 3600) / 60)
self._seconds = seconds % 60.0
            elif seconds >= 60.0:
self._hours = 0
self._minutes = int(seconds / 60.0)
self._seconds = seconds % 60.0
else:
self._hours, self._minutes = 0, 0
self._seconds = seconds
def __str__(self):
if self._total_seconds < 1.0 and self._total_seconds > 0:
return "{:1.5f}".format(self._seconds) + TimeFrame._string_repr[self._use_short][3]
result = ""
if self._days > 0:
result += str(self._days) + TimeFrame._string_repr[self._use_short][0]
if self._use_short == 0 and self._days > 1:
result += "s"
if self._hours > 0 or self._minutes > 0 or self._seconds > 0:
result += " "
if self._hours > 0:
result += str(self._hours) + TimeFrame._string_repr[self._use_short][1]
if self._use_short == 0 and self._hours > 1:
result += "s"
if self._minutes > 0 or self._seconds > 0:
result += " "
if self._minutes > 0:
result += str(self._minutes) + TimeFrame._string_repr[self._use_short][2]
if self._seconds > 0:
result += " "
if self._seconds > 0:
result += "{:2.2f}".format(self._seconds) + TimeFrame._string_repr[self._use_short][3]
if len(result) == 0:
result = "0" + TimeFrame._string_repr[self._use_short][3]
return result
if __name__ == '__main__':
tf = TimeFrame()
print(tf)
tf = TimeFrame(5)
print(TimeFrame(0.111))
print(tf)
tf = TimeFrame(1000)
print(tf)
tf = TimeFrame(4002.2)
print(tf)
print(TimeFrame(90320.21))
print(TimeFrame(190402.8))
|
python
|
import subprocess
import sys
from optparse import OptionParser
from django.core.management import LaxOptionParser
from django.core.management.base import BaseCommand, CommandError
from cas_dev_server.management import subprocess_environment
class Command(BaseCommand):
option_list = BaseCommand.option_list[1:]
args = '[command] [arguments]'
help = 'Runs a management command on the CAS development server.'
def handle(self, *args, **options):
if not args:
raise CommandError('Must specify a command.')
subprocess_args = (sys.executable, '-m', 'django.bin.django-admin', args[0])
if options['settings']:
subprocess_args += '--settings=' + options['settings'],
if options['pythonpath']:
subprocess_args += '--pythonpath=' + options['pythonpath'],
if options['traceback']:
subprocess_args += '--traceback',
if options['no_color']:
subprocess_args += '--no-color',
subprocess.call(subprocess_args + args[1:], stdout=self.stdout, stderr=self.stderr,
env=subprocess_environment())
def create_parser(self, prog_name, subcommand, parser_class=LaxOptionParser):
"""
Create and return the ``OptionParser`` which will be used to
parse the arguments to this command.
"""
return parser_class(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version(),
option_list=self.option_list)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand, OptionParser)
parser.print_help()
|
python
|
#!/usr/bin/env python3
from dotenv import load_dotenv
load_dotenv()
import os
import logging
import argparse
import pugsql
import json
import pandas as pd
from uuid import UUID
from articleparser.drive import GoogleDrive
logging.basicConfig(level=os.getenv("LOG_LEVEL", "INFO"))
logger = logging.getLogger(__name__)
queries = pugsql.module("queries")
def create_producer_id_mapping(queries):
d = list(queries.get_producers())
df = pd.DataFrame(d)[["name", "url", "producer_id"]]
df["producer_id"] = df["producer_id"].apply(lambda pid: str(UUID(pid)))
df.to_csv("producers_id.csv", index=False)
def create_public_file_id_mapping(queries):
d = queries.get_drive_by_name(name="public")
data = json.loads(d["data"])
producer_months = data["files"]["producers"]
json.dump(producer_months, open("public_file_mapping.json", "w"))
def main(args):
queries.connect(os.getenv("DB_URL"))
create_producer_id_mapping(queries)
create_public_file_id_mapping(queries)
# upload to gdrive
drive = GoogleDrive("", "", "{}", args.service_account)
parent_dir_id = os.getenv("GDRIVE_PUBLIC_DATASETS_ID")
drive.upload(
parent_dir_id,
"public_file_mapping.json",
file_id=os.getenv("GDRIVE_PUBLIC_FILE_MAPPING_ID"),
)
drive.upload(
parent_dir_id, "producers_id.csv", file_id=os.getenv("GDRIVE_PRODUCERS_CSV_ID")
)
queries.disconnect()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--service-account",
help="Service account file to access Google Drive",
default="service.json",
)
args = parser.parse_args()
main(args)
|
python
|
import warnings
from collections import namedtuple, defaultdict
from functools import partial
from typing import Optional, Tuple, List, Callable, Any
import torch
from torch import Tensor
from torch import distributions
from torch import nn
class TemporalDifference(nn.Module):
def __init__(
self,
num_actions: int = 4,
gamma: float = 0.9,
) -> None:
"""
        TemporalDifference main class
        Args:
            num_actions (int): Number of classes
            gamma (float): Reward propagation factor
"""
        super().__init__()
        self.num_actions = num_actions
        self.gamma = gamma
def _forward(self, x: Tensor) -> List[Tensor]:
return x
def forward(self, x: Tensor) -> Tensor:
outputs = self._forward(x)
return outputs
class Sarsa(nn.Module):
def __init__(
self,
num_actions: int,
epsilon: float = 0.4,
gamma: float = 0.2,
alpha: float = 0.4
) -> None:
"""
Sarsa main class
Args:
num_actions (int): Number of classes
gamma (float): Reward propagation factor
"""
super().__init__()
self.num_actions = num_actions
self.epsilon = epsilon
self.gamma = gamma
self.alpha = alpha
self.state_values = defaultdict(lambda: torch.zeros(num_actions))
def _forward(self, x: Tensor) -> Tensor:
state_action_values = self.state_values[x]
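        # epsilon-greedy policy vector: greedy (max-value) actions get 1 - epsilon + epsilon/num_actions,
        # every other action gets epsilon/num_actions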
policy = torch.where(state_action_values == torch.max(state_action_values), 1 - self.epsilon + (self.epsilon / self.num_actions), self.epsilon / self.num_actions)
return policy
def forward(self, x: Tensor) -> Tensor:
policy = self._forward(x)
if self.training:
policy = distributions.binomial.Binomial(10, probs=policy).sample()
outputs = torch.argmax(policy)
return outputs
def push(self, state, new_state, action, reward, done):
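        # SARSA (on-policy) TD update: Q(s,a) <- Q(s,a) + alpha * (r + gamma * Q(s',a') - Q(s,a)),
        # where a' is the action chosen for the new state by the current epsilon-greedy policy (self(new_state))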
discounted_reward = reward + self.gamma * self.state_values[new_state][self(new_state)]
loss = (discounted_reward - self.state_values[state][action])
self.state_values[state][action] += self.alpha * loss
class Qlearning(nn.Module):
def __init__(
self,
num_actions: int,
epsilon: float = 0.4,
gamma: float = 0.2,
alpha: float = 0.4
) -> None:
"""
        Qlearning main class
Args:
num_actions (int): Number of classes
gamma (float): Reward propagation factor
"""
super().__init__()
self.num_actions = num_actions
self.epsilon = epsilon
self.gamma = gamma
self.alpha = alpha
self.state_values = defaultdict(lambda: torch.zeros(num_actions))
def _forward(self, x: Tensor) -> Tensor:
state_action_values = self.state_values[x]
policy = torch.where(state_action_values == torch.max(state_action_values), 1 - self.epsilon + (self.epsilon / self.num_actions), self.epsilon / self.num_actions)
return policy
def forward(self, x: Tensor) -> Tensor:
policy = self._forward(x)
if self.training:
policy = distributions.binomial.Binomial(10, probs=policy).sample()
outputs = torch.argmax(policy)
return outputs
def push(self, state, new_state, action, reward, done):
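        # Q-learning (off-policy) TD update: Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))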
        best_action = torch.argmax(self.state_values[new_state])
        discounted_reward = reward + self.gamma * self.state_values[new_state][best_action]
loss = (discounted_reward - self.state_values[state][action])
self.state_values[state][action] += self.alpha * loss
|
python
|
import unittest
from httpretty import HTTPretty
from notification_log import NotificationLog
import requests
import json
class NotificationLogTests(unittest.TestCase):
def setUp(self):
HTTPretty.enable()
self.client = NotificationLog(hostname='test.example.com', protocol='http')
def tearDown(self):
HTTPretty.disable()
def _api_url(self):
return 'http://test.example.com/notification_logs.json'
def test_create_notification_log_makes_a_post_request(self):
HTTPretty.register_uri(
HTTPretty.POST,
self._api_url(),
body='{"status": "Success"}',
content_type="application/json"
)
self.client.create_notification_log(['ENABLED_TOPIC_ID'], ['DISABLED_TOPIC_ID'], 'content_id', 'updated_at', 'request_id')
assert HTTPretty.last_request.method == 'POST'
self.assertEqual(json.loads(HTTPretty.last_request.body),
{
"gov_delivery_ids": ["ENABLED_TOPIC_ID", "DISABLED_TOPIC_ID"],
"enabled_gov_delivery_ids": ["ENABLED_TOPIC_ID"],
"disabled_gov_delivery_ids": ["DISABLED_TOPIC_ID"],
"publishing_app": "whitehall",
"emailing_app": "gov_uk_delivery",
"govuk_request_id": "request_id",
"public_updated_at": "updated_at",
"content_id": "content_id"
}
)
def test_create_notification_log_makes_a_post_request_with_multiple_matching_topics(self):
HTTPretty.register_uri(
HTTPretty.POST,
self._api_url(),
body='{"status": "Success"}',
content_type="application/json"
)
self.client.create_notification_log(['TOPIC_1', 'TOPIC_2'], [], 'content_id', 'updated_at', 'request_id')
assert HTTPretty.last_request.method == 'POST'
self.assertEqual(json.loads(HTTPretty.last_request.body),
{
"gov_delivery_ids": ["TOPIC_1", "TOPIC_2"],
"emailing_app": "gov_uk_delivery",
"enabled_gov_delivery_ids": ["TOPIC_1", "TOPIC_2"],
"disabled_gov_delivery_ids": [],
"publishing_app": "whitehall",
"govuk_request_id": "request_id",
"public_updated_at": "updated_at",
"content_id": "content_id"
}
)
def test_raise_error_on_non_2XX_status_code(self):
HTTPretty.register_uri(
HTTPretty.POST,
self._api_url(),
status=404
)
self.assertRaises(requests.HTTPError, self.client.create_notification_log, ['ENABLED_TOPIC_ID'], ['DISABLED_TOPIC_ID'], 'content_id', 'updated_at', 'request_id')
if __name__ == '__main__':
unittest.main()
|
python
|
from unittest.mock import patch
from main import main_handler
import sys
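# @patch replaces main.hello_world with a MagicMock for the duration of the test, so the
# assertion below checks that main_handler returns whatever hello_world is stubbed to return.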
@patch("main.hello_world")
def test_mock_external_incorrect(mock_hello_world):
mock_hello_world.return_value = "Hello Dolly!"
assert main_handler() == "Hello Dolly!"
for path in sys.path:
print(path)
|
python
|
# Character field ID when accessed: 925070100
# ObjectID: 0
# ParentID: 925070100
|
python
|
y, z, n = [int(input()) for _ in range(3)]
print("The 1-3-sum is", str(y + z*3 + n + 91))
|
python
|
import xarray as xr
import numpy as np
import pytest
import pathlib
from xarrayutils.file_handling import (
temp_write_split,
maybe_create_folder,
total_nested_size,
write,
)
@pytest.fixture
def ds():
    data = np.random.rand(4, 5, 12)  # shape (x, y, time); the time axis must match the 12 cftime periods below
time = xr.cftime_range("1850", freq="1AS", periods=12)
ds = xr.DataArray(data, dims=["x", "y", "time"], coords={"time": time}).to_dataset(
name="data"
)
return ds
@pytest.mark.parametrize("dask", [True, False])
@pytest.mark.parametrize("verbose", [True, False])
@pytest.mark.parametrize("already_exists", [True, False])
@pytest.mark.parametrize("method", ["dimension", "variables", "wrong"])
def test_temp_write_split(ds, dask, method, verbose, already_exists, tmpdir):
folder = tmpdir.mkdir("sub")
folder = pathlib.Path(folder)
# create test dataset
if dask:
ds = ds.chunk({"time": 1})
# write a manual copy (with wrong data) to test the erasing
(ds.isel(time=0) + 100).to_zarr(
folder.joinpath("temp_write_split_0.zarr"), consolidated=True
)
if method == "wrong":
with pytest.raises(ValueError):
temp_write_split(
ds,
folder,
method=method,
split_interval=1,
)
else:
ds_reloaded, filelist = temp_write_split(
ds,
folder,
method=method,
verbose=verbose,
split_interval=1,
)
xr.testing.assert_allclose(ds, ds_reloaded)
@pytest.mark.parametrize("sub", ["sub", "nested/sub/path"])
def test_maybe_create_folder(sub, tmp_path):
folder = pathlib.Path(tmp_path)
subfolder = folder.joinpath(sub)
maybe_create_folder(subfolder)
assert subfolder.exists()
with pytest.warns(UserWarning):
maybe_create_folder(subfolder)
def test_total_nested_size(ds):
# create a bunch of broadcasted copies of a dataset
a = ds.copy(deep=True).expand_dims(new=2)
b = ds.copy(deep=True).expand_dims(new=5)
c = ds.copy(deep=True).expand_dims(new=4, new_new=10)
# nest them into a dict
nested_dict = {"experiment_a": a, "experiment_b": {"label_x": b, "label_y": c}}
size_nested = total_nested_size(nested_dict)
assert size_nested == np.sum(np.array([i.nbytes for i in [a, b, c]]))
@pytest.mark.parametrize("strpath", [True, False])
@pytest.mark.parametrize("reload_saved", [True, False])
@pytest.mark.parametrize("overwrite", [True, False])
@pytest.mark.parametrize("filetype", [".nc", ".zarr"])
def test_write(ds, strpath, reload_saved, overwrite, filetype, tmpdir):
def _load(path):
if filetype == ".nc":
return xr.open_dataset(path, use_cftime=True)
else:
return xr.open_zarr(path, use_cftime=True)
folder = pathlib.Path(tmpdir)
path = folder.joinpath("file" + filetype)
if strpath:
path_write = str(path)
else:
path_write = path
write(ds, path)
assert path.exists()
xr.testing.assert_allclose(ds, _load(path))
# create modified
ds_modified = ds * 4
dummy = write(
ds_modified, path_write, overwrite=overwrite, reload_saved=reload_saved
)
if not overwrite:
# this should not overwrite
xr.testing.assert_allclose(_load(path_write), ds)
else:
# this should
xr.testing.assert_allclose(_load(path_write), ds_modified)
# check the reloaded file
dummy = dummy.load()
if reload_saved:
xr.testing.assert_allclose(dummy, _load(path_write))
else:
xr.testing.assert_allclose(dummy, ds_modified)
|
python
|
from snovault.elasticsearch.searches.interfaces import SEARCH_CONFIG
from snovault.elasticsearch.searches.configs import search_config
def includeme(config):
config.scan(__name__)
config.registry[SEARCH_CONFIG].add_aliases(ALIASES)
config.registry[SEARCH_CONFIG].add_defaults(DEFAULTS)
@search_config(
name='custom'
)
def custom_search_config():
return {
'facets': {
'assay_title': {
'title': 'Assay title',
'type': 'typeahead',
'open_on_load': True
},
'status': {
'title': 'Status',
"open_on_load": True
},
'target.label': {
'title': 'Target of assay',
'type': 'typeahead',
'length': 'long',
'open_on_load': True
},
'biosample_ontology.term_name' : {
'title': 'Biosample term name',
'type': 'typeahead',
'length': 'long',
"open_on_load": True
},
},
'columns': {},
'matrix': {},
'boost_values': {},
'fields': {},
}
@search_config(
name='custom-matrix'
)
def custom_matrix_config():
return {
'matrix': {
'y': {
'group_by': [
'award.rfa',
'lab.title',
],
'label': 'Lab',
},
'x': {
'group_by': 'assay_title',
'label': 'Assay',
}
}
}
@search_config(
name='custom-columns'
)
def custom_columns_config():
return {
'columns': {
'award.rfa': {
'title': 'Project',
},
'assay_title': {
'title': 'Assay',
},
'lab.title': {
'title': 'Lab',
},
'assembly': {
'title': 'Assembly',
},
'status': {
'title': 'Status',
},
}
}
# To allow FunctionalCharacterizationSeries columns properties to pass through for the search:
# type=FunctionalCharacterizationExperiment&type=FunctionalCharacterizationSeries&type=TransgenicEnhancerExperiment
# with effectively the FunctionalCharacterizationExperiment config.
@search_config(
name='FunctionalCharacterization'
)
def functional_characterization_data_view():
return {
'facets': {
'assay_title': {
'title': 'Assay title',
'open_on_load': True
},
'status': {
'title': 'Status',
'open_on_load': True
},
'elements_references.examined_loci.symbol': {
'title': 'Targeted loci',
'type': 'typeahead',
'open_on_load': True
},
'elements_references.elements_selection_method': {
'title': 'Elements selection method'
},
'elements_references.crispr_screen_tiling': {
'title': 'CRISPR screen tiling'
},
'examined_loci.gene.symbol': {
'title': 'Examined loci',
'type': 'typeahead',
'open_on_load': True
},
'perturbation_type': {
'title': 'Perturbation type'
},
'replicates.library.biosample.applied_modifications.guide_type': {
'title': 'Guide type'
},
'crispr_screen_readout': {
'title': 'CRISPR screen readout'
},
'replicates.library.biosample.applied_modifications.reagents.promoter_details': {
'title': 'Promoter details'
},
'replicates.library.biosample.applied_modifications.MOI': {
'title': 'Multiplicity of infection'
},
'replicates.library.biosample.donor.organism.scientific_name': {
'title': 'Organism',
'open_on_load': True
},
'biosample_ontology.classification': {
'title': 'Biosample classification'
},
'biosample_ontology.term_name' : {
'title': 'Biosample',
'type': 'typeahead',
'length': 'long',
'open_on_load': True
},
'biosample_ontology.organ_slims': {
'title': 'Organ',
'type': 'typeahead'
},
'biosample_ontology.cell_slims': {
'title': 'Cell',
'type': 'typeahead'
},
'replicates.library.biosample.disease_term_name': {
'title': 'Disease'
},
'replicates.library.biosample.treatments.treatment_term_name': {
'title': 'Biosample treatment'
},
'control_type': {
'type': 'exists',
'title': 'Hide control experiments'
},
'award.project': {
'title': 'Project'
},
'assembly': {
'title': 'Genome assembly'
},
'files.file_type': {
'title': 'Available file types'
},
'files.platform.term_name': {
'title': 'Platform'
},
'replicates.library.nucleic_acid_term_name': {
'title': 'Library material'
},
'date_released': {
'title': 'Date released'
},
'date_submitted': {
'title': 'Date submitted'
},
'lab.title': {
'title': 'Lab'
},
'replication_type': {
'title': 'Replication type'
},
'replicates.library.biosample.subcellular_fraction_term_name': {
'title': 'Cellular component'
},
'replicates.library.construction_platform.term_name': {
'title': 'Library construction platform'
}
},
'facet_groups': [
{
'title': 'Assay',
'facet_fields': [
'assay_slims',
'assay_title',
'control_type',
'perturbation_type',
'examined_loci.expression_measurement_method',
                'crispr_screen_readout',
'elements_references.crispr_screen_tiling',
'replicates.library.biosample.applied_modifications.guide_type',
'replicates.library.biosample.applied_modifications.MOI',
'replicates.library.biosample.applied_modifications.reagents.promoter_details',
'replicates.library.construction_platform.term_name'
]
},
{
'title': 'Elements',
'facet_fields': [
'elements_references.examined_loci.symbol',
'examined_loci.gene.symbol',
'elements_references.elements_selection_method'
]
},
{
'title': 'Biosample',
'facet_fields': [
'replicates.library.biosample.donor.organism.scientific_name',
'biosample_ontology.term_name',
'biosample_ontology.classification',
'biosample_ontology.organ_slims',
'biosample_ontology.cell_slims',
'replicates.library.biosample.life_stage',
'replicates.library.biosample.treatments.treatment_term_name',
'replicates.library.biosample.disease_term_name',
'replicates.library.nucleic_acid_term_name'
]
},
{
'title': 'Analysis',
'facet_fields': [
'files.platform.term_name',
'files.run_type',
'assembly',
'files.file_type'
]
},
{
'title': 'Provenance',
'facet_fields': [
'award.project',
'award.rfa',
'lab.title',
'date_submitted',
'date_released'
]
},
{
'title': 'Quality',
'facet_fields': [
'replication_type',
'replicates.library.size_range',
'files.read_length',
'files.mapped_read_length',
'status',
'internal_status',
'audit.ERROR.category',
'audit.NOT_COMPLIANT.category',
'audit.WARNING.category',
'audit.INTERNAL_ACTION.category'
]
}
]
}
ALIASES = {
'DonorSubtypes': [
'HumanDonor',
'FlyDonor',
'WormDonor',
'MouseDonor',
]
}
DEFAULTS = {
'Donor': ['DonorSubtypes'],
('Experiment', 'FunctionalCharacterizationExperiment'): ['Experiment'],
}
|
python
|
from bs4 import BeautifulSoup as bs
import requests
import pandas as pd
import random
url = 'https://www.oschina.net/widgets/index_tweet_list'
headers = {
'Accept':
'*/*',
'Accept-Encoding':
'gzip, deflate, br',
'Accept-Language':
'zh-CN,zh;q=0.9',
'Cache-Control':
'no-cache',
'Connection':
'keep-alive',
'Content-Length':
'0',
'Host':
'www.oschina.net',
'Origin':
'https://www.oschina.net',
'Pragma':
'no-cache',
'Referer':
'https://www.oschina.net/',
'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36',
'X-Requested-With':
'XMLHttpRequest'
}
ip_list = []
def get_ip_list():
global ip_list
ipurl = 'http://www.xicidaili.com/nn/'
header = {
'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
}
ip_data = requests.get(ipurl, headers=header)
soup = bs(ip_data.text, 'lxml')
ips = soup.find_all('tr')
for i in range(1, len(ips)):
ip_info = ips[i]
tds = ip_info.find_all('td')
        ip_list.append(tds[1].text + ":" + tds[2].text)  # column 1 is the IP address, column 2 the port
return ip_list
def get_random_ip(iplist):
proxy_list = []
for ip in iplist:
proxy_list.append('http://' + ip)
proxy_ip = random.choice(proxy_list)
proxyies = {'http': proxy_ip}
return proxyies
def get_msg():
web_data = requests.get(url, headers=headers)
df = bs(web_data.text, 'lxml')
# print(df.prettify())
re = df.find_all('p', {'class': 'tweet-content wrap'})
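    # each tweet <p> wraps an <a> whose title/href identify the author; entries without a title are skipped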
lst = []
for p in re:
href = p.find('a').get('href')
autor = p.find('a').get('title')
message = p.text
if autor:
res = {'autor': autor, 'href': href, 'message': message}
lst.append(res)
head = ["autor", "message", "href"]
df = pd.DataFrame(lst, columns=head)
print(df)
if __name__ == '__main__':
get_msg()
#print(get_ip_list())
|
python
|
from datetime import datetime
from testil import Config, eq
from .. import rebuildcase as mod
def test_should_sort_sql_transactions():
def test(sql_form_ids, couch_form_ids, expect):
sql_case = Config(
transactions=[Config(form_id=x, details={}) for x in sql_form_ids]
)
couch_json = {"actions": [{"xform_id": x} for x in couch_form_ids]}
print(sql_case)
print(couch_json)
eq(mod.should_sort_sql_transactions(sql_case, couch_json), expect)
yield test, "abc", "abc", False
yield test, "abc", "aabbcc", False
yield test, "abc", "acb", True
yield test, "abcd", "acb", False
yield test, "abd", "acb", False
def test_update_transaction_order():
def print_tx(label, txx):
print(f"transactions {label} update")
for tx in txx:
print(f" {tx.id} {tx.form_id: <1} {tx.server_date}")
def to_date(chr):
return dx((ord("z") + 1) if chr == " " else ord(chr))
def test(sql_form_ids, couch_form_ids, expect=None, n_changes=0):
if expect is None:
expect = sql_form_ids
tx_updates = []
sql_case = Config(
transactions=[
Config(id=i, form_id=x.strip(), server_date=to_date(x), details={})
for i, x in enumerate(sql_form_ids)
],
track_update=lambda tx: tx_updates.append(tx),
)
couch_json = {"actions": [{"xform_id": x.strip()} for x in couch_form_ids]}
print("couch case", couch_json)
print_tx("before", sql_case.transactions)
txx, server_dates = mod.update_transaction_order(sql_case, couch_json)
print_tx("after", txx)
eq("".join([tx.form_id if tx.form_id else " " for tx in txx]), expect)
eq(len(server_dates), n_changes, server_dates)
eq(len(tx_updates), n_changes, tx_updates)
yield test, "abc", "abc"
yield test, "abc ", "abc"
yield test, "abc", "aabbcc"
yield test, "abc", "acb", "acb", 1
yield test, "abc ", "a c b", "acb ", 1
def test_iter_ascending_dates():
def test(indices, expect):
dates = [dx(i) for i in indices]
actual_deltas = deltas(mod.iter_ascending_dates(dates))
expect_deltas = deltas(dx(i) for i in expect)
assert mod.is_strictly_ascending(expect_deltas), expect_deltas
eq(actual_deltas, expect_deltas)
yield test, [0, 1], [0, 1]
yield test, [0, 10, 20], [0, 10, 20]
yield test, [30, 10, 20], [9, 10, 20]
yield test, [30, 30, 30, 10, 20], [7, 8, 9, 10, 20]
yield test, [1, 3, 3, 3, 3, 2], [1, 1.2, 1.4, 1.6, 1.8, 2]
yield test, [0, 20, 10], [0, 5, 10]
yield test, [0, 10, 20, 10], [0, 10, 20, 21]
yield test, [40, 50, 60, 70, 10, 20, 30], [40, 50, 60, 70, 71, 72, 73]
def test_longest_increasing_subsequence_indices():
def test(seq, expect):
eq(mod.longest_increasing_subsequence_indices(seq), expect)
yield test, [], []
yield test, [2], [0]
yield test, [7, 2], [1]
yield test, [3, 7], [0, 1]
yield test, [3, 6, 9], [0, 1, 2]
yield test, [3, 9, 6], [0, 2]
yield test, [3, 6, 6], [0, 2]
yield test, [3, 6, 6, 6, 6, 9], [0, 4, 5]
yield test, [7, 2, 6, 4, 5, 1], [1, 3, 4]
yield test, [18, 12, 17, 16, 14, 15, 16, 11], [1, 4, 5, 6]
def dx(minutes_since_epoch):
return datetime.fromtimestamp(minutes_since_epoch * 60)
def deltas(dates, d0=dx(0)):
return [str(d - d0) for d in dates]
|
python
|
#!/usr/bin/env python3
import socket, time, sys
from multiprocessing import Process
HOST = ""
PORT = 8001
BUFFER_SIZE = 1024
#TO-DO: get_remote_ip() method
def get_remote_ip(host):
print(f'Getting IP for {host}')
try:
remote_ip = socket.gethostbyname( host )
except socket.gaierror:
print ('Hostname could not be resolved. Exiting')
sys.exit()
    print(f'IP address of {host} is {remote_ip}')
return remote_ip
#TO-DO: handle_request() method
def handle_request(conn,proxy_end):
#send the data
send_full_data = conn.recv(BUFFER_SIZE)
print(f"Sending Recieved Data {send_full_data} to Google.com")
proxy_end.sendall(send_full_data)
#shutdown
proxy_end.shutdown(socket.SHUT_WR)
data = proxy_end.recv(BUFFER_SIZE)
print(f"Sending Recieved Data {data} to Client")
#send the data back
conn.send(data)
def main():
#TO-DO: establish localhost, extern_host (google), port, buffer size
extern_host = 'www.google.com'
port = 80
buffer_size = 1024
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as proxy_start: #establish "start" of proxy (connects to localhost)
#to-do bind, and set to listening mode
print("Starting Proxy Server")
proxy_start.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
proxy_start.bind((HOST, PORT))
proxy_start.listen(1)
while True:
            #to-do: accept incoming connections from proxy_start, print information about the connection
conn, addr = proxy_start.accept()
print("Connected By...", addr)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as proxy_end: #establish "end" of proxy (connects to google)
#to-do: get remote IP of google, connect proxy_end to it
print("Connecting to Google")
remote_ip = get_remote_ip(extern_host)
proxy_end.connect((remote_ip , port))
#now for the multiprocessing
                #to-do: allow for multiple connections with a process daemon
#make sure to set target = handle_request when creating the process
p = Process(target = handle_request, args=(conn,proxy_end))
p.daemon = True
p.start()
print("Started Process... ", p)
                #to-do: close the connection
conn.close()
if __name__ == "__main__":
main()
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 26 19:56:37 2017
@author: Simon Stiebellehner
https://github.com/stiebels
This file implements three classifiers:
- XGBoost (Gradient Boosted Trees)
- Random Forest
- AdaBoost (Decision Trees)
These three base classifiers are then ensembled in two ways:
(1) Weighting and combining their individual outputs to form a 'committee vote'.
(2) Using the outputs of the three models as inputs to a 4th classifier (Gradient Boosted Trees),
which then outputs the final predictions (stacking).
"""
#%%
# Package Imports
import sys
sys.path.append("../../")
import util
import random
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.cross_validation import train_test_split
from sklearn.metrics import f1_score
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import cross_val_score
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from mlxtend.classifier import StackingClassifier, EnsembleVoteClassifier
from skopt import gp_minimize
seed = 1
random.seed(seed)
np.random.seed(seed)
#%% Function initialization for later
# Tuning set for last ensembling weights
def get_meta_sets():
'''
Use a meta-set (additional holdout set) for training the meta-classifier in the stacked ensemble.
'''
df_ens_eval = pd.read_csv('/specify/path/here/fold1_cleaned.csv')
df_ens_sel = df_ens_eval.drop('Unnamed: 0', axis=1)
df_sample = util.make_sample(df_ens_sel, num_q=500) # randomly sampling from the data
X_ens, Y_ens = util.sep_feat_labels(df_sample)
df_ens_sel.ix[:,1:] = StandardScaler().fit_transform(df_sample.ix[:,1:]) # rescaling features
x_train_ens, x_dev_ens, y_train_ens, y_dev_ens = train_test_split(X_ens, Y_ens, test_size=0.2, random_state=seed)
x_train_ens_qid = x_train_ens['qid'].copy()
x_train_ens = x_train_ens.ix[:,1:].copy()
x_dev_ens_qid = x_dev_ens['qid'].copy()
x_dev_ens = x_dev_ens.ix[:,1:].copy()
# In case of using cross_val_score we do not need separate train and dev, so let's merge it here again
X_ens = pd.concat([x_train_ens, x_dev_ens], axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
copy=True)
Y_ens = pd.concat([y_train_ens, y_dev_ens], axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
copy=True)
X_ens_qid = pd.concat([x_train_ens_qid, x_dev_ens_qid], axis=0, join='outer', join_axes=None, ignore_index=False,
keys=None, levels=None, names=None, verify_integrity=False,
copy=True)
return X_ens, Y_ens, X_ens_qid
def constrained_sum_sample_pos(n, total):
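    # returns n random positive integers summing to `total`; used below to draw random ensemble weight vectors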
dividers = sorted(random.sample(range(1, total), n - 1))
return [a - b for a, b in zip(dividers + [total], [0] + dividers)]
#%%
# Data Import
# SPECIFY
directory = '/specify/path/here'
fold = 1 # 1 | 2 | 3 | 4 | 5
dataset = 'train' # 'train' | 'vali' | 'test'
path = directory+'Fold'+str(fold)+'/'+dataset+'_cleaned.csv' # importing CLEANED dataset
df_train = pd.read_csv(str(path), index_col=0)
df_test = pd.read_csv(directory+'Fold'+str(fold)+'/'+'test'+'_cleaned.csv', index_col=0)
#%%
# Splitting dataset
x_train, y_train = util.sep_feat_labels(df_train)
x_test, y_test = util.sep_feat_labels(df_test)
x_train.ix[:,1:] = StandardScaler().fit_transform(x_train.ix[:,1:]) # rescaling features
x_test.ix[:,1:] = StandardScaler().fit_transform(x_test.ix[:,1:]) # rescaling features
x_train_qid = x_train['qid'].copy()
x_train = x_train.ix[:,1:].copy()
x_test_qid = x_test['qid'].copy()
x_test = x_test.ix[:,1:].copy()
#%% Bayesian Optimization Functions
# The Bayesian-optimisation objectives score mean micro-F1 on the training split
# (negated, since gp_minimize minimises).
def ada_objective(params):
    trees, lr = params
    clf.set_params(n_estimators=trees, learning_rate=lr, random_state=1)
    return -np.mean(cross_val_score(clf, x_train, y_train, n_jobs=-1, scoring='f1_micro', verbose=2))
def xgb_objective(params):
    max_depth, min_child, gamma, scale_pos_weight = params
    clf.set_params(scale_pos_weight=scale_pos_weight, max_depth=max_depth, min_child_weight=min_child, gamma=gamma)
    return -np.mean(cross_val_score(clf, x_train, y_train, n_jobs=-1, scoring='f1_micro', verbose=2))
def rf_objective(params):
    trees = params[0]
    clf.set_params(n_estimators=trees, random_state=1)
    return -np.mean(cross_val_score(clf, x_train, y_train, n_jobs=-1, scoring='f1_micro', verbose=2))
def stacking_objective(params):
max_depth, min_child, gamma, scale_pos_weight = params
clf = StackingClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=xgb.XGBClassifier(scale_pos_weight=scale_pos_weight, max_depth=max_depth, min_child_weight=min_child, gamma=gamma))
    return -np.mean(cross_val_score(clf, X_ens, Y_ens, n_jobs=-1, scoring='f1_micro', verbose=2))  # negated for gp_minimize, consistent with the other objectives
#%%
# Dashboard
estimator = 'ensemble'
tune = False
if estimator == 'ada':
'''
AdaBoost (with Decision Stumps)
'''
clf = AdaBoostClassifier(n_estimators = 143, learning_rate = 0.9321253)
if tune == True:
params = [(20, 200), (0.3, 1.5)]
res_gp = gp_minimize(ada_objective, params, n_calls = 10, random_state=1)
'''
    Optimum Parameters:
n_estimators = 143
learning_rate = 0.9321253
'''
elif estimator == 'xgb':
'''
XGBoost (Gradient Boosted Trees)
'''
opt_max_depth = 7
opt_min_child = 2.0550232716937149
opt_gamma = 1
opt_scale_pos_weight = 0
clf = xgb.XGBClassifier(max_depth = 7, min_child_weight = 2.0550232716937149, gamma = 1, scale_pos_weight = 0)
if tune == True:
params = [(3, 10), (0.5, 5), (0, 5), (0, 1)]
res_gp = gp_minimize(xgb_objective, params, n_calls = 10, random_state=1)
'''
Optimum Parameters:
- max_depth: 7
- min_child_weight: 2.0550232716937149
- gamma: 1
- scale_pos_weight: 0
'''
elif estimator == 'rf':
'''
Random Forest
'''
opt_n_estimators = 139
clf = RandomForestClassifier(n_estimators = opt_n_estimators, random_state=1)
if tune == True:
params = [(10,200)]
res_gp = gp_minimize(rf_objective, params, n_calls = 10, random_state=1)
'''
Optimum Parameters:
- n_estimators: 139
'''
elif estimator == 'stacking':
'''
Stacked Ensemble (meta-classifier)
'''
clf1 = RandomForestClassifier(n_estimators = 139)
clf2 = xgb.XGBClassifier(max_depth = 7, min_child_weight = 2.0550232716937149, gamma = 1, scale_pos_weight = 0)
clf3 = AdaBoostClassifier(n_estimators = 143, learning_rate = 0.9321253)
mclf = xgb.XGBClassifier(max_depth = 8, min_child_weight = 0.9155236764595901, gamma = 2, scale_pos_weight = 1)
clf = StackingClassifier(classifiers=[clf1, clf2, clf3], meta_classifier=mclf)
if tune == True:
X_ens, Y_ens, X_ens_qid = get_meta_sets()
params = [(3, 10), (0.5, 5), (0, 5), (0, 1)]
res_gp = gp_minimize(stacking_objective, params, n_calls = 10, random_state=1)
'''
Optimum Parameters:
- max_depth: 8
- min_child_weight: 0.9155236764595901
- gamma: 2
- scale_pos_weight: 1
'''
elif estimator == 'ensemble':
'''
Weighted Ensemble
'''
clf1 = RandomForestClassifier(n_estimators = 139)
clf2 = xgb.XGBClassifier(max_depth = 7, min_child_weight = 2.0550232716937149, gamma = 1, scale_pos_weight = 0)
clf3 = AdaBoostClassifier(n_estimators = 143, learning_rate = 0.9321253)
clf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], weights=[2,6,4], voting='soft')
if tune == True:
acc_list = []
param_list = []
X_ens, Y_ens, X_ens_qid = get_meta_sets()
runs = 10
for run in range(0, runs):
rand_weights = constrained_sum_sample_pos(3,12)
clf = EnsembleVoteClassifier(clfs=[clf1, clf2, clf3], weights=rand_weights)
acc_list.append(np.mean(cross_val_score(clf, x_train, y_train, n_jobs=-1, scoring='f1_micro', verbose=2)))
param_list.append(rand_weights)
best_param = param_list[acc_list.index(max(acc_list))]
print('Best Weights: '+str(best_param))
print('... with best F1 Accuracy: '+str(max(acc_list)))
'''
Optimum Weights:
- [2, 6, 4]
'''
if tune == False:
'''
3 runs of CV on train/dev set.
'''
score = np.mean(cross_val_score(clf, np.array(x_train), np.array(y_train), scoring='f1_micro', verbose=2)) # mean of 3-runs of cross-validation
print('F1 Micro Score: '+str(score))
#%%
# Computing some scores on test set
clf_fit = clf.fit(x_train, y_train)
preds = clf_fit.predict(x_test)
print('F1 Score --> '+str(f1_score(np.array(preds), np.array(y_test), average='micro')))
print('NDCG@k --> '+str(round(util.get_ndcg(x_dev_qid=x_test_qid, preds=preds, y_dev=y_test)*100, 2))+' %')
|
python
|
from django.shortcuts import render
from .models import Entry
from rest_framework import generics
from .serializers import EntrySerializer
# Create your views here.
class EntryCreate(generics.ListCreateAPIView):
# Allows creation of a new entry
queryset = Entry.objects.all()
serializer_class = EntrySerializer
class EntryList(generics.ListAPIView):
# Allows entries to be listed and viewed
queryset = Entry.objects.filter(deleted=0).order_by('date_time')
serializer_class = EntrySerializer
class EntryListMember(generics.ListAPIView):
# Allows entries to be listed and viewed
serializer_class = EntrySerializer
def get_queryset(self):
queryset = Entry.objects.filter(deleted=0).order_by('-date_time')
user = self.request.query_params.get('user')
queryset = queryset.filter(user=user)
return queryset
class EntryListPublic(generics.ListAPIView):
# Allows entries to be listed and viewed
queryset = Entry.objects.filter(public=1, deleted=0).order_by('-date_time')
serializer_class = EntrySerializer
class EntryDetail(generics.RetrieveAPIView):
# Returns a single entry using primary key
queryset = Entry.objects.all()
serializer_class = EntrySerializer
class EntryUpdate(generics.RetrieveUpdateAPIView):
# Allows record to be updated
queryset = Entry.objects.all()
serializer_class = EntrySerializer
class EntryDelete(generics.RetrieveDestroyAPIView):
# Allows entry to be deleted
queryset = Entry.objects.all()
serializer_class = EntrySerializer
|
python
|
#!/usr/bin/python
#import sys
import boto3
import datetime
from Crypto import Random
from Crypto.Cipher import AES
import argparse
import base64
from argparse import Namespace
from keypot_exceptions.KeypotError import KeypotError
#VERSION
keypot_version='Keypot-0.3'
ddb_hash_key_name='env-variable-name'
#Pads the data to suit the AES-256 encryption requirements
BS = 16
pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS)
unpad = lambda s : s[0:-ord(str(s[-1]))]
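# pad appends N copies of chr(N) so the plaintext length is a multiple of the 16-byte AES block (PKCS#7 style);
# unpad reads that count back from the final character and strips it.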
#Static messages
string_empty_table='Table was empty - nothing to list!'
string_key_already_exists='Variable already exists in DynamoDB - please use the overwrite flag to add this to the database.'
string_delete_failed='An issue occurred while trying to delete - no ResponseMetadata received'
class Keypot():
kms = None
ddb = None
boto_master_key_id = None
ddb_table_name = None
parameter_key = None
parameter_value = None
parameter_file = None
overwrite_flag = False
region = None
def __init__(self, args):
if args:
if 'kms_key' in args:
if (args['kms_key']):
self.boto_master_key_id=args['kms_key']
if 'ddb_table' in args:
if (args['ddb_table']):
self.ddb_table_name=args['ddb_table']
if 'parameter_key' in args:
if (args['parameter_key']):
self.parameter_key=args['parameter_key']
if 'parameter_value' in args:
if (args['parameter_value']):
self.parameter_value=args['parameter_value']
if ('parameter_file' in args):
if (args['parameter_file']):
self.parameter_file=args['parameter_file']
if ('overwrite' in args):
if (args['overwrite']):
if (str(args['overwrite']).lower() == 'true'):
self.overwrite_flag=True
else:
self.overwrite_flag=False
if ('region' in args):
if (args['region']):
self.region=args['region']
self.setup_clients()
else:
print('Invalid input - arguments appear to be empty!')
def setup_clients(self):
if self.region is not None:
if self.region != '':
self.ddb = boto3.client('dynamodb', region_name=self.region)
self.kms = boto3.client('kms', region_name=self.region)
return
self.ddb = boto3.client('dynamodb')
self.kms = boto3.client('kms')
return
#Used to encrypt locally on this machine using the key generated from KMS
@staticmethod
def local_encrypt(message, key, key_size=256):
message = pad(str(message))
iv = Random.new().read(AES.block_size)
        # CBC mode is what the randomly generated, prepended IV implies; ECB would ignore the IV entirely
        cipher = AES.new(key, AES.MODE_CBC, iv)
return iv + cipher.encrypt(message)
#Used to decrypt locally on this machine using the key decrypted from KMS
@staticmethod
def local_decrypt(ciphertext, key):
iv = ciphertext[:AES.block_size]
        cipher = AES.new(key, AES.MODE_CBC, iv)
plaintext = cipher.decrypt(ciphertext[AES.block_size:])
return unpad(plaintext.decode('ASCII'))
def encrypt_and_store(self):
#Generate a key using KMS service
data_key = self.kms.generate_data_key(KeyId=self.boto_master_key_id,KeySpec='AES_256')
encrypted_data_key = data_key['CiphertextBlob']
plaintext_data_key = data_key['Plaintext']
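        # Envelope encryption: the plaintext data key is used only in memory to encrypt the value;
        # just the KMS-encrypted copy of the key is persisted alongside the ciphertext in DynamoDB.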
#encrypt data locally and write it to Dynamo
encrypted_data = Keypot.local_encrypt(self.parameter_value,plaintext_data_key)
self.ddb.put_item(
TableName=self.ddb_table_name,
Item={
'env-variable-name': {
'S': self.parameter_key
},
'env-variable-enc-value': {
'B': encrypted_data
},
'env-variable-enc-kms-key': {
'B': encrypted_data_key
}
}
)
return
def read_value_from_file(self):
with open(self.parameter_file, 'r') as f:
read_value=f.read()
f.closed
return read_value
#Used to decrypt the data key pulled from DynamoDB using KMS
def decrypt_kms_data(self, encrypted_data):
decrypted = self.kms.decrypt(CiphertextBlob=encrypted_data)
return decrypted
#Pull data dictionary from DynamoDB
def read_from_ddb(self):
response = self.ddb.get_item(
TableName=self.ddb_table_name,
Key={
'env-variable-name': {
'S': self.parameter_key
}
}
)
return response
    #Scan DynamoDB for the list of stored variable names
def list_from_ddb(self):
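        # '#E' is an ExpressionAttributeNames alias for the hash key, since attribute names containing '-' cannot appear directly in a ProjectionExpression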
response = self.ddb.scan(TableName=self.ddb_table_name,ProjectionExpression="#E",ExpressionAttributeNames={"#E": ddb_hash_key_name})
if response['Count'] > 0:
return response['Items']
#empty table
return(string_empty_table)
def delete_from_ddb(self):
#Key should always be a 'S' (String) type
response = self.ddb.delete_item(
TableName=self.ddb_table_name,
Key={
'env-variable-name': {
'S': self.parameter_key
}
}
)
return response
def do_encrypt(self):
#check if parameter already exists (only if overwrite is true, otherwise just blindly overwrite)
if self.overwrite_flag == False:
ddb_pull = self.read_from_ddb()
if ddb_pull:
if 'Item' in ddb_pull:
raise KeypotError(string_key_already_exists)
#reads the file from disk specified in args
if self.parameter_file:
self.parameter_value=self.read_value_from_file()
#perform encrypt/DDB operations
self.encrypt_and_store()
return('Parameter ' + self.parameter_key + ' uploaded successfully')
def do_decrypt(self):
#Read and decrypt
returned_variable_dict = self.read_from_ddb()
returned_db_value = returned_variable_dict['Item']['env-variable-enc-value']['B']
returned_db_kms_encrypted_key = returned_variable_dict['Item']['env-variable-enc-kms-key']['B']
kms_decrypted_key = self.decrypt_kms_data(returned_db_kms_encrypted_key)['Plaintext']
final_value = Keypot.local_decrypt(returned_db_value, kms_decrypted_key)
return(final_value)
def do_delete(self):
#Removes from DynamoDB based on DDB key
delete_result = self.delete_from_ddb()
if 'ResponseMetadata' in delete_result:
if delete_result['ResponseMetadata']['HTTPStatusCode'] == 200:
return('Successfully removed ' + str(self.parameter_key) + ' from ' + self.ddb_table_name)
else:
return('A problem occurred - unable to remove ' + str(self.parameter_key) + ' from ' + self.ddb_table_name)
return(string_delete_failed)
def do_list(self):
#get list of "String" key attributes from DynamoDB
variable_list = self.list_from_ddb()
return variable_list
#default entry point - possible future enhancement would be to turn this into a lambda function
def keypot_cli():
parser = {}
action='super'
parser[action] = argparse.ArgumentParser(description='Keypot - Encrypts, Decrypts, and Manages Secrets stored in AWS DynamoDB with KMS key')
parser[action].add_argument('-v','--version', action='version', version=(keypot_version))
subparser = parser['super'].add_subparsers(help='For more information and usage information, get help by using the {name} -h syntax')
#encrypt
action='encrypt'
parser[action] = subparser.add_parser(action, help='Keypot Encrypt - Encrypts value in DynamoDB using KMS')
#does not support both value and an input file, so using a mutually exclusive group
encrypt_mutual_exclusive = parser[action].add_mutually_exclusive_group()
encrypt_mutual_exclusive.add_argument('-f','--parameter_file', help='Location of file you want to upload (e.g. SSL private key). One of this or parameter_value required.',required=False)
parser[action].add_argument('-k','--kms_key', help='Name of AWS KMS Customer Master Key (ex: alias/test-key)',required=True)
parser[action].add_argument('-p','--parameter_key', help='Name of Parameter to put into DynamoDB',required=True)
parser[action].add_argument('-r','--region', help='Name of AWS Region to use for both KMS and DynamoDB',required=False)
parser[action].add_argument('-t','--ddb_table', help='Name of existing DynamoDB Table to use in look-up',required=True)
parser[action].add_argument('-o','--overwrite', action='store_true', help='Force overwrite of existing value in DynamoDB without prompting for overwrite',required=False,default=False)
encrypt_mutual_exclusive.add_argument('-v','--parameter_value', help='Value of Parameter to put into DynamoDB. One of this or parameter_file required.',required=False)
parser[action].set_defaults(action=action)
#decrypt
action='decrypt'
parser[action] = subparser.add_parser(action, help='Keypot Decrypt - Decrypt value in DynamoDB using KMS')
parser[action].add_argument('-k','--kms_key', help='Name of AWS KMS Customer Master Key (ex: alias/test-key)',required=True)
parser[action].add_argument('-p','--parameter_key', help='Name of Parameter to put into DynamoDB',required=True)
parser[action].add_argument('-r','--region', help='Name of AWS Region to use for both KMS and DynamoDB',required=False)
parser[action].add_argument('-t','--ddb_table', help='Name of existing DynamoDB Table to use in look-up',required=True)
parser[action].set_defaults(action=action)
#list
action='list'
    parser[action] = subparser.add_parser(action, help='Keypot List - List all keys available in DynamoDB')
parser[action].add_argument('-r','--region', help='Name of AWS Region to use for both KMS and DynamoDB',required=False)
parser[action].add_argument('-t','--ddb_table', help='Name of existing DynamoDB Table to use in look-up',required=True)
parser[action].set_defaults(action=action)
#delete
action='delete'
    parser[action] = subparser.add_parser(action, help='Keypot Delete - Removes a key from DynamoDB')
parser[action].add_argument('-p','--parameter_key', help='Name of Parameter to put into DynamoDB',required=True)
parser[action].add_argument('-r','--region', help='Name of AWS Region to use for both KMS and DynamoDB',required=False)
parser[action].add_argument('-t','--ddb_table', help='Name of existing DynamoDB Table to use in look-up',required=True)
parser[action].set_defaults(action=action)
#based on sub-argument, send to correct function
#change Namespace args back to dictionary so that we get consistent behavior between Lambda and CLI versions
super_args = parser['super'].parse_args()
result=None
if "action" in vars(super_args):
if super_args.action == 'encrypt':
result=Keypot(vars(super_args)).do_encrypt()
if super_args.action == 'decrypt':
result=Keypot(vars(super_args)).do_decrypt()
if super_args.action == 'list':
list_result=Keypot(vars(super_args)).do_list()
if list_result:
if isinstance(list_result,str):
print(list_result)
elif isinstance(list_result,list):
for var in list_result:
print(var[ddb_hash_key_name]['S'])
if super_args.action == 'delete':
result=Keypot(vars(super_args)).do_delete()
if result:
print(result)
return
#entry point for the lambda function
#This function is to massage input to match the rest of the CLI function, and customize any output for Lambda consumption
def keypot_lambda_handler(event, context):
if ('action' not in event) or ('options' not in event):
raise KeypotError('Invalid Input - missing either action or options!')
lambda_keypot=Keypot(event['options'])
#ENCRYPT
if event['action'] == 'encrypt':
#Put note about using file method - will implement an "upload from S3" option
if ('file' in event['options']):
return('File upload is not supported by Lambda invocation. Please use upload from S3')
output_string=lambda_keypot.do_encrypt()
return(output_string)
#DECRYPT
if event['action'] == 'decrypt':
output_string=lambda_keypot.do_decrypt()
return output_string
#LIST
if event['action'] == 'list':
variable_list=lambda_keypot.do_list()
output_string=''
if variable_list:
for var in variable_list:
output_string+=var[ddb_hash_key_name]['S']
output_string+='\n'
return output_string
#DELETE
if event['action'] == 'delete':
output_string=lambda_keypot.do_delete()
return output_string
#primary method when executed directly
if __name__ == '__main__':
keypot_cli()
|
python
|
import os
from dotenv import load_dotenv,find_dotenv
# not used in this stub but often useful for finding various files
project_dir = os.path.join(os.path.dirname(__file__), os.pardir)
load_dotenv(find_dotenv())
|
python
|
from ..views import add, configure, delete
|
python
|
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
# Copyright (c) 2014 The New York Times Company
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='statsd-rabbitmq',
version='0.0.2',
description="A statsd plugin, written in python, to"
"collect statistics from RabbitMQ.",
long_description=readme + '\n\n' + history,
author="Mike Buzzetti",
author_email='[email protected]',
url='https://github.com/NYTimes/statsd-rabbitmq',
packages=[
'statsd_rabbitmq',
],
package_dir={'statsd_rabbitmq': 'statsd_rabbitmq'},
include_package_data=True,
install_requires=requirements,
license="Apache",
zip_safe=False,
keywords='statsd-rabbitmq',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
],
test_suite='tests',
tests_require=test_requirements,
data_files=[('share/statsd-rabbitmq/', ['config/types.db.custom'])],
)
|
python
|
# Author: Xinshuo Weng
# Email: [email protected]
# this file includes general help functions for MUGSY data
import init_paths
init_paths.main()
from check import isstring, isscalar
from file_io import get_sheet_service, update_patchs2sheet, get_data_from_sheet, update_row2sheet
# # number of points for all parts
# num_pts = dict()
# num_pts['face_old'] = 20
# num_pts['face'] = 26
# num_pts['left_upper_eyelid'] = 30
# num_pts['right_upper_eyelid'] = 30
# num_pts['upper_eyelid'] = 30
# num_pts['left_lower_eyelid'] = 17
# num_pts['right_lower_eyelid'] = 17
# num_pts['lower_eyelid'] = 17
# num_pts['nose'] = 10
# num_pts['outer_lip'] = 16
# num_pts['inner_lip'] = 16
# num_pts['upper_teeth'] = 18
# num_pts['lower_teeth'] = 18
# num_pts['left_ears'] = 19
# num_pts['right_ears'] = 19
# num_pts['ears'] = 19
# num_pts['mouth'] = 68
# # num_pts['iris'] = 3
# # num_pts['pupil'] = 3
# num_pts['overall'] = 236
# # index offset of keypoints for all parts
# index_offset = dict()
# index_offset['face_old'] = 0
# index_offset['face'] = 0
# index_offset['left_upper_eyelid'] = 26
# index_offset['right_upper_eyelid'] = 56
# index_offset['left_lower_eyelid'] = 86
# index_offset['right_lower_eyelid'] = 103
# index_offset['nose'] = 120
# index_offset['outer_lip'] = 130
# index_offset['inner_lip'] = 146
# index_offset['upper_teeth'] = 162
# index_offset['lower_teeth'] = 162 # lower teeth already has offset in the raw_annotations
# index_offset['left_ears'] = 198
# index_offset['right_ears'] = 217
# index_offset['mouth'] = 130
# # index_offset['iris'] = 236
# # index_offset['pupil'] = 239
# index_offset['overall'] = 0
# anno_version = 1
# number of points for all parts
num_pts = dict()
num_pts['face_old'] = 20
num_pts['face'] = 26
num_pts['left_upper_eyelid'] = 24
num_pts['right_upper_eyelid'] = 24
num_pts['upper_eyelid'] = 24
num_pts['left_lower_eyelid'] = 17
num_pts['right_lower_eyelid'] = 17
num_pts['lower_eyelid'] = 17
num_pts['nose'] = 10
num_pts['outer_lip'] = 16
num_pts['inner_lip'] = 16
num_pts['upper_teeth'] = 18
num_pts['lower_teeth'] = 18
num_pts['left_ears'] = 19
num_pts['right_ears'] = 19
num_pts['ears'] = 19
num_pts['mouth'] = 68
# num_pts['iris'] = 3
# num_pts['pupil'] = 3
num_pts['overall'] = 224
# index offset of keypoints for all parts
index_offset = dict()
index_offset['face_old'] = 0
index_offset['face'] = 0
index_offset['left_upper_eyelid'] = 26
index_offset['right_upper_eyelid'] = 50
index_offset['left_lower_eyelid'] = 74
index_offset['right_lower_eyelid'] = 91
index_offset['nose'] = 108
index_offset['mouth'] = 118
index_offset['outer_lip'] = 118
index_offset['inner_lip'] = 134
index_offset['upper_teeth'] = 150
index_offset['lower_teeth'] = 150 # lower teeth already has offset in the raw_annotations
index_offset['left_ears'] = 186
index_offset['right_ears'] = 205
# index_offset['iris'] = 224
# index_offset['pupil'] = 227
index_offset['overall'] = 0
anno_version = 2
rotate_degree = {'330001': 90, '330005': -90, '330006': -90, '330007': -90, '330010': 90, '330011': -90, '330012': -90, '330013': -90, '330014': -90, '330015': -90,
'330016': -90, '330017': -90, '330018': -90, '330019': 90, '330020': 90, '330021': 90, '330022': 90, '330023': -90, '330024': -90, '330025': -90,
'330026': -90, '330027': -90, '330028': -90, '330029': -90, '330030': -90, '330031': -90, '330032': -90, '330033': -90, '330034': -90, '330035': 90,
'330036': -90, '330037': -90, '330038': 90, '330039': -90, '330040': -90, '330041': -90, '330042': -90, '330043': -90, '330044': -90, '330045': -90}
rotate_degree_v2 = {'330000': -90, '330005': 90, '330006': -90, '330007': 90, '330010': -90, '330011': -90, '330012': -90, '330014': 90, '330015': -90, '330016': 90,
'330017': 90, '330018': -90, '330019': -90, '330020': 90, '330022': 90, '330023': 90, '330024': -90, '330025': 90, '330026': -90, '330027': -90,
'330028': 90, '330029': 90, '330030': 90, '330031': 90, '330032': 90, '330033': 90, '330036': 90, '330037': 90, '330038': -90, '330040': -90,
'330041': -90, '330042': -90, '330043': -90, '330045': -90, '400004': -90, '400007': -90, '400008': -90, '400010': 90, '400012': 90, '400017': -90,
'400021': 90, '400024': 90, '400025': 90, '400028': 90, '400036': -90, '400039': -90, '400040': 90, '400041': -90, '410001': -90, '410004': 90,
'410016': 90, '410018': -90, '410019': 90, '410029': 90, '410033': 90, '410043': -90, '410044': -90, '410045': 90, '410048': -90, '410049': 90,
'410050': 90, '410051': -90, '410053': 90, '410057': -90, '410061': -90, '410066': 90, '410067': 90, '410068': -90, '410069': 90, '410070': -90,
'410073': -90,}
def get_rotate_dict():
return rotate_degree
def get_rotate_degree(camera_id, debug=True):
if debug:
assert isstring(camera_id), 'the input camera id is not a string for getting rotation degree'
assert camera_id in get_camera_list(), 'the camera id requested: %s does not exist' % camera_id
return rotate_degree[camera_id]
def get_rotate_dict_v2():
return rotate_degree_v2
def get_rotate_degree_v2(camera_id, debug=True):
if debug:
assert isstring(camera_id), 'the input camera id is not a string for getting rotation degree'
        assert camera_id in get_camera_list_v2(), 'the camera id requested: %s does not exist' % camera_id
return rotate_degree_v2[camera_id]
def get_compact_subset_list():
return ['face', 'ears', 'lower_eyelid', 'upper_eyelid', 'nose', 'outer_lip', 'inner_lip', 'upper_teeth', 'lower_teeth', 'mouth']
def get_detailed_subset_list():
return ['face', 'left_ears', 'right_ears', 'left_lower_eyelid', 'right_lower_eyelid', 'left_upper_eyelid', 'right_upper_eyelid', 'nose', 'outer_lip', 'inner_lip', 'upper_teeth', 'lower_teeth', 'mouth']
def get_camera_list():
return ['330001', '330005', '330006', '330007', '330010', '330011', '330012', '330014', '330015', '330016', '330017', '330018', '330019', '330020', '330021', '330022', '330023',
'330024', '330025', '330026', '330027', '330028', '330029', '330030', '330031', '330032', '330033', '330034', '330035', '330036', '330037', '330038', '330039', '330040',
'330041', '330042', '330043', '330044', '330045']
def get_camera_list_v2():
return rotate_degree_v2.keys()
def subset_detailed_convert2compact(subset, debug=True):
'''
convert a subset in detailed version to the corresponding compact version
'''
if debug:
assert subset in get_detailed_subset_list() or subset == 'face_old', 'the input subset is not in the detailed subset list'
if subset == 'left_lower_eyelid' or subset == 'right_lower_eyelid':
return 'lower_eyelid'
elif subset == 'left_upper_eyelid' or subset == 'right_upper_eyelid':
return 'upper_eyelid'
elif subset == 'left_ears' or subset == 'right_ears':
return 'ears'
else:
return subset
def get_left_camera_list():
return ['330035', '330039', '330036', '330024', '330023', '330045',
'330021', '330040', '330037', '330041', '330043', '330033', '330028', '330030',
'330025', '330042', '330038', '330020', '330012']
def get_right_camera_list():
return ['330016', '330011', '330032', '330017', '330005', '330019',
'330014', '330034', '330006', '330015', '330018', '330026', '330044', '330027', '330022',
'330031', '330010', '330001', '330029', '330014', '330007' ]
def get_filename(recording_id, recording_type, camera_id, frame_number, labeler_id, debug=True):
'''
return the full filename given all info
'''
if debug:
assert isstring(recording_id), 'recording id is not a string'
assert isstring(recording_type), 'recording type is not a string'
assert isscalar(frame_number), 'frame number is not a scalar'
assert isstring(labeler_id), 'labeler id is not a string'
assert camera_id in get_camera_list(), 'camera id %s is not in the camera list' % camera_id
return '--'.join([recording_id, recording_type, camera_id, '%05d' % (frame_number), labeler_id])
def get_image_id(filename, debug=True):
'''
return the real image id and the labeler id, this function assume the name is separated by '--'
'''
if debug:
assert isstring(filename), 'input filename is not a string'
substrings = filename.split('--')
image_id = '--'.join(substrings[:-1])
return image_id
def get_labeler_id(filename, debug=True):
'''
return the real image id and the labeler id, this function assume the name is separated by '--'
'''
if debug:
assert isstring(filename), 'input filename is not a string'
substrings = filename.split('--')
labeler_id = substrings[-1]
return labeler_id
def get_frame_number(filename, debug=True):
'''
extract the frame number from MUGSY filename
'''
if debug:
assert isstring(filename), 'input filename is not a string'
substrings = filename.split('--')
return int(substrings[3])
def get_person_id(filename, debug=True):
'''
extract the person ID from MUGSY filename
'''
if debug:
assert isstring(filename), 'input filename is not a string'
substrings = filename.split('_')
return substrings[1]
def get_recording_id(filename, debug=True):
'''
extract the recording id, including date and person id and dot flag
'''
if debug:
assert isstring(filename), 'input filename is not a string'
substrings = filename.split('--')
return substrings[0]
def get_recording_type(filename, debug=True):
'''
extract the recording type, sentence or neutral tongue or expression
'''
if debug:
assert isstring(filename), 'input filename is not a string'
substrings = filename.split('--')
return substrings[1]
def get_camera_id(filename, debug=True):
'''
extract the camera ID from MUGSY filename
'''
if debug:
assert isstring(filename), 'input filename is not a string'
# print filename
substrings = filename.split('--')
return substrings[2]
def get_image_name(image_id, labeler_id, debug=True):
'''
merge the image id and labeler id and returns the imagename
'''
return image_id + '--' + labeler_id
def get_crop_bbox_from_subset_and_camera(subset, camera_id, debug=True):
'''
get pre-defined cropping bbox from subset and camera id
return:
bbox in TLBR format
'''
if debug:
assert subset in get_detailed_subset_list() or subset in get_compact_subset_list() or subset == 'face_old', 'subset is not correct!'
assert camera_id in get_camera_list(), 'camera id is not correct!'
if 'lower_eyelid' in subset:
if camera_id == '330030':
bbox = [700, 1169, 1659, 1888]
return bbox
def check_left_right(subset, filename, debug=True):
if debug:
assert subset in ['ears', 'lower_eyelid', 'upper_eyelid'], 'subset is not correct!'
    camera_id = get_camera_id(filename, debug=debug)
    if subset == 'lower_eyelid':
        if camera_id == '330014':
            return 'right'
        elif camera_id == '330030':
            return 'left'
        else:
            assert False, 'camera wrong!!'
    elif subset == 'upper_eyelid':
        if camera_id in ['330001', '330010']:
            return 'right'
        elif camera_id in ['330020', '330038']:
            return 'left'
        else:
            assert False, 'camera wrong!!'
else:
assert False, 'not supported'
def get_line_index_list(subset, debug=True):
if debug:
assert subset in get_detailed_subset_list() or subset in get_compact_subset_list() or subset == 'face_old' or subset == 'overall', 'subset is not correct!'
if 'lower_eyelid' in subset:
return [[0, 8, 4, 7, 2, 6, 3, 5, 1], [16, 15, 14, 13, 12, 11, 10, 9]]
elif 'upper_eyelid' in subset:
return [[0, 8, 4, 7, 2, 6, 3, 5, 1], [16, 15, 14, 13, 12, 11, 10, 9], [23, 22, 21, 20, 19, 18, 17]]
elif subset == 'outer_lip':
return [[0, 8, 4, 9, 2, 10, 5, 11, 1], [0, 12, 6, 13, 3, 14, 7, 15, 1]]
elif subset == 'inner_lip':
return [[1, 11, 5, 10, 2, 9, 4, 8, 0], [0, 12, 6, 13, 3, 14, 7, 15, 1]]
elif subset == 'face':
return [[0, 1, 4, 3, 5, 2], [8, 9, 16, 12, 15, 10, 13, 11, 14, 8], [17, 18, 24, 21, 25, 19, 23, 20, 22, 17], [6, 7]]
elif subset == 'lower_teeth':
return [[17, 8, 7, 16, 17], [7, 6, 15, 16], [6, 5, 14, 15], [5, 0, 9, 14], [0, 1, 10, 9], [1, 2, 11, 10], [2, 3, 12, 11], [3, 4, 13, 12]]
elif subset == 'upper_teeth':
return [[8, 17, 16, 7, 8], [16, 15, 6, 7], [15, 14, 5, 6], [14, 9, 0, 5], [9, 10, 1, 0], [10, 11, 2, 1], [11, 12, 3, 2], [12, 13, 4, 3]]
elif subset == 'mouth':
return [[0, 8, 4, 9, 2, 10, 5, 11, 1], [0, 12, 6, 13, 3, 14, 7, 15, 1], [17, 27, 21, 26, 18, 25, 20, 24, 16], [16, 28, 22, 29, 19, 30, 23, 31, 17],
[40, 49, 48, 39, 40], [48, 47, 38, 39], [47, 46, 37, 38], [46, 41, 32, 37], [41, 42, 33, 32], [42, 43, 34, 33], [43, 44, 35, 34], [44, 45, 36, 35],
[67, 58, 57, 66, 67], [57, 56, 65, 66], [56, 55, 64, 65], [55, 50, 59, 64], [50, 51, 60, 59], [51, 52, 61, 60], [52, 53, 62, 61], [53, 54, 63, 62]]
else:
assert False, '%s is not supported' % subset
def get_camera_from_subset(subset, debug=True):
'''
get camera id for a specific subset as many parts are captured only from several fixed camera position
return:
a list of camera id suitable for this specific part
'''
if debug:
assert subset in get_detailed_subset_list() or subset in get_compact_subset_list() or subset == 'face_old', 'subset is not correct!'
if 'lower_eyelid' in subset:
return ['330030', '330014']
elif 'left_upper_eyelid' == subset:
return ['330020', '330038', '330030', '330014']
elif 'right_upper_eyelid' == subset:
return ['330010', '330001', '330030', '330014']
elif subset == 'face_old':
return ['330030', '330014', '330010', '330001', '330020', '330038', '330012', '330031']
elif 'outer_lip' == subset:
return ['330030']
elif 'inner_lip' == subset:
return ['330030']
elif 'lower_teeth' == subset:
return ['330030']
elif 'upper_teeth' == subset:
return ['330030']
elif 'mouth' == subset:
return ['330030']
elif 'nose' == subset:
return ['330012', '330031']
elif subset == 'face':
return ['330030', '330014', '330012', '330031']
else:
assert False, '%s is not supported' % subset
def get_num_pts(subset, debug=True):
'''
get number of points for a specific facial part
'''
if debug:
assert subset in get_detailed_subset_list() or subset in get_compact_subset_list() or subset == 'face_old' or subset == 'overall', 'subset is not correct!'
return num_pts[subset]
def get_index_offset(subset, debug=True):
'''
    get the index offset for a specific facial part
'''
if debug:
assert subset in get_detailed_subset_list() or subset == 'face_old', 'subset is not correct!'
return index_offset[subset]
def get_anno_version():
return anno_version
def get_num_pts_all():
return num_pts['overall']
def get_detailed_subset(filename, subset, debug=True):
'''
for ears, lower_eyelid, upper_eyelid, this function returns the left right part based on camera position
'''
if subset in ['face', 'nose', 'upper_teeth', 'lower_teeth', 'outer_lip', 'inner_lip']:
return subset
else:
camera_id = get_camera_id(filename, debug=debug)
if camera_id in get_left_camera_list():
return 'left_' + subset
elif camera_id in get_right_camera_list():
return 'right_' + subset
else:
assert False, 'camera ID %s error!' % camera_id
def get_part_index_from_chopping(subset, debug=True):
'''
get part index for each individual part from face old dataset
return
a list of index
'''
if debug:
assert subset in get_detailed_subset_list() or subset in get_compact_subset_list() or subset == 'face_old', 'subset is not correct!'
if subset in ['right_lower_eyelid', 'right_upper_eyelid', 'outer_lip', 'inner_lip', 'upper_teeth', 'lower_teeth', 'left_lower_eyelid', 'left_upper_eyelid', 'mouth']:
if subset == 'right_lower_eyelid' or subset == 'right_upper_eyelid':
return [4, 5, 6, 7]
elif subset == 'left_lower_eyelid' or subset == 'left_upper_eyelid':
return [8, 9, 10, 11]
elif subset in ['outer_lip', 'inner_lip', 'upper_teeth', 'lower_teeth', 'mouth']:
return [12, 13, 14, 17]
else:
assert False, '%s part is not supported in face old dataset' % subset
def get_search_range_in_sheet():
return 1000 # define the range to search the name in google sheet
def get_experiments_sheet_id():
return '1ViiXL89ek9rLACudOnLbAu6_c4UIyIBtosIhkIncWiE' # sheet id for trained model
def get_training_params_colums_in_experiments_sheet():
return ['F', 'G', 'H', 'I', 'J', 'K', 'L', 'M']
def get_row_index_from_experiments_sheet(dataset, subset, size_set, model_name, debug=True): # all experiments are unique
'''
the returned index is already 1-indexed
'''
if debug:
assert subset in get_compact_subset_list() or subset == 'face_old', 'subset is not correct!'
assert size_set in ['resized_4', 'cropped', 'cropped_all'], 'size set is not correct'
column_search_range = range(get_search_range_in_sheet())
# find index in rows in experiments sheet
columns_search_dataset = ['A%d' % (search_value_tmp + 1) for search_value_tmp in column_search_range]
columns_name_dataset = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range=columns_search_dataset, debug=debug) #
columns_search_subset = ['B%d' % (search_value_tmp + 1) for search_value_tmp in column_search_range]
columns_name_subset = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range=columns_search_subset, debug=debug) #
columns_search_sizeset = ['C%d' % (search_value_tmp + 1) for search_value_tmp in column_search_range]
columns_name_sizeset = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range=columns_search_sizeset, debug=debug) #
columns_search_model = ['D%d' % (search_value_tmp + 1) for search_value_tmp in column_search_range]
columns_name_model = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range=columns_search_model, debug=debug) #
row_index_exp = None
for row_index in column_search_range:
if columns_name_subset[row_index] == subset and columns_name_sizeset[row_index] == size_set and columns_name_model[row_index] == model_name and columns_name_dataset[row_index] == dataset:
row_index_exp = row_index+1
break
if row_index_exp is None:
assert False, 'No entry model (%s, %s, %s, %s) found!' % (dataset, subset, size_set, model_name)
return row_index_exp
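# Sketch of how the lookup above behaves (sheet contents are hypothetical):
# if row 7 holds the dataset, subset, size_set and model_name in columns A-D,
# get_row_index_from_experiments_sheet(...) returns 7 (already 1-indexed),
# and the fetch_* helpers below read training parameters from that row,
# e.g. fetch_fitsize_from_google_sheets reads cell F7 such as '368 x 368'
# and returns ['368', '368'].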
def fetch_fitsize_from_google_sheets(dataset, subset, size_set, model_name, debug=True):
'''
get input size during training
'''
row_index = get_row_index_from_experiments_sheet(dataset, subset, size_set, model_name, debug=debug)
inputsize = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range='F'+str(row_index), debug=debug)[0]
substrings = inputsize.split(' ')
width = substrings[0]
height = substrings[2]
return [width, height]
def fetch_downsample_from_google_sheets(dataset, subset, size_set, model_name, debug=True):
'''
get downsample factor during training
'''
row_index = get_row_index_from_experiments_sheet(dataset, subset, size_set, model_name, debug=debug)
downsample = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range='H'+str(row_index), debug=debug)[0]
substrings = downsample.split('x')
return substrings[0]
def fetch_model_date_from_google_sheets(dataset, subset, size_set, model_name, debug=True):
'''
get model date during training
'''
row_index = get_row_index_from_experiments_sheet(dataset, subset, size_set, model_name, debug=debug)
date = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range='P'+str(row_index), debug=debug)[0]
return date
def fetch_resize_factor_from_google_sheets(dataset, subset, size_set, model_name, debug=True):
'''
    get resize factor used during training
'''
row_index = get_row_index_from_experiments_sheet(dataset, subset, size_set, model_name, debug=debug)
    factor = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range='G'+str(row_index), debug=debug)[0]
return factor
def fetch_output_stage_from_google_sheets(dataset, subset, size_set, model_name, debug=True):
'''
    get output stage used during training
'''
row_index = get_row_index_from_experiments_sheet(dataset, subset, size_set, model_name, debug=debug)
out_stage = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range='I'+str(row_index), debug=debug)[0]
out_stage = out_stage.split('S')
return out_stage[-1]
def get_evaluation_sheet_id():
return '1cmORxhEOD-E4cYuaKXrJgMeiJ2h5uu4mIoESawaTvFg' # sheet id for evaluated model
def get_training_params_colums_in_evaluation_sheet():
return ['E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']
def get_testing_params_colums_in_evaluation_sheet():
return ['M', 'N', 'O', 'P', 'Q', 'R']
def get_row_index_list_from_evaluation_sheet(dataset, subset, size_set, evaluation_name, debug=True): # all evaluated models might not be unique
'''
the returned index is already 1-indexed
'''
if debug:
assert subset in get_compact_subset_list() or subset == 'face_old', 'subset is not correct!'
assert size_set in ['resized_4', 'cropped', 'cropped_all'], 'size set is not correct'
column_search_range = range(get_search_range_in_sheet())
# find index in rows in experiments sheet
columns_search_dataset = ['A%d' % (search_value_tmp + 1) for search_value_tmp in column_search_range]
columns_name_dataset = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_evaluation_sheet_id(), search_range=columns_search_dataset, debug=debug) #
columns_search_subset = ['B%d' % (search_value_tmp + 1) for search_value_tmp in column_search_range]
columns_name_subset = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_evaluation_sheet_id(), search_range=columns_search_subset, debug=debug) #
columns_search_sizeset = ['C%d' % (search_value_tmp + 1) for search_value_tmp in column_search_range]
columns_name_sizeset = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_evaluation_sheet_id(), search_range=columns_search_sizeset, debug=debug) #
columns_search_model = ['D%d' % (search_value_tmp + 1) for search_value_tmp in column_search_range]
columns_name_model = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_evaluation_sheet_id(), search_range=columns_search_model, debug=debug) #
row_index_list = list()
for row_index in column_search_range:
if columns_name_subset[row_index] == subset and columns_name_sizeset[row_index] == size_set and columns_name_model[row_index] == evaluation_name and columns_name_dataset[row_index] == dataset:
row_index_list.append(row_index + 1)
if len(row_index_list) == 0:
assert False, '%s, %s, %s, %s is not on the search range within the google sheet' % (dataset, subset, size_set, evaluation_name)
return row_index_list
def update_info_evaluation_sheet(dataset, subset, size_set, model_name, evaluation_name, info_list, debug=True):
exp_index = get_row_index_from_experiments_sheet(dataset, subset, size_set, model_name, debug=debug)
evaluation_index_list = get_row_index_list_from_evaluation_sheet(dataset, subset, size_set, evaluation_name, debug=debug)
update_training_info_evaluation_sheet(exp_index, evaluation_index_list, debug=debug)
update_testing_info_evaluation_sheet(evaluation_index_list, info_list, debug=debug)
def update_training_info_evaluation_sheet(exp_index, evaluation_index_list, debug=True):
add_index_exp = lambda x: x+str(exp_index)
columns_list = list(map(add_index_exp, get_training_params_colums_in_experiments_sheet()))
training_info_list = get_data_from_sheet(service=get_sheet_service(), sheet_id=get_experiments_sheet_id(), search_range=columns_list, debug=debug) #
# paste info to evaluation sheet
for line_index in evaluation_index_list:
add_index_evaluation = lambda x: x+str(line_index)
columns_list = list(map(add_index_evaluation, get_training_params_colums_in_evaluation_sheet()))
update_row2sheet(service=get_sheet_service(), sheet_id=get_evaluation_sheet_id(), row_starting_position=columns_list[0], data=training_info_list, debug=debug) #
def update_testing_info_evaluation_sheet(evaluation_index_list, info_list, debug=True):
'''
update testing configuration to the record
'''
if debug:
assert len(info_list) == len(get_testing_params_colums_in_evaluation_sheet()), 'the information list is not correct %d vs %d' % (len(info_list), len(get_testing_params_colums_in_evaluation_sheet()))
info_list = list(info_list)
for line_index in evaluation_index_list:
row_starting_position = get_testing_params_colums_in_evaluation_sheet()[0] + str(line_index)
update_row2sheet(service=get_sheet_service(), sheet_id=get_evaluation_sheet_id(), row_starting_position=row_starting_position, data=info_list, debug=debug)
|
python
|
from dataclasses import dataclass
import pytest
import argdcls
from argdcls.config import _parse
@dataclass
class Config:
lr: float
adam: bool = False
def test_load_params():
# "*"
config = argdcls.load(Config, ["@lr=1.0"])
assert config.lr == 1.0
# ""
config = argdcls.load(Config, ["lr=1.0", "adam=True"])
assert config.lr == 1.0
assert config.adam
# "+"
config = argdcls.load(Config, ["lr=1.0", "+addon=3"])
assert config.lr == 1.0
assert not config.adam
assert config.addon == 3 # type: ignore
# "++"
config = argdcls.load(Config, ["++lr=1.0", "++adam=True", "++addon=3"])
assert config.lr == 1.0
assert config.adam
assert config.addon == 3 # type: ignore
def test_error_cases():
# raise value error if typo exists
with pytest.raises(Exception) as e:
_ = argdcls.load(Config, ["lr=1.0", "adm=True"])
assert (
str(e.value)
== "Parameter \"adm\" not in ['lr', 'adam']. You may use \"+adm=True\" instead."
)
def test_parse():
# "*"
param_t, key, val = _parse("@lr=1.0")
assert param_t == "@"
assert key == "lr"
assert val == 1.0
# ""
param_t, key, val = _parse("lr=1.0")
assert param_t == ""
assert key == "lr"
assert val == 1.0
# "+"
param_t, key, val = _parse("+lr=1.0")
assert param_t == "+"
assert key == "lr"
assert val == 1.0
# "++"
param_t, key, val = _parse("++lr=1.0")
assert param_t == "++"
assert key == "lr"
assert val == 1.0
|
python
|
#! /usr/bin/env python
""" A simple demo using aubio and pyaudio to play beats in real time
Note you will need to have pyaudio installed: `pip install pyaudio`.
Examples:
./demo_tapthebeat.py ~/Music/track1.ogg
When compiled with ffmpeg/libav, you should be able to open remote streams. For
instance using youtube-dl (`pip install youtube-dl`):
./demo_tapthebeat.py `youtube-dl -xg https://youtu.be/zZbM9n9j3_g`
"""
import sys
import time
import pyaudio
import aubio
import numpy as np
win_s = 1024 # fft size
hop_s = 512
# parse command line arguments
if len(sys.argv) < 2:
print("Usage: %s <filename> [samplerate]" % sys.argv[0])
sys.exit(1)
filename = sys.argv[1]
samplerate = 0
if len( sys.argv ) > 2: samplerate = int(sys.argv[2])
# create aubio source
a_source = aubio.source(filename, samplerate, hop_s)
samplerate = a_source.samplerate
# create aubio tempo detection
a_tempo = aubio.tempo("default", win_s, hop_s, samplerate)
# create a simple click sound
click = 0.7 * np.sin(2. * np.pi * np.arange(hop_s) / hop_s * samplerate / 3000.)
zerobuf = np.zeros(hop_s, dtype=np.float32).tobytes()
# pyaudio callback
def pyaudio_callback(_in_data, _frame_count, _time_info, _status):
samples, read = a_source()
#print("s ",len(samples)) # len =512, floats
#print("read ",read) # same with hopsize
is_beat = a_tempo(samples)
if is_beat:
samples += click
print('tick') # avoid print in audio callback
audiobuf = samples.tobytes()
if read < hop_s:
return (zerobuf, pyaudio.paComplete)
    return (audiobuf, pyaudio.paContinue)
# create pyaudio stream with frames_per_buffer=hop_s and format=paFloat32
p = pyaudio.PyAudio()
pyaudio_format = pyaudio.paFloat32
frames_per_buffer = hop_s
n_channels = 1
stream = p.open(format=pyaudio_format, channels=n_channels, rate=samplerate,
output=True, frames_per_buffer=frames_per_buffer,
stream_callback=pyaudio_callback)
# start pyaudio stream
stream.start_stream()
# wait for stream to finish
while stream.is_active():
time.sleep(0.1)
# stop pyaudio stream
stream.stop_stream()
stream.close()
# close pyaudio
|
python
|
'''
Created 03/20/2014
@authors: Yifan Ning
@summary: parse Molecular Formula and drugbank_id from drugbank.xml
then parse MF (Molecular Formula), FDA Preferred Term and UNII from the UNIIs records
match the results from drugbank against the results of parsing the UNIIs records
output terms: FDA Preferred Term, UNII, Drugbank URI
output file: PT-UNIIs-Drugbank-byMF-03202014.txt
'''
import xml.etree.ElementTree as ET
import os, sys
DRUGBANK_XML = "drugbank.xml"
UNIIS_RECORDS = "UNIIs 25Jan2014 Records.txt"
NS = "{http://drugbank.ca}"
DRUGBANK_BIO2RDF = "http://bio2rdf.org/drugbank:"
DRUGBANK_CA = "http://www.drugbank.ca/drugs/"
dict_ickey_dbid = {}
'''
<property>
<kind>Molecular Formula</kind>
<value>C4H6N4O3S2</value>
<source>ChemAxon</source>
</property>
'''
def parseDbIdAndMF(root):
for drug in root.iter(tag=NS + "drug"):
dbid = drug.find(NS + "drugbank-id")
if dbid == None:
continue
else:
drugbankid = dbid.text
for subProp in drug.iter(NS + "property"):
msKind = subProp.find(NS + "kind")
if msKind == None:
continue
elif msKind.text == "Molecular Formula":
msValue = subProp.find(NS + "value")
if msValue == None:
continue
else:
#print drugbankid + '\t' + subValue.text[9:]
ms = msValue.text
dict_ickey_dbid [ms] = drugbankid
tree = ET.parse(DRUGBANK_XML)
root = tree.getroot()
parseDbIdAndMF(root)
#read mapping file that contains UNII PT MF
for line in open(UNIIS_RECORDS,'r').readlines():
row = line.split('\t')
mf = row[2]
if len(mf) == 0:
continue
if dict_ickey_dbid.has_key(mf):
drugbankid = dict_ickey_dbid[mf]
output = row[1] +'\t'+ row[0] +'\t'+ DRUGBANK_CA + drugbankid +'\t'+ DRUGBANK_BIO2RDF + drugbankid
print output.encode('utf-8').strip()
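# Each matched line printed above is tab separated; with the UNIIs records
# column layout assumed here, the output fields are:
#   <FDA Preferred Term>\t<UNII>\t<drugbank.ca drug URI>\t<bio2rdf drugbank URI>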
|
python
|
import pandas as pd
from ..util import generate_name
from tickcounter.questionnaire import Encoder
class MultiEncoder(object):
def __init__(self, encoding_rule):
if isinstance(encoding_rule, Encoder):
self.rules = {
encoding_rule.name: encoding_rule
}
elif isinstance(encoding_rule, list):
if isinstance(encoding_rule[0], Encoder):
self.rules = dict()
for i in encoding_rule:
self.rules[i.name] = i
else:
pass
# Need to convert the dictionary to encoders, and give default name
else:
raise ValueError(f"Expected list of encoder or dictionary objects, got {type(encoding_rule)} instead")
def transform(self, data,*, rule_map=None, columns=None, ignore_list=None, return_rule=False, mode="any"):
result = data.copy()
encode_rule = None
if isinstance(data, pd.DataFrame):
encode_rule = pd.Series(dtype=str, index=data.columns)
if rule_map is None:
for i in result.columns:
if ignore_list is not None and i in ignore_list:
continue
else:
unique_values = result[i].value_counts().index
for rule in self.rules.values():
if mode == "strict":
if len(set(unique_values) ^ set(rule.target)) == 0:
result[i] = rule.transform(result[i])
encode_rule[i] = rule.name
break
elif mode == "any":
if len(set(unique_values) - set(rule.target)) == 0:
result[i] = rule.transform(result[i])
encode_rule[i] = rule.name
break
else:
raise ValueError("rule argument can only be strict or any")
else:
# Check for correct format for rule_map
# Transform according to the rules
pass
elif isinstance(data, pd.Series):
encode_rule = pd.Series(dtype=str, index=[data.name])
unique_values = result.value_counts().index
for rule in self.rules.values():
if mode == "strict":
if len(set(unique_values) ^ set(rule.target)) == 0:
result = rule.transform(result)
encode_rule[data.name] = rule.name
break
elif mode == "any":
if len(set(unique_values) - set(rule.target)) == 0:
result = rule.transform(result)
encode_rule[data.name] = rule.name
break
else:
raise ValueError("rule argument can only be strict or any")
else:
raise TypeError(f"Expected pandas Series or DataFrame, got {type(data)} instead")
if return_rule:
return (result, encode_rule)
else:
return result
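    # Matching behaviour of the two modes above, shown on a hypothetical
    # encoder whose target is ['Yes', 'No']:
    #   "strict": a column is encoded only if its unique values are exactly
    #             {'Yes', 'No'} (empty symmetric difference);
    #   "any":    a column is also encoded when its unique values are a
    #             subset of the target, e.g. only {'Yes'}.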
def count_neutral(self, data, **kwargs):
# Might need to refactor this
return_flag = False
if 'return_rule' in kwargs.keys() and kwargs['return_rule']:
return_flag = True
else:
kwargs['return_rule'] = True
df_encoded, rule = self.transform(data, **kwargs)
total = None
if isinstance(data, pd.DataFrame) or isinstance(data, pd.Series):
for col, encoder in rule.dropna().iteritems():
# Need to rewrite this. We transform the thing twice to get the count of neutral!
ss_tally = self.rules[encoder].count_neutral(data[col] if isinstance(data, pd.DataFrame) else data)
# If encoder does not have neutral, it will return None
if ss_tally is not None:
if total is None:
total = pd.DataFrame([ss_tally]).T
else:
total = pd.concat([total, ss_tally], axis=1)
else:
continue
# None will result if there is no neutral specified
if total is not None:
total = total.sum(axis=1)
total.rename("Neutral count", inplace=True)
if return_flag:
return (total, rule)
else:
return total
|
python
|
from rest_framework import serializers
from accounts.models import User
class AccountSerializer(serializers.ModelSerializer):
def validate(self, attrs):
        username = attrs.get('username', None)
        email = attrs.get('email', None)
        password = attrs.get('password', None)
        if not (email and username and password):
            raise serializers.ValidationError('Fill all fields')
        # exists() keeps the two uniqueness checks independent of each other
        if User.objects.filter(username=username).exists():
            raise serializers.ValidationError('This username is already used.')
        if User.objects.filter(email=email).exists():
            raise serializers.ValidationError('This email is already registered')
        return attrs
class Meta:
model = User
fields = ('password', 'username', 'email')
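# Minimal usage sketch (field values are made up):
#   serializer = AccountSerializer(data={'username': 'alice',
#                                        'email': 'alice@example.com',
#                                        'password': 's3cret'})
#   serializer.is_valid()   # runs validate() above
#   serializer.errors       # e.g. {'non_field_errors': ['This username is already used.']}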
|
python
|
import os
import pdb
import random
import sys
import time
from pprint import pformat
import numpy
import torch as tc
import torch.nn as nn
from tqdm import tqdm
from utils.logger import Logger
from utils.random_seeder import set_random_seed
from config import get_config
from training_procedure import Trainer
def main(C , logger , run_id = 0):
T = Trainer(C = C , logger = logger)
T.flags["change split"] = ( run_id % C.change_split == 0 )
(graph , labels) , (train_nodes , dev_nodes , test_nodes) , model , (optimizer , loss_func) = T.init(idx = run_id)
patience_cnt = 0
maj_metric = "micro"
best_metric = 0
best_metric_epoch = -1 # best number on dev set
report_dev_res = 0
report_tes_res = 0
pbar = tqdm(range(C.num_epoch) , ncols = 130)
for epoch_id in pbar:
model , loss = T.train(graph, labels, train_nodes, model, loss_func, optimizer)
dev_res, tes_res, tra_res = T.evaluate(
graph, labels, [dev_nodes, test_nodes, train_nodes], model, loss_func
)
now_metric = dev_res[maj_metric] # current number on dev set
if C.no_dev or best_metric <= now_metric:
best_metric = now_metric
best_metric_epoch = epoch_id
report_dev_res = dev_res
report_tes_res = tes_res
patience_cnt = 0
else:
patience_cnt += 1
if C.patience > 0 and patience_cnt >= C.patience:
break
postfix_str = "<%d> [Dev] %.2f [Test] %.2f (%.2f) [Train] %.2f" % ( epoch_id ,
dev_res[maj_metric], tes_res[maj_metric], report_tes_res[maj_metric], tra_res[maj_metric]
)
pbar.set_postfix_str(postfix_str)
logger.log("best epoch is %d" % best_metric_epoch)
logger.log("Best Epoch Valid Acc is %.2f" % (report_dev_res[maj_metric]))
logger.log("Best Epoch Test Acc is %.2f" % (report_tes_res[maj_metric]))
# note returned tra_res is always that of last epoch
return model , report_dev_res , report_tes_res , tra_res
if __name__ == "__main__":
C = get_config()
# init logger
logger = Logger(mode = [print])
logger.add_line = lambda : logger.log("-" * 50)
logger.log(" ".join(sys.argv))
logger.add_line()
logger.log()
if C.seed > 0:
set_random_seed(C.seed)
logger.log ("Seed set. %d" % (C.seed))
# start run
seeds = [random.randint(0,233333333) for _ in range(C.multirun)]
dev_ress = []
tes_ress = []
tra_ress = []
for run_id in range(C.multirun):
logger.add_line()
logger.log ("\t\t%d th Run" % run_id)
logger.add_line()
set_random_seed(seeds[run_id])
logger.log ("Seed set to %d." % seeds[run_id])
model , dev_res , tes_res , tra_res = main(C , logger , run_id)
logger.log("%d th Run ended. Best Epoch Valid Result is %s" % (run_id , str(dev_res)))
logger.log("%d th Run ended. Best Epoch Test Result is %s" % (run_id , str(tes_res)))
logger.log("%d th Run ended. Final Train Result is %s" % (run_id , str(tra_res)))
dev_ress.append(dev_res)
tes_ress.append(tes_res)
tra_ress.append(tra_res)
logger.add_line()
for metric in ["micro" , "macro"]:
for res , name in zip(
[dev_ress , tes_ress , tra_ress] ,
["Dev" , "Test" , "Train"]
):
now_res = [x[metric] for x in res]
logger.log ("%s of %s : %s" % (metric , name , str([round(x,2) for x in now_res])))
avg = sum(now_res) / C.multirun
std = (sum([(x - avg) ** 2 for x in now_res]) / C.multirun) ** 0.5
logger.log("%s of %s : avg / std = %.2f / %.2f" % (metric , name , avg , std))
logger.log("")
|
python
|
#!/usr/bin/env python2
# coding:utf-8
import base64
import copy
import errno
import getopt
import json
import logging
import os
import sys
import threading
import time
import traceback
import boto3
import oss2
import yaml
from botocore.client import Config
from pykit import jobq
report_state_lock = threading.RLock()
ali_sync_state = {
'total_n': 0,
'total_bytes': 0,
'no_content_md5': 0,
'no_content_md5_list': [],
'exist': 0,
'check_need_s3_error': 0,
'check_need_s3_error_list': [],
'size_override': 0,
'md5_equal': 0,
'default_override': 0,
'default_not_override': 0,
'piped': 0,
'piped_bytes': 0,
'pipe_succeed': 0,
'pipe_succeed_bytes': 0,
'pipe_failed': 0,
'pipe_failed_bytes': 0,
'pipe_failed_exception_error': 0,
'pipe_failed_exception_error_list': [],
'pipe_failed_ali_file_size_error': 0,
'pipe_failed_ali_file_size_error_list': [],
'pipe_failed_ali_md5_error': 0,
'pipe_failed_ali_md5_error_list': [],
'compared': 0,
'compare_succeed': 0,
'compare_failed': 0,
'compare_failed_not_found_error': 0,
'compare_failed_not_found_error_list': [],
'compare_failed_exception_error': 0,
'compare_failed_exception_error_list': [],
'compare_failed_size_error': 0,
'compare_failed_size_error_list': [],
'compare_failed_content_type_error': 0,
'compare_failed_content_type_error_list': [],
'compare_failed_content_md5_error': 0,
'compare_failed_content_md5_error_list': [],
}
ali_meta_prefix = 'x-oss-meta-'
def add_logger():
log_file = os.path.join(cnf['LOG_DIR'], 'ali-sync-for-' +
cnf['ALI_BUCKET_NAME'] + '.log')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler(log_file)
formatter = logging.Formatter('[%(asctime)s, %(levelname)s] %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
def _mkdir(path):
try:
os.makedirs(path, 0755)
except OSError as e:
if e[0] == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def _thread(func, args):
th = threading.Thread(target=func, args=args)
th.daemon = True
th.start()
return th
def get_conf(conf_path):
with open(conf_path) as f:
conf = yaml.safe_load(f.read())
return conf
def get_boto_client(endpoint):
client = boto3.client(
's3',
use_ssl=False,
aws_access_key_id=cnf['BAISHAN_ACCESS_KEY'],
aws_secret_access_key=cnf['BAISHAN_SECRET_KEY'],
config=Config(signature_version='s3v4'),
region_name='us-east-1',
endpoint_url=endpoint,
)
return client
def load_progress():
if os.path.isfile(cnf['PROGRESS_FILE']):
with open(cnf['PROGRESS_FILE'], 'r') as progress_file:
progress = json.loads(progress_file.read())
return progress
return {
'marker': '',
'total_n': 0,
'total_size': 0,
}
def store_progress():
with open(cnf['PROGRESS_FILE'], 'w') as progress_file:
progress_file.write(json.dumps(current_progress))
def clear_progress():
os.remove(cnf['PROGRESS_FILE'])
def iter_files():
marker = current_progress['marker']
start_marker = cnf.get('START_MARKER', '')
if start_marker > marker:
marker = start_marker
end_marker = cnf.get('END_MARKER', None)
for file_object in oss2.ObjectIterator(oss2_bucket, prefix=cnf['PREFIX'], marker=marker):
if end_marker and file_object.key > end_marker:
break
yield file_object
current_progress['total_n'] += 1
current_progress['total_size'] += file_object.size
current_progress['marker'] = file_object.key
if current_progress['total_n'] % 10000 == 0:
store_progress()
store_progress()
def get_ali_user_meta(headers):
meta = {}
for k, v in headers.iteritems():
if k.lower().startswith(ali_meta_prefix):
meta_name = k.lower()[len(ali_meta_prefix):]
meta[meta_name] = v
return meta
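# Example: a response header 'x-oss-meta-author: bob' becomes {'author': 'bob'},
# i.e. the same un-prefixed key shape that s3 head_object returns in its
# 'Metadata' field, so the two sides can be compared directly in
# compare_file_info below.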
def validate_and_extract_ali_file_info(resp_object, result):
file_object = result['file_object']
if resp_object.content_length != file_object.size:
result['pipe_failed_ali_file_size_error'] = {
'key': file_object.key,
'content_length': resp_object.content_length,
'size': file_object.size,
}
        logger.warn('ali file size error ' +
                    repr(result['pipe_failed_ali_file_size_error']))
return
ali_file_info = {
'size': file_object.size,
'content_type': resp_object.content_type,
}
if 'Content-MD5' in resp_object.headers:
md5 = base64.b64decode(resp_object.headers['Content-MD5'])
md5 = md5.encode('hex')
if md5 != file_object.etag.lower():
result['pipe_failed_ali_md5_error'] = {
'key': file_object.key,
'content_md5': md5,
'etag': file_object.etag.lower(),
}
            logger.warn('ali md5 error ' + repr(result['pipe_failed_ali_md5_error']))
return
ali_file_info['content_md5'] = md5
else:
result['no_content_md5'] = {
'key': file_object.key,
'object_type': file_object.type,
}
ali_file_info['meta'] = get_ali_user_meta(resp_object.headers)
return ali_file_info
def get_s3_file_info(s3_key):
resp = s3_client.head_object(
Bucket=cnf['BAISHAN_BUCKET_NAME'],
Key=s3_key,
)
s3_file_info = {
'size': resp['ContentLength'],
'content_type': resp['ContentType'],
'meta': resp['Metadata'],
'content_md5': resp['ETag'].lower().strip('"'),
}
return s3_file_info
def compare_file_info(ali_file_info, s3_file_info, result, th_status):
if ali_file_info['size'] != s3_file_info['size']:
th_status['compare_failed_size_error_n'] = th_status.get(
'compare_failed_size_error_n', 0) + 1
result['compare_failed_size_error'] = {
'key': result['file_object'].key,
'ali_file_size': ali_file_info['size'],
's3_file_size': s3_file_info['size'],
}
return False
if ali_file_info['content_type'] != s3_file_info['content_type']:
th_status['compare_failed_content_type_error_n'] = th_status.get(
'compare_failed_content_type_error_n', 0) + 1
result['compare_failed_content_type_error'] = {
'key': result['file_object'].key,
'ali_content_type': ali_file_info['content_type'],
's3_content_type': s3_file_info['content_type'],
}
return False
for k, v in ali_file_info['meta'].iteritems():
if k not in s3_file_info['meta'] or v != s3_file_info['meta'][k]:
th_status['compare_failed_meta_error_n'] = th_status.get(
'compare_failed_meta_error_n', 0) + 1
            result['compare_failed_meta_error'] = {
'key': result['file_object'].key,
'ali_meta': repr(ali_file_info['meta']),
's3_meta': repr(s3_file_info['meta']),
}
return False
if 'content_md5' in ali_file_info:
if ali_file_info['content_md5'] != s3_file_info['content_md5']:
th_status['compare_failed_content_md5_error_n'] = th_status.get(
'compare_failed_content_md5_error_n', 0) + 1
result['compare_failed_content_md5_error'] = {
'key': result['file_object'].key,
'ali_content_md5': ali_file_info['content_md5'],
's3_content_md5': s3_file_info['content_md5'],
}
return False
return True
def compare_file(result, th_status):
result['compared'] = True
th_status['compared_n'] = th_status.get('compared_n', 0) + 1
try:
s3_file_info = get_s3_file_info(result['s3_key'])
except Exception as e:
result['compare_failed'] = True
th_status['compare_failed_n'] = th_status.get(
'compare_failed_n', 0) + 1
if hasattr(e, 'message') and 'Not Found' in e.message:
result['compare_failed_not_found_error'] = True
th_status['compare_failed_not_found_n'] = th_status.get(
'compare_failed_not_found_n', 0) + 1
            result['compare_failed_not_found_error'] = {
'key': result['file_object'].key,
'error': repr(e),
}
            logger.error('file does not exist in s3 when comparing file %s: %s' %
(result['s3_key'], traceback.format_exc()))
else:
result['compare_failed_exception_error'] = True
th_status['compare_failed_exception_n'] = th_status.get(
'compare_failed_exception_n', 0) + 1
result['compare_failed_exception_error'] = {
'key': result['file_object'].key,
'error': repr(e),
}
logger.error('got exception when get s3 file info %s: %s' %
(result['s3_key'], traceback.format_exc()))
return False
ali_file_info = result['ali_file_info']
if not compare_file_info(ali_file_info, s3_file_info, result, th_status):
        result['compare_failed'] = True
th_status['compare_failed_n'] = th_status.get(
'compare_failed_n', 0) + 1
return False
result['compare_succeed'] = True
th_status['compare_succeed_n'] = th_status.get('compare_succeed_n', 0) + 1
return True
def pipe_file(result, th_status):
result['piped'] = True
th_status['piped_n'] = th_status.get('piped_n', 0) + 1
def update_pipe_progress(done_bytes, total_bytes):
th_status['pipe_progress'] = (done_bytes, total_bytes)
file_object = result['file_object']
try:
resp_object = oss2_bucket.get_object(
file_object.key, progress_callback=update_pipe_progress)
ali_file_info = validate_and_extract_ali_file_info(resp_object, result)
        if ali_file_info is None:
result['pipe_failed'] = True
th_status['pipe_failed_n'] = th_status.get('pipe_failed_n', 0) + 1
return False
extra_args = {
'ACL': cnf['FILE_ACL'],
'ContentType': ali_file_info['content_type'],
'Metadata': ali_file_info['meta'],
}
s3_client.upload_fileobj(resp_object, cnf['BAISHAN_BUCKET_NAME'],
result['s3_key'], ExtraArgs=extra_args)
result['pipe_succeed'] = True
th_status['pipe_succeed_n'] = th_status.get('pipe_succeed_n', 0) + 1
result['ali_file_info'] = ali_file_info
return True
except Exception as e:
result['pipe_failed'] = True
th_status['pipe_failed_n'] = th_status.get('pipe_failed_n', 0) + 1
result['pipe_failed_exception_error'] = {
'key': file_object.key,
'error': repr(e),
}
logger.error('got exception when pipe file %s: %s' %
(file_object.key, traceback.format_exc()))
return False
def convert_key(key):
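    # Identity mapping for now; replace this function if ali object keys need
    # to be renamed before being written to the s3 bucket.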
return key
def check_need(result, th_status):
if not cnf['CHECK_EXIST']:
return True
file_object = result['file_object']
try:
s3_file_info = get_s3_file_info(result['s3_key'])
except Exception as e:
if hasattr(e, 'message') and 'Not Found' in e.message:
return True
else:
th_status['check_need_s3_error_n'] = th_status.get(
'check_need_s3_error_n', 0) + 1
result['check_need_s3_error'] = {
'key': result['s3_key'],
'error': repr(e),
}
            logger.error('failed to get s3 file info in check_need %s: %s' %
(result['s3_key'], traceback.format_exc()))
return False
result['exist'] = True
th_status['exist_n'] = th_status.get('exist_n', 0) + 1
if s3_file_info['size'] != file_object.size:
result['size_override'] = True
th_status['size_override_n'] = th_status.get('size_override_n', 0) + 1
logger.info(('need to override file: %s, because size not equal, ' +
'ali_size: %d, s3_size: %d') %
(result['s3_key'], file_object.size, s3_file_info['size']))
return True
if s3_file_info['content_md5'].lower() == file_object.etag.lower():
result['md5_equal'] = True
th_status['md5_equal_n'] = th_status.get('md5_equal_n', 0) + 1
return False
if cnf['OVERRIDE']:
result['default_override'] = True
th_status['default_override_n'] = th_status.get(
'default_override_n', 0) + 1
return True
else:
result['default_not_override'] = True
th_status['default_not_override_n'] = th_status.get(
'default_not_override_n', 0) + 1
return False
def sync_one_file(file_object):
thread_name = threading.current_thread().getName()
thread_status[thread_name] = thread_status.get(thread_name, {})
th_status = thread_status[thread_name]
th_status['total_n'] = th_status.get('total_n', 0) + 1
result = {
'file_object': file_object,
's3_key': convert_key(file_object.key)
}
if not check_need(result, th_status):
return result
if not pipe_file(result, th_status):
return result
if not compare_file(result, th_status):
return result
return result
def update_sync_stat(result):
file_object = result['file_object']
ali_sync_state['total_n'] += 1
ali_sync_state['total_bytes'] += file_object.size
if 'no_content_md5' in result:
ali_sync_state['no_content_md5'] += 1
ali_sync_state['no_content_md5_list'].append(result['no_content_md5'])
if 'check_need_s3_error' in result:
ali_sync_state['check_need_s3_error'] += 1
ali_sync_state['check_need_s3_error_list'].append(
result['check_need_s3_error'])
return
if 'exist' in result:
ali_sync_state['exist'] += 1
if 'size_override' in result:
ali_sync_state['size_override'] += 1
elif 'md5_equal' in result:
ali_sync_state['md5_equal'] += 1
elif 'default_override' in result:
ali_sync_state['default_override'] += 1
elif 'default_not_override' in result:
ali_sync_state['default_not_override'] += 1
if not 'piped' in result:
return
ali_sync_state['piped'] += 1
ali_sync_state['piped_bytes'] += file_object.size
if 'pipe_failed' in result:
ali_sync_state['pipe_failed'] += 1
ali_sync_state['pipe_failed_bytes'] += file_object.size
if 'pipe_failed_exception_error' in result:
ali_sync_state['pipe_failed_exception_error'] += 1
ali_sync_state['pipe_failed_exception_error_list'].append(
result['pipe_failed_exception_error'])
elif 'pipe_failed_ali_file_size_error' in result:
ali_sync_state['pipe_failed_ali_file_size_error'] += 1
ali_sync_state['pipe_failed_ali_file_size_error_list'].append(
result['pipe_failed_ali_file_size_error'])
elif 'pipe_failed_ali_md5_error' in result:
ali_sync_state['pipe_failed_ali_md5_error'] += 1
ali_sync_state['pipe_failed_ali_md5_error_list'].append(
result['pipe_failed_ali_md5_error'])
return
ali_sync_state['pipe_succeed'] += 1
ali_sync_state['pipe_succeed_bytes'] += file_object.size
if not 'compared' in result:
return
if 'compare_failed' in result:
ali_sync_state['compare_failed'] += 1
if 'compare_failed_not_found_error' in result:
ali_sync_state['compare_failed_not_found_error'] += 1
ali_sync_state['compare_failed_not_found_error_list'].append(
result['compare_failed_not_found_error'])
elif 'compare_failed_exception_error' in result:
ali_sync_state['compare_failed_exception_error'] += 1
ali_sync_state['compare_failed_exception_error_list'].append(
result['compare_failed_exception_error'])
elif 'compare_failed_size_error' in result:
ali_sync_state['compare_failed_size_error'] += 1
ali_sync_state['compare_failed_size_error_list'].append(
result['compare_failed_size_error'])
        elif 'compare_failed_content_type_error' in result:
            ali_sync_state['compare_failed_content_type_error'] += 1
            ali_sync_state['compare_failed_content_type_error_list'].append(
                result['compare_failed_content_type_error'])
        elif 'compare_failed_content_md5_error' in result:
            ali_sync_state['compare_failed_content_md5_error'] += 1
            ali_sync_state['compare_failed_content_md5_error_list'].append(
                result['compare_failed_content_md5_error'])
return
ali_sync_state['compare_succeed'] += 1
def report_thread_status(th_status):
total_n = th_status.get('total_n', 0)
s3_error_n = th_status.get('check_need_s3_error_n', 0)
exist_n = th_status.get('exist_n', 0)
size_override_n = th_status.get('size_override_n', 0)
md5_equal_n = th_status.get('md5_equal_n', 0)
d_override_n = th_status.get('default_override_n', 0)
d_not_override_n = th_status.get('default_not_override_n', 0)
piped_n = th_status.get('piped_n', 0)
pipe_succeed_n = th_status.get('pipe_succeed_n', 0)
pipe_failed_n = th_status.get('pipe_failed_n', 0)
pipe_progress = th_status.get('pipe_progress', (0, 0))
compared_n = th_status.get('compared_n', 0)
compare_succeed_n = th_status.get('compare_succeed_n', 0)
compare_failed_n = th_status.get('compare_failed_n', 0)
not_found_n = th_status.get('compare_failed_not_found_n', 0)
exception_n = th_status.get('compare_failed_exception_n', 0)
size_error_n = th_status.get('compare_failed_size_error_n', 0)
    content_type_error_n = th_status.get(
        'compare_failed_content_type_error_n', 0)
    meta_error_n = th_status.get('compare_failed_meta_error_n', 0)
content_md5_error_n = th_status.get(
'compare_failed_content_md5_error_n', 0)
print (('total: %d, get s3 file info failed: %s, exist: %d, size ' +
            'override: %d, md5_equal: %d, default override: %d, default ' +
'not override: %d ') %
(total_n, s3_error_n, exist_n, size_override_n, md5_equal_n,
d_override_n, d_not_override_n))
    print ('piped: %d, pipe succeed: %d, pipe failed: %d, pipe progress: %s' %
(piped_n, pipe_succeed_n, pipe_failed_n, repr(pipe_progress)))
print (('compared: %d, compare succeed: %d, compare failed: %d, not ' +
'found: %d, exception: %d, size error: %d, type error: %d, ' +
'meta error: %d, md5 error: %d') %
(compared_n, compare_succeed_n, compare_failed_n, not_found_n,
exception_n, size_error_n, content_type_error_n,
meta_error_n, content_md5_error_n))
def _report_state():
# os.system('clear')
print (('ali bucket name: %s, prefix: %s, start marker: %s, ' +
'end marker: %s, baishan bucket name: %s') %
(cnf['ALI_BUCKET_NAME'], cnf['PREFIX'], cnf['START_MARKER'],
cnf['END_MARKER'], cnf['BAISHAN_BUCKET_NAME']))
print ''
print (('previous iter progress: total number: %d, ' +
'total size: %d, marker: %s') %
(previous_progress['total_n'],
previous_progress['total_size'],
previous_progress['marker']))
print (('current iter progress: total number: %d, ' +
'total size: %d, marker: %s') %
(current_progress['total_n'],
current_progress['total_size'],
current_progress['marker']))
print ''
print ('total number: %d, total bytes: %d, no content md5: %d' %
(ali_sync_state['total_n'], ali_sync_state['total_bytes'],
ali_sync_state['no_content_md5']))
print ''
print 'check exist: %s' % cnf['CHECK_EXIST']
print 'get s3 file info failed: %d' % ali_sync_state['check_need_s3_error']
print (('exist: %d, size_override: %d, md5_equal: %d, ' +
'default_override: %d, default_not_override: %d') %
(ali_sync_state['exist'],
ali_sync_state['size_override'],
ali_sync_state['md5_equal'],
ali_sync_state['default_override'],
ali_sync_state['default_not_override']))
print ''
print 'piped: %d, piped_bytes: %d' % (ali_sync_state['piped'],
ali_sync_state['piped_bytes'])
print ('pipe succeed: %d, pipe succeed bytes: %d' %
(ali_sync_state['pipe_succeed'],
ali_sync_state['pipe_succeed_bytes']))
print ('pipe failed: %d, pipe failed bytes: %d' %
(ali_sync_state['pipe_failed'],
ali_sync_state['pipe_failed_bytes']))
print (('pipe failed reason: exception: %d, ali file size error: %d, ' +
'ali md5 error: %d') %
(ali_sync_state['pipe_failed_exception_error'],
ali_sync_state['pipe_failed_ali_file_size_error'],
ali_sync_state['pipe_failed_ali_md5_error']))
print ''
print ('compared: %d, compare_succeed: %d, compare_failed: %d' %
(ali_sync_state['compared'],
ali_sync_state['compare_succeed'],
ali_sync_state['compare_failed']))
print (('compare failed reason: not found: %d, exception: %d, ' +
'size error: %d, content type error: %d, content md5 error: %d') %
(ali_sync_state['compare_failed_not_found_error'],
ali_sync_state['compare_failed_exception_error'],
ali_sync_state['compare_failed_size_error'],
ali_sync_state['compare_failed_content_type_error'],
ali_sync_state['compare_failed_content_md5_error']))
print ''
print 'threads status:'
for th_name, th_status in thread_status.iteritems():
print th_name
report_thread_status(th_status)
print ''
def report_state():
with report_state_lock:
_report_state()
def report(sess):
while not sess['stop']:
report_state()
time.sleep(cnf['REPORT_INTERVAL'])
def dump_state():
with open(cnf['STATE_FILE'], 'w') as stat_file:
stat_file.write(json.dumps(ali_sync_state))
def sync():
try:
report_sess = {'stop': False}
report_th = _thread(report, (report_sess,))
jobq.run(iter_files(), [(sync_one_file, 3),
(update_sync_stat, 1),
])
report_sess['stop'] = True
report_th.join()
report_state()
dump_state()
except KeyboardInterrupt:
report_state()
dump_state()
sys.exit(0)
if __name__ == "__main__":
opts, args = getopt.getopt(sys.argv[1:], '', ['conf=', ])
opts = dict(opts)
if opts.get('--conf') is None:
conf_path = '../conf/ali_sync.yaml'
else:
conf_path = opts['--conf']
cnf = get_conf(conf_path)
oss2_auth = oss2.Auth(cnf['ALI_ACCESS_KEY'], cnf['ALI_SECRET_KEY'])
oss2_bucket = oss2.Bucket(
oss2_auth, cnf['ALI_ENDPOINT'], cnf['ALI_BUCKET_NAME'])
s3_client = get_boto_client(cnf['BAISHAN_ENDPOINT'])
_mkdir(cnf['LOG_DIR'])
logger = add_logger()
thread_status = {}
cmd = args[0]
if cmd == 'sync':
current_progress = load_progress()
previous_progress = copy.deepcopy(current_progress)
sync()
elif cmd == 'clear_progress':
clear_progress()
|
python
|
from __future__ import print_function
import os
import argparse
import numpy as np
from module.retinaface_function_in_numpy import PriorBox_in_numpy
from utils.retinaface_tool_in_numpy import py_cpu_nms_in_numpy, decode_in_numpy, decode_landm_in_numpy
import cv2
from module.retinaface_model_in_numpy import RetinaFace
from hyperparams import Hyperparams
def get_retinaface_net():
    '''
    Build and return the RetinaFace detection network.
    :return: RetinaFace network instance
    '''
print('Loading network...')
cfg = np.load("E:/py_file/temFace/temFace/data/retinate.npy", allow_pickle=True)
cfg = cfg.item()
# print(cfg)
net = RetinaFace(cfg)
return net
def face_detection(net,img_raw):
cfg = Hyperparams().cfg_mnet
img = np.float32(img_raw)
# testing scale
target_size = 1600
max_size = 2150
im_shape = img.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
resize = float(target_size) / float(im_size_min)
    # prevent bigger axis from being more than max_size
if np.round(resize * im_size_max) > max_size:
resize = float(max_size) / float(im_size_max)
if True:
resize = 1
if resize != 1:
img = cv2.resize(img, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
im_height, im_width, _ = img.shape
scale = np.array([img.shape[1], img.shape[0], img.shape[1], img.shape[0]]).astype(np.float64)
    img -= (104, 117, 123)  # subtract the per-channel BGR mean
    img = img.transpose(2, 0, 1)  # HWC -> CHW
img = np.expand_dims(img,0)
loc, conf, landms, test_size = net(img)
    # prior (anchor) boxes for the face detector
priorbox = PriorBox_in_numpy(cfg, image_size=(im_height, im_width), test_size=test_size)
priors = priorbox.forward()
#prior_data = priors.data
prior_data = priors
boxes = decode_in_numpy(loc.squeeze(0), prior_data, cfg['variance'])
boxes = boxes * scale / resize
scores = conf.squeeze(0)[:, 1]
landms = decode_landm_in_numpy(landms.squeeze(0), prior_data, cfg['variance'])
scale1 = np.array([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2]]).astype(np.float64)
landms = landms * scale1 / resize
# ignore low scores
inds = np.where(scores > 0.02)[0]
boxes = boxes[inds]
landms = landms[inds]
scores = scores[inds]
# keep top-K before NMS
    order = scores.argsort()[::-1]  # argsort returns ascending indices, reversed here for descending scores
#if len(order) > 10:
# order = order[:10]
# order = scores.argsort()[::-1][:args.top_k]
boxes = boxes[order]
landms = landms[order]
scores = scores[order]
# do NMS
dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
#print("pre:{}".format(dets.shape))
keep = py_cpu_nms_in_numpy(dets, 0.4)
# keep = nms(dets, args.nms_threshold,force_cpu=args.cpu)
dets = dets[keep, :]
#print("back:{}".format(dets.shape))
landms = landms[keep]
# keep top-K faster NMS
# dets = dets[:args.keep_top_k, :]
# landms = landms[:args.keep_top_k, :]
    dets = np.concatenate((dets, landms), axis=1)  # append landmarks to the box/score columns
faces = []
for b in dets:
if b[4] < 0.5:
continue
# text = "{:.4f}".format(b[4])
b = list(map(int, b))
#cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
faces.append(b)
#print("facesnum:{}".format(len(faces)))
return faces
if __name__ == '__main__':
get_retinaface_net()
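    # Illustrative end-to-end usage (the image path is hypothetical):
    #   net = get_retinaface_net()
    #   img_raw = cv2.imread('some_image.jpg')
    #   faces = face_detection(net, img_raw)
    #   for b in faces:  # b = [x1, y1, x2, y2, score, 5 landmark points...]
    #       cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)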
|
python
|
import logging
def init_logging(log_file, log_level):
logging.getLogger().setLevel(log_level)
log_formatter = logging.Formatter('%(asctime)s %(message)s')
root_logger = logging.getLogger()
if log_file:
file_handler = logging.FileHandler(log_file, encoding='utf8')
file_handler.setFormatter(log_formatter)
root_logger.addHandler(file_handler)
logging.info('Logging to %s', log_file)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
root_logger.addHandler(console_handler)
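# Example usage (the file name is arbitrary):
#   init_logging('run.log', logging.INFO)
#   logging.info('message')   # written to run.log (if given) and to the console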
|
python
|
from django.core.management.base import BaseCommand
from apps.boxes.models import BoxUpload
class Command(BaseCommand):
help = 'Removes expired and completed boxes uploads'
def handle(self, *args, **options):
deleted = BoxUpload.objects.not_active().delete()
self.stdout.write(
self.style.SUCCESS('Successfully removed {} expired '
'and completed uploads.'
.format(deleted[0]))
)
|
python
|
from audhelper.audhelper import __version__, __author__
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="audhelper",
version=__version__,
author=__author__,
author_email="[email protected]",
description="Audio helper functions including visualization and processing functions",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Zepyhrus/audhelper",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.4',
)
|
python
|
import cv2
import numpy as np
img = cv2.imread(r"EDIZ\OPENCV\basin.jpg")
gri = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
temp1 = cv2.imread(r"EDIZ\OPENCV\temp1.jpg",0)
temp2 = cv2.imread(r"EDIZ\OPENCV\temp2.jpg",0)
w,h = temp2.shape[::-1]
res = cv2.matchTemplate(gri,temp2,cv2.TM_CCOEFF_NORMED)
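# res has shape (H-h+1, W-w+1); each entry is the normalised correlation
# score in [-1, 1] for the template placed with its top-left corner at that
# position, so the threshold below keeps only sufficiently strong matches.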
thresh = 0.5
loc = np.where(res>=thresh)
# print(zip(*loc[::-1]))
for pt in zip(*loc[::-1]):
cv2.rectangle(img,pt,(pt[0]+w,pt[1]+h),(0,255,255),2)
cv2.imshow("ilkresim",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
python
|
###
# Copyright 2017 Hewlett Packard Enterprise, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# -*- coding: utf-8 -*-
""" Directory Command for rdmc """
import sys
import re
import getpass
from argparse import ArgumentParser, SUPPRESS, REMAINDER, Action, RawDescriptionHelpFormatter
from redfish.ris.rmc_helper import IloResponseError, IdTokenError
from rdmc_helper import ReturnCodes, InvalidCommandLineError, IncompatibleiLOVersionError,\
InvalidCommandLineErrorOPTS, NoContentsFoundForOperationError, \
ResourceExists, Encryption
__subparsers__ = ['ldap', 'kerberos', 'test']
PRIVKEY = {1: ('Login', 'AssignedPrivileges'),\
2: ('RemoteConsolePriv', 'OemPrivileges'),\
3: ('ConfigureUsers', 'AssignedPrivileges'),\
4: ('ConfigureManager', 'AssignedPrivileges'),\
5: ('VirtualMediaPriv', 'OemPrivileges'),\
6: ('VirtualPowerAndResetPriv', 'OemPrivileges'),\
7: ('HostNICConfigPriv', 'OemPrivileges'),\
8: ('HostBIOSConfigPriv', 'OemPrivileges'),\
9: ('HostStorageConfigPriv', 'OemPrivileges'),\
10: ('SystemRecoveryConfigPriv', 'OemPrivileges'),\
11: ('ConfigureSelf', 'AssignedPrivileges'),\
12: ('ConfigureComponents', 'AssignedPrivileges')}
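# Example of how a role map entry is interpreted (the group name is made up):
# an --addrolemap value of "3;5:ldap-admins" is parsed by _DirectoryParse into
# privilege numbers [3, 5] for remote group 'ldap-admins'; PRIVKEY then maps
# 3 -> ('ConfigureUsers', 'AssignedPrivileges') and
# 5 -> ('VirtualMediaPriv', 'OemPrivileges') when privileges are re-applied.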
class _DirectoryParse(Action):
def __init__(self, option_strings, dest, nargs, **kwargs):
super(_DirectoryParse, self).__init__(option_strings, dest, nargs, **kwargs)
def __call__(self, parser, namespace, values, option_strings):
""" Helper for parsing options """
if option_strings.endswith('disable'):
setattr(namespace, self.dest, False)
elif option_strings.endswith('enable'):
setattr(namespace, self.dest, True)
elif option_strings.endswith('enablelocalauth'):
setattr(namespace, self.dest, False)
elif option_strings.endswith('disablelocalauth'):
setattr(namespace, self.dest, True)
elif option_strings == '--removerolemap':
setattr(namespace, self.dest, {'remove': []})
for role in next(iter(values)).split(','):
role = role.replace('"', '')
if role:
namespace.roles['remove'].append(role)
elif option_strings == '--addrolemap':
setattr(namespace, self.dest, {'add': []})
for role in next(iter(values)).split(','):
role = role.replace('"', '')
if role and re.match('.*:.*', role):
privs = role.split(':')[0].split(';')
if len(privs) > 1:
for priv in privs:
try:
if priv and int(priv) > 12:
try:
parser.error("Invalid privilege number added %s." % priv)
except SystemExit:
raise InvalidCommandLineErrorOPTS("")
except ValueError:
try:
parser.error("Privileges must be added as numbers.")
except SystemExit:
raise InvalidCommandLineErrorOPTS("")
namespace.roles['add'].append(role)
else:
try:
parser.error("Supply roles to add in form <local role>:<remote group>")
except SystemExit:
raise InvalidCommandLineErrorOPTS("")
elif option_strings == '--addsearch':
setattr(namespace, self.dest, {'add': []})
for search in next(iter(values)).split(','):
if search:
namespace.search['add'].append(search)
elif option_strings == '--removesearch':
setattr(namespace, self.dest, {'remove': []})
for search in next(iter(values)).split(','):
if search:
namespace.search['remove'].append(search)
class DirectoryCommand():
""" Update directory settings on the server """
def __init__(self):
self.ident = {
'name':'directory',\
'usage': None,\
'description':'\tAdd credentials, service address, two search strings, and enable'\
'\n\tLDAP directory service, remote role groups (mapping), local custom role\n\t'\
'IDs with privileges.\n\n\tTo view help on specific sub-commands'\
' run: directory <sub-command> -h\n\n\tExample: directory ldap -h\n',
'summary':'Update directory settings, add/delete directory roles, and test directory '\
'settings on the currently logged in server.',\
'aliases': ['ad', 'activedirectory'],\
'auxcommands': ["IloAccountsCommand"]
}
#self.definearguments(self.parser)
self.cmdbase = None
self.rdmc = None
self.auxcommands = dict()
#self.typepath = rdmcObj.app.typepath
#self.iloaccounts = rdmcObj.commands_dict["IloAccountsCommand"](rdmcObj)
def run(self, line):
"""Main directory Function
:param line: string of arguments passed in
:type line: str.
"""
try:
ident_subparser = False
for cmnd in __subparsers__:
if cmnd in line:
(options, args) = self.rdmc.rdmc_parse_arglist(self, line)
ident_subparser = True
break
if not ident_subparser:
(options, args) = self.rdmc.rdmc_parse_arglist(self, line, default=True)
except (InvalidCommandLineErrorOPTS, SystemExit):
if ("-h" in line) or ("--help" in line):
return ReturnCodes.SUCCESS
else:
raise InvalidCommandLineErrorOPTS("")
self.directoryvalidation(options)
if self.rdmc.app.getiloversion() < 5.140:
raise IncompatibleiLOVersionError("Directory settings are only available on "\
"iLO 5 1.40 or greater.")
results = None
        if options.command.lower() == 'ldap' or \
                getattr(options, 'ldap_kerberos', None) == 'ldap':
try:
results = self.rdmc.app.select(selector='AccountService.', \
path_refresh=True)[0].dict
path = results[self.rdmc.app.typepath.defs.hrefstring]
oem = results['Oem'][self.rdmc.app.typepath.defs.oemhp]
local_auth = results['LocalAccountAuth']
results = results['LDAP']
name = 'LDAP'
except (KeyError, IndexError):
raise NoContentsFoundForOperationError("Unable to gather LDAP settings.")
        elif options.command.lower() == 'kerberos' or \
                getattr(options, 'ldap_kerberos', None) == 'kerberos':
try:
results = self.rdmc.app.select(selector='AccountService.', \
path_refresh=True)[0].dict
path = results[self.rdmc.app.typepath.defs.hrefstring]
oem = results['Oem'][self.rdmc.app.typepath.defs.oemhp]
local_auth = results['LocalAccountAuth']
results = results['ActiveDirectory']
name = 'ActiveDirectory'
except (KeyError, IndexError):
raise NoContentsFoundForOperationError("Unable to gather Kerberos settings.")
if results:
keytab = None
payload = {}
if hasattr(options, 'keytab'):
keytab = options.keytab
try:
directory_settings = self.directory_helper(results, options)
except IndexError:
directory_settings = self.directory_helper(results, options)
if directory_settings:
payload[name] = directory_settings
if hasattr(options, 'authmode'):
if options.authmode:
payload.update({'Oem':{'Hpe':{'DirectorySettings': \
{'LdapAuthenticationMode': options.authmode}}}})
if not payload and not keytab:
if getattr(options, 'json', False):
self.rdmc.ui.print_out_json({name: results, 'LocalAccountAuth': local_auth, \
"Oem": {"Hpe": oem}})
else:
self.print_settings(results, oem, local_auth, name)
if payload:
priv_patches = {}
try:
if hasattr(options, "localauth"):
if options.localauth:
payload['LocalAccountAuth'] = 'Enabled' \
if options.localauth else 'Disabled'
elif local_auth:
payload['LocalAccountAuth'] = 'Enabled' if local_auth else 'Disabled'
except (NameError, AttributeError):
payload['LocalAccountAuth'] = 'Disabled'
try:
maps = {}
if payload.get('LDAP'):
maps = payload['LDAP'].get('RemoteRoleMapping', {})
elif payload.get('ActiveDirectory'):
maps = payload['ActiveDirectory'].get('RemoteRoleMapping', {})
#Check if we need to modify roles after creating
for mapping in maps:
privs = mapping['LocalRole'].split(';')
if len(privs) > 1:
privs = [int(priv) for priv in privs if priv]
if 10 in privs:
user_privs = self.auxcommands['iloaccounts'].getsesprivs()
if 'SystemRecoveryConfigPriv' not in user_privs.keys():
raise IdTokenError("The currently logged in account "\
"must have the System Recovery Config privilege to "\
"add the System Recovery Config privilege to a local "\
"role group.")
priv_patches[mapping['RemoteGroup']] = privs
mapping['LocalRole'] = "ReadOnly"
except Exception:
pass
self.rdmc.ui.printer("Changing settings...\n")
try:
self.rdmc.app.patch_handler(path, payload)
except IloResponseError as excp:
if not results['ServiceEnabled']:
self.rdmc.ui.error("You must enable this directory service before or "\
"during assignment of username and password. Try adding the flag "\
"--enable.\n", excp)
else:
raise IloResponseError
if priv_patches:
self.update_mapping_privs(priv_patches)
if keytab:
path = oem['Actions'][next(iter(oem['Actions']))]['target']
self.rdmc.ui.printer("Adding keytab...\n")
self.rdmc.app.post_handler(path, {"ImportUri": keytab})
elif options.command.lower() == 'test':
self.test_directory(options, json=getattr(options, "json", False))
self.cmdbase.logout_routine(self, options)
#Return code
return ReturnCodes.SUCCESS
def update_mapping_privs(self, roles_to_update):
""" Helper function to update created role mappings to match user privileges.
:param roles_to_update: Dictionary of privileges to update.
:type roles_to_update: dict
"""
self.rdmc.ui.printer("Updating privileges of created role maps...\n")
try:
results = self.rdmc.app.select(selector='AccountService.', path_refresh=True)[0].dict
roles = self.rdmc.app.getcollectionmembers(\
self.rdmc.app.getidbytype('RoleCollection.')[0])
except (KeyError, IndexError):
raise NoContentsFoundForOperationError("Unable to gather Role settings. Roles may not "\
"be updated to match privileges requested.")
for rolemap in results['LDAP']['RemoteRoleMapping']:
for role in roles:
if role['RoleId'] == rolemap['LocalRole']:
role['RemoteGroup'] = rolemap['RemoteGroup']
break
for role in roles:
privs = {'AssignedPrivileges' : [], 'OemPrivileges': []}
for update_role in roles_to_update.keys():
if role.get('RemoteGroup', None) == update_role:
for priv in roles_to_update[update_role]:
privs[PRIVKEY[priv][1]].append(PRIVKEY[priv][0])
try:
self.rdmc.app.patch_handler(role['@odata.id'], privs)
self.rdmc.ui.printer("Updated privileges for %s\n" % update_role)
except IloResponseError as excp:
self.rdmc.ui.error("Unable to update privileges for %s\n" % update_role, \
excp)
break
def directory_helper(self, settings, options):
""" Helper function to set the payload based on options and arguments
:param settings: dictionary to change
:type settings: dict.
:param options: list of options
:type options: list.
"""
payload = {}
serviceaddress = None
if hasattr(options, 'serviceaddress'):
if isinstance(options.serviceaddress, str):
serviceaddress = options.serviceaddress
if serviceaddress == '""' or serviceaddress == "''":
serviceaddress = ''
if hasattr(options, 'port'):
if isinstance(options.port, str):
if serviceaddress is None:
serviceaddress = settings['ServiceAddresses'][0]
serviceaddress = serviceaddress + ':' + options.port
if hasattr(options, 'realm'):
if isinstance(options.realm, str):
if serviceaddress is None:
serviceaddress = settings['ServiceAddresses'][0]
if options.realm == '""' or options.realm == "''":
options.realm = ''
serviceaddress = serviceaddress + '@' + options.realm
        if serviceaddress is not None:
payload['ServiceAddresses'] = [serviceaddress]
if hasattr(options, 'enable'):
            if options.enable is not None:
payload['ServiceEnabled'] = options.enable
if hasattr(options, 'ldap_username') and hasattr(options, 'ldap_password'):
if options.ldap_username and options.ldap_password:
payload.update({"Authentication":{"Username": options.ldap_username,\
"Password": options.ldap_password}})
if hasattr(options, 'roles'):
if options.roles:
payload['RemoteRoleMapping'] = self.role_helper(options.roles, \
settings['RemoteRoleMapping'])
if hasattr(options, 'search'):
if options.search:
payload.update({"LDAPService": {"SearchSettings": \
self.search_helper(options.search, settings['LDAPService']['SearchSettings'])}})
return payload
def test_directory(self, options, json=False):
""" Function to perform directory testing
:param options: namespace of custom parser attributes which contain the original command
arguments for 'start/stop/viewresults'
:type options: namespace
:param json: Bool to print in json format or not.
:type json: bool.
"""
results = self.rdmc.app.select(selector='HpeDirectoryTest.', path_refresh=True)[0].dict
if options.start_stop_view.lower() == 'start':
path = None
for item in results['Actions']:
if 'StartTest' in item:
path = results['Actions'][item]['target']
break
if not path:
raise NoContentsFoundForOperationError("Unable to start directory test.")
self.rdmc.ui.printer("Starting the directory test. Monitor results with "\
"command: \"directory viewresults\".\n")
self.rdmc.app.post_handler(path, {})
elif options.start_stop_view.lower() == 'stop':
path = None
for item in results['Actions']:
if 'StopTest' in item:
path = results['Actions'][item]['target']
break
if not path:
raise NoContentsFoundForOperationError("Unable to stop directory test.")
self.rdmc.ui.printer("Stopping the directory test.\n")
self.rdmc.app.post_handler(path, {})
elif options.start_stop_view.lower() == 'viewresults':
if getattr(options, "json", False):
self.rdmc.ui.print_out_json(results['TestResults'])
else:
for test in results['TestResults']:
self.rdmc.ui.printer('Test: %s\n' % test['TestName'])
self.rdmc.ui.printer("------------------------\n")
self.rdmc.ui.printer('Status: %s\n' % test['Status'])
self.rdmc.ui.printer('Notes: %s\n\n' % test['Notes'])
def print_settings(self, settings, oem_settings, local_auth_setting, name):
""" Pretty print settings of LDAP or Kerberos
:param settings: settings to print
:type settings: dict.
:param oem_settings: oem_settings to print
:type oem_settings: dict.
        :param local_auth_setting: local account authorization setting
        :type local_auth_setting: str.
:param name: type of setting (activedirectory or ldap)
:type name: str.
"""
self.rdmc.ui.printer("%s settings:\n" % ('Kerberos' if name == 'ActiveDirectory' else name))
self.rdmc.ui.printer("--------------------------------\n")
self.rdmc.ui.printer("Enabled: %s\n" % str(settings['ServiceEnabled']))
serviceaddress = settings['ServiceAddresses'][0]
self.rdmc.ui.printer("Service Address: %s\n" % (serviceaddress if serviceaddress else \
"Not Set"))
self.rdmc.ui.printer("Local Account Authorization: %s\n" % local_auth_setting)
if name.lower() == 'activedirectory':
address_settings = oem_settings['KerberosSettings']
self.rdmc.ui.printer("Port: %s\n" % address_settings['KDCServerPort'])
self.rdmc.ui.printer("Realm: %s\n" % (address_settings['KerberosRealm'] if \
address_settings['KerberosRealm'] else "Not Set"))
else:
address_settings = oem_settings['DirectorySettings']
self.rdmc.ui.printer("Port: %s\n" % address_settings['LdapServerPort'])
self.rdmc.ui.printer("Authentication Mode: %s\n" % \
address_settings['LdapAuthenticationMode'])
self.rdmc.ui.printer("Search Settings:\n")
try:
count = 1
for search in settings['LDAPService']['SearchSettings']["BaseDistinguishedNames"]:
self.rdmc.ui.printer("\tSearch %s: %s\n" % (count, search))
count += 1
except KeyError:
self.rdmc.ui.printer("\tNo Search Settings\n")
self.rdmc.ui.printer("Remote Role Mapping(s):\n")
for role in settings['RemoteRoleMapping']:
self.rdmc.ui.printer("\tLocal Role: %s\n" % role['LocalRole'])
self.rdmc.ui.printer("\tRemote Group: %s\n" % role['RemoteGroup'])
def role_helper(self, new_roles, curr_roles):
""" Helper to prepare adding and removing roles for patching
:param new_roles: dictionary of new roles to add or remove
:type new_roles: dict.
:param curr_roles: list of current roles on the system
:type curr_roles: list.
"""
final_roles = curr_roles
if 'add' in new_roles:
for role in new_roles['add']:
role = role.split(':', 1)
if not self.duplicate_group(role[1], curr_roles):
final_roles.append({"LocalRole":role[0], "RemoteGroup":role[1]})
else:
raise ResourceExists('Group DN "%s" already exists.' % role[1].split(':')[0])
if 'remove' in new_roles:
removed = False
for role in new_roles['remove']:
removed = False
for item in reversed(final_roles):
if item['LocalRole'] == role:
del final_roles[final_roles.index(item)]
removed = True
break
if not removed:
raise InvalidCommandLineError("Unable to find local role %s to delete" % role)
return final_roles
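    # Added illustrative example (hypothetical names): calling
    #   role_helper({'add': ['dirgroup9:CN=Admins,DC=example']}, [])
    # returns [{'LocalRole': 'dirgroup9', 'RemoteGroup': 'CN=Admins,DC=example'}];
    # split(':', 1) keeps any later colons (e.g. an optional :SID suffix) inside
    # the remote group portion.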
def duplicate_group(self, group_dn, curr_roles):
""" Checks if new role is a duplicate
:param group_dn: group domain name from user
:type group_dn: str.
:param curr_roles: list of current roles
:type curr_roles: list.
"""
group_dn = group_dn.split(':')[0]
for item in curr_roles:
comp_dn = item["RemoteGroup"].split(':')[0]
if comp_dn == group_dn:
return True
return False
def search_helper(self, new_searches, curr_searches):
""" Helper to prepare search strings for patching
        :param new_searches: dictionary of new searches to add
        :type new_searches: dict.
        :param curr_searches: dictionary of current search settings
        :type curr_searches: dict.
"""
final_searches = curr_searches
if 'add' in new_searches:
if 'BaseDistinguishedNames' in final_searches:
for search in new_searches['add']:
final_searches['BaseDistinguishedNames'].append(search)
else:
final_searches['BaseDistinguishedNames'] = new_searches['add']
elif 'remove' in new_searches:
to_remove = []
if 'BaseDistinguishedNames' not in curr_searches:
raise NoContentsFoundForOperationError("No search strings to remove")
for search in new_searches['remove']:
if search in curr_searches['BaseDistinguishedNames']:
to_remove.append(search)
else:
raise InvalidCommandLineError("Unable to find search %s to delete" % search)
for item in to_remove:
final_searches['BaseDistinguishedNames'].remove(item)
if not final_searches['BaseDistinguishedNames']:
sys.stdout.write('Attempting to delete all searches.\n')
final_searches['BaseDistinguishedNames'].append("")
return final_searches
def directoryvalidation(self, options):
""" directory validation function
:param options: command line options
:type options: list.
"""
self.cmdbase.login_select_validation(self, options)
def options_argument_group(self, parser):
""" Additional argument
:param parser: The parser to add the removeprivs option group to
:type parser: ArgumentParser/OptionParser
"""
parser.add_argument(
'-j',
'--json',
dest='json',
action="store_true",
help="Optionally include this flag if you wish to change the"\
" displayed output to JSON format. Preserving the JSON data"\
" structure makes the information easier to parse.",
default=False
)
def definearguments(self, customparser):
""" Wrapper function for new command main function
:param customparser: command line input
:type customparser: parser.
"""
if not customparser:
return
#self.cmdbase.add_login_arguments_group(customparser)
subcommand_parser = customparser.add_subparsers(dest='command')
default_parser = subcommand_parser.add_parser('default')
default_parser.add_argument(
'ldap_kerberos',
help="Specify LDAP or Kerberos configuration settings",
metavar='LDAP_KERBEROS',
nargs='?',
type= str,
default=None,
)
self.cmdbase.add_login_arguments_group(default_parser)
privilege_help='\n\nPRIVILEGES:\n\t1: Login\n\t2: Remote Console\n\t'\
'3: User Config\n\t4: iLO (Manager) Config\n\t5: Virtual Media\n\t'\
'6: Virtual Power and Reset\n\t7: Host NIC Config\n\t8: Host Bios Config\n\t9: '\
'Host Storage Config\n\t10: System Recovery Config\n\t11: Self Password Change\n\t'\
'12: Configure Components\n\n\tLOCAL ROLES:\n\tReadOnly\n\tOperator\n\tAdministrator'\
'\n\n\tNOTE: The Self Password Change privilege is automatically added to roles with '\
'the Login privilege.'
ldap_help='\tShow, add or modify properties pertaining to iLO LDAP Configuration.'
ldap_parser = subcommand_parser.add_parser(
__subparsers__[0],
help=ldap_help,
description=ldap_help+'\n\n\tSimply show LDAP configuration:\n\t\tdirectory ldap\n\n'\
'To modify the LDAP username, password, service address, search strings or '\
'enable/disable LDAP.\n\t\tdirectory ldap <username> <password> '\
'--serviceaddress x.x.y.z --addsearch string1, string2 --enable.\n\n\tTo add role '\
            'mapping.\n\t\tdirectory ldap <username> <password> --addrolemap '\
            '"LocalRole1:RemoteGroup3,LocalRole2:RemoteGroup4:SID".\n\n\tTo remove role mapping.\n\t\t'\
            'directory ldap <username> <password> --removerolemap LocalRole1,LocalRole2.'\
+privilege_help,
formatter_class=RawDescriptionHelpFormatter
)
ldap_parser.add_argument(
'ldap_username',
            help='The LDAP username used in verifying AD (optional outside of \'--enable\' and '\
'\'--disable\')',
metavar='USERNAME',
nargs='?',
type= str,
default=None,
)
ldap_parser.add_argument(
'ldap_password',
            help='The LDAP password used in verifying AD (optional outside of \'--enable\' and ' \
'\'--disable\')',
metavar='PASSWORD',
nargs='?',
type= str,
default=None,
)
ldap_parser.add_argument(
'--enable',
'--disable',
dest='enable',
type=str,
nargs='*',
action=_DirectoryParse,
            help="Optionally add this flag to enable or disable LDAP services.",
default=None,
)
ldap_parser.add_argument(
'--addsearch',
'--removesearch',
dest='search',
nargs='*',
action=_DirectoryParse,
help="Optionally add this flag to add or remove search strings for "\
"generic LDAP services.",
type=str,
default={},
)
ldap_parser.add_argument(
'--serviceaddress',
dest='serviceaddress',
help='Optionally include this flag to set the service address of the LDAP Services.',
default=None,
)
ldap_parser.add_argument(
'--port',
dest='port',
help="Optionally include this flag to set the port of the LDAP services.",
default=None,
)
ldap_parser.add_argument(
'--addrolemap',
'--removerolemap',
dest='roles',
nargs='*',
action=_DirectoryParse,
            help='Optionally add this flag to add or remove Role Mapping(s) for the LDAP and '\
            'Kerberos services. Remove EX: --removerolemap LocalRole1,LocalRole2 '\
            'Add EX: --addrolemap "LocalRole1:RemoteGroup3,LocalRole2:RemoteGroup4"\n\n'
            'SID EX: --addrolemap "LocalRole1:RemoteGroup2:SID,LocalRole2:RemoteGroup5:SID"'\
            '\n\nNOTE 1: Create a custom local role group (and subsequently assign it to a role map) '\
            'by adding the numbers associated with the desired privilege(s), separated by a semicolon '\
            '(;).\n\nNOTE 2: SID is optional.',
type=str,
default={},
)
ldap_parser.add_argument(
'--enablelocalauth',
'--disablelocalauth',
dest='localauth',
nargs='*',
type=str,
action=_DirectoryParse,
help="Optionally include this flag if you wish to enable or disable authentication "\
"for local accounts.",
default=None
)
ldap_parser.add_argument(
'--authentication',
dest='authmode',
choices=['DefaultSchema', 'ExtendedSchema'],
            help="Optionally include this flag if you would like to choose an LDAP authentication "
            "mode. Valid choices are: DefaultSchema (Directory Default Schema or Schema-free) or "\
"ExtendedSchema (HPE Extended Schema).",
default=None
)
self.cmdbase.add_login_arguments_group(ldap_parser)
self.options_argument_group(ldap_parser)
kerberos_help='Show, add or modify properties pertaining to AD Kerberos Configuration.'
kerberos_parser = subcommand_parser.add_parser(
__subparsers__[1],
help=kerberos_help,
            description=kerberos_help+'\n\nExamples:\n\nShow Kerberos specific AD/LDAP configuration '\
            'settings.\n\tdirectory kerberos\n\nShow current AD Kerberos configuration.'\
            '\n\tdirectory kerberos\n\nAlter the Kerberos service address, AD realm and port.\n\t'\
'directory kerberos --serviceaddress x.x.y.z --port 8888 --realm adrealm1',
formatter_class=RawDescriptionHelpFormatter
)
kerberos_parser.add_argument(
'--serviceaddress',
dest='serviceaddress',
help="Optionally include this flag to set the Kerberos serviceaddress.",
default=None,
)
kerberos_parser.add_argument(
'--port',
dest='port',
help="Optionally include this flag to set the Kerberos port.",
default=None,
)
kerberos_parser.add_argument(
'--realm',
dest='realm',
help="Optionally include this flag to set the Kerberos realm.",
default=None
)
kerberos_parser.add_argument(
'--keytab',
dest='keytab',
            help="Optionally include this flag to import a Kerberos Keytab by its URI location.",
default=""
)
kerberos_parser.add_argument(
'--enable',
'--disable',
dest='enable',
type=str,
nargs='*',
action=_DirectoryParse,
help="Optionally add this flag to enable or disable Kerberos services.",
default=None,
)
self.cmdbase.add_login_arguments_group(kerberos_parser)
self.options_argument_group(kerberos_parser)
directory_test_help='Start, stop or view results of an AD/LDAP test which include: ICMP, '\
'Domain Resolution, Connectivity, Authentication, Bindings, LOM Object and User '\
'Context tests.'
directory_test_parser = subcommand_parser.add_parser(
__subparsers__[2],
help=directory_test_help,
            description=directory_test_help+'\n\nExamples:\n\nStart a directory test:\n\tdirectory test '\
'start\n\nStop a directory test:\n\tdirectory test stop\n\nView results of the last '\
'directory test:\n\tdirectory test viewresults',
formatter_class=RawDescriptionHelpFormatter
)
directory_test_parser.add_argument(
'start_stop_view',
help="Start, stop, or view results on an AD/LDAP test.",
            metavar='START, STOP, VIEWRESULTS',
default='viewresults'
)
self.cmdbase.add_login_arguments_group(directory_test_parser)
|
python
|
from __future__ import annotations
from discord.ext import commands
__all__ = ("HierarchyFail",)
class HierarchyFail(commands.CheckFailure):
...
|
python
|
import numpy as np
import pandas as pd
import os
import scipy.io as spio
import math
from random import sample
class ProcessData:
def __init__(self, data_set_directory, columns_to_use):
self.directory = data_set_directory
self.selected_columns = columns_to_use
self.datasets = []
self.all_common_sensors = []
#self.read_data()
def find_common_sensors(self):
files = os.listdir(self.directory)
for file in files:
if file.endswith(".csv"):
df = pd.read_csv(self.directory+'/'+file)
sensors = list(df.columns[1:])
if len(self.all_common_sensors) == 0:
self.all_common_sensors = sensors
else:
self.all_common_sensors = list(set(self.all_common_sensors) & set(sensors))
sensors = []
for sensor in self.all_common_sensors:
sensors.append([])
for file in files:
if file.endswith(".csv"):
sensors[-1].append(sensor)
self.all_common_sensors = sensors
return self.all_common_sensors
def read_data(self, considered_files):
files = os.listdir(self.directory)
count = 0
for file in files:
if file.endswith(".csv") and count in considered_files:
df = pd.read_csv(self.directory+'/'+file)
unselected_columns = list(df.columns)
for x in [unselected_columns[0]] + self.selected_columns:
#print(file, x)
unselected_columns.remove(x)
df = df.drop(columns=unselected_columns)
df['fire_label'] = np.ones(df.iloc[:, 0].shape)
df.iloc[np.where(df.iloc[:, 0] < 10), -1] = 0
data = self.process_data(df)
self.datasets.append(data)
if file.endswith(".csv"):
count += 1
def process_data(self, df):
num_sensors = len(df.columns)-2
x = []
for i in range(1, num_sensors + 1):
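            # Shape note (added comment): each stacked pair below couples the
            # sensor reading at time t (rows 1:) with the reading at t-1
            # (rows :-1), giving an array of shape (N-1, 2, 1) per sensor.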
x.append(np.array([np.array(df.iloc[1:, i:i + 1]), np.array(df.iloc[:-1, i:i + 1])])[:, :, 0].T[:, :, np.newaxis])
y = np.array(df.iloc[1:, -1:])
time = np.array(df.iloc[1:, 0:1])
return [x, y, time]
def shape_to_input_output(self, sensors):
x = []
for j in sensors:
count = 0
for i in range(len(self.datasets)):
if count == 0:
x.append(self.datasets[i][0][j])
y = self.datasets[i][1]
time = self.datasets[i][2]
else:
x[-1] = np.concatenate((x[-1], self.datasets[i][0][j]), axis=0)
y = np.concatenate((y, self.datasets[i][1]), axis=0)
time = np.concatenate((time, self.datasets[i][2]), axis=0)
count += 1
x[-1] = x[-1]/np.max(x[-1])
rand_samples = sample(range(y.shape[0]), y.shape[0])
#spio.savemat('rand_indices.mat', {'indices': rand_samples})
'''
# To load the previously saved random indices
rand_samples = spio.loadmat('rand_indices.mat')
rand_samples = list(rand_samples['indices'][0])
'''
y = y[rand_samples, :]
time = time[rand_samples, :]
for j in sensors:
x[j] = x[j][rand_samples, :, :]
return x, y, time
|
python
|
# coding=utf-8
""" Demo app, to show OpenCV video and PySide2 widgets together."""
import sys
from PySide2.QtWidgets import QApplication
from sksurgerycore.configuration.configuration_manager import \
ConfigurationManager
from sksurgerybard.widgets.bard_overlay_app import BARDOverlayApp
def run_demo(config_file, calib_dir):
""" Prints command line args, and launches main screen."""
app = QApplication([])
configuration = None
if config_file is not None:
configurer = ConfigurationManager(config_file)
configuration = configurer.get_copy()
viewer = BARDOverlayApp(configuration, calib_dir)
viewer.start()
sys.exit(app.exec_())
|
python
|
# coding: utf-8
import sys
k_bit_rate_num_bits = [ 0, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 32 ]
k_highest_bit_rate = len(k_bit_rate_num_bits) - 1
k_lowest_bit_rate = 1
k_num_bit_rates = len(k_bit_rate_num_bits)
k_invalid_bit_rate = 255
# This code assumes that rotations, translations, and scales are packed on 3 components (e.g. quat drop w)
if __name__ == "__main__":
if sys.version_info < (3, 4):
print('Python 3.4 or higher needed to run this script')
sys.exit(1)
permutation_tries = []
permutation_tries_no_scale = []
for rotation_bit_rate in range(k_num_bit_rates):
for translation_bit_rate in range(k_num_bit_rates):
transform_size = k_bit_rate_num_bits[rotation_bit_rate] * 3 + k_bit_rate_num_bits[translation_bit_rate] * 3
permutation_tries_no_scale.append((transform_size, rotation_bit_rate, translation_bit_rate))
for scale_bit_rate in range(k_num_bit_rates):
transform_size = k_bit_rate_num_bits[rotation_bit_rate] * 3 + k_bit_rate_num_bits[translation_bit_rate] * 3 + k_bit_rate_num_bits[scale_bit_rate] * 3
permutation_tries.append((transform_size, rotation_bit_rate, translation_bit_rate, scale_bit_rate))
# Sort by transform size, then by each bit rate
permutation_tries.sort()
permutation_tries_no_scale.sort()
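    # Illustrative note (added): with k_bit_rate_num_bits above, a no-scale
    # entry such as { 3, 5 } means 5 bits per rotation component and 7 bits per
    # translation component, i.e. 3*5 + 3*7 = 36 bits per transform, matching
    # the comment emitted for that row below.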
print('constexpr uint8_t k_local_bit_rate_permutations_no_scale[{}][2] ='.format(len(permutation_tries_no_scale)))
print('{')
for transform_size, rotation_bit_rate, translation_bit_rate in permutation_tries_no_scale:
print('\t{{ {}, {} }},\t\t// {} bits per transform'.format(rotation_bit_rate, translation_bit_rate, transform_size))
print('};')
print()
print('constexpr uint8_t k_local_bit_rate_permutations[{}][3] ='.format(len(permutation_tries)))
print('{')
for transform_size, rotation_bit_rate, translation_bit_rate, scale_bit_rate in permutation_tries:
print('\t{{ {}, {}, {} }},\t\t// {} bits per transform'.format(rotation_bit_rate, translation_bit_rate, scale_bit_rate, transform_size))
print('};')
|
python
|
#!/usr/bin/python
"""
Date utilities to do fast datetime parsing.
Copyright (C) 2013 Byron Platt
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# TODO: At the moment this has been targeted toward the datetime formats used by
# NNTP as it was developed for use in a NNTP reader. There is, however, no
# reason why this module could not be extended to include other formats.
import calendar
import datetime
import dateutil.parser
import dateutil.tz
class _tzgmt(dateutil.tz.tzutc):
"""GMT timezone.
"""
def tzname(self, dt):
return "GMT"
TZ_LOCAL = dateutil.tz.tzlocal()
"""Local timezone (at the time the module was loaded)"""
TZ_UTC = dateutil.tz.tzutc()
"""UTC timezone."""
TZ_GMT = _tzgmt()
"""GMT timezone."""
_months = dict(
jan=1, feb=2, mar=3, apr=4, may=5, jun=6,
jul=7, aug=8, sep=9, oct=10,nov=11,dec=12
)
"""Conversion dictionary for english abbreviated month to integer."""
def _offset(value):
"""Parse timezone to offset in seconds.
Args:
value: A timezone in the '+0000' format. An integer would also work.
Returns:
The timezone offset from GMT in seconds as an integer.
"""
o = int(value)
if o == 0:
return 0
a = abs(o)
s = a*36+(a%100)*24
return (o//a)*s
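# Worked examples (added for clarity; values follow directly from the formula
# above, treating '+HHMM' as hours and minutes east of GMT):
#   _offset('+0530')  ->  530*36 + 30*24 =  19800 seconds (+5 h 30 m)
#   _offset('-0200')  -> -(200*36 + 0*24) = -7200 seconds (-2 h 00 m)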
def timestamp_d_b_Y_H_M_S(value):
"""Convert timestamp string to time in seconds since epoch.
    Timestamp strings like '18 Jun 2013 12:00:00 GMT' can be converted by this
    function.
Args:
value: A timestamp string in the format '%d %b %Y %H:%M:%S GMT'.
Returns:
The time in seconds since epoch as an integer.
Raises:
ValueError: If timestamp is invalid.
        KeyError: If the abbreviated month is invalid.
    Note: The timezone is ignored; it is simply assumed to be UTC/GMT.
"""
d, b, Y, t, Z = value.split()
H, M, S = t.split(":")
return int(calendar.timegm((
int(Y), _months[b.lower()], int(d), int(H), int(M), int(S), 0, 0, 0
)))
def datetimeobj_d_b_Y_H_M_S(value):
"""Convert timestamp string to a datetime object.
    Timestamp strings like '18 Jun 2013 12:00:00 GMT' can be converted by this
    function.
Args:
value: A timestamp string in the format '%d %b %Y %H:%M:%S GMT'.
Returns:
A datetime object.
Raises:
ValueError: If timestamp is invalid.
        KeyError: If the abbreviated month is invalid.
    Note: The timezone is ignored; it is simply assumed to be UTC/GMT.
"""
d, b, Y, t, Z = value.split()
H, M, S = t.split(":")
return datetime.datetime(
int(Y), _months[b.lower()], int(d), int(H), int(M), int(S), tzinfo=TZ_GMT
)
def timestamp_a__d_b_Y_H_M_S_z(value):
"""Convert timestamp string to time in seconds since epoch.
    Timestamp strings like 'Tue, 18 Jun 2013 22:00:00 +1000' can be converted
    by this function.
Args:
value: A timestamp string in the format '%a, %d %b %Y %H:%M:%S %z'.
Returns:
The time in seconds since epoch as an integer.
Raises:
ValueError: If timestamp is invalid.
        KeyError: If the abbreviated month is invalid.
"""
a, d, b, Y, t, z = value.split()
H, M, S = t.split(":")
return int(calendar.timegm((
int(Y), _months[b.lower()], int(d), int(H), int(M), int(S), 0, 0, 0
))) - _offset(z)
def datetimeobj_a__d_b_Y_H_M_S_z(value):
"""Convert timestamp string to a datetime object.
    Timestamp strings like 'Tue, 18 Jun 2013 22:00:00 +1000' can be converted
    by this function.
Args:
value: A timestamp string in the format '%a, %d %b %Y %H:%M:%S %z'.
Returns:
A datetime object.
Raises:
ValueError: If timestamp is invalid.
        KeyError: If the abbreviated month is invalid.
"""
a, d, b, Y, t, z = value.split()
H, M, S = t.split(":")
return datetime.datetime(
int(Y), _months[b.lower()], int(d), int(H), int(M), int(S),
tzinfo=dateutil.tz.tzoffset(None, _offset(z))
)
def timestamp_YmdHMS(value):
"""Convert timestamp string to time in seconds since epoch.
    Timestamp strings like '20130618120000' can be converted by this
    function.
Args:
value: A timestamp string in the format '%Y%m%d%H%M%S'.
Returns:
The time in seconds since epoch as an integer.
Raises:
ValueError: If timestamp is invalid.
Note: The timezone is assumed to be UTC/GMT.
"""
i = int(value)
S = i
M = S//100
H = M//100
d = H//100
m = d//100
Y = m//100
return int(calendar.timegm((
Y % 10000, m % 100, d % 100, H % 100, M % 100, S % 100, 0, 0, 0)
))
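# Worked example (added): for value = '20130618120000' the integer-division
# chain peels off two digits at a time, so S % 100 = 0, M % 100 = 0,
# H % 100 = 12, d % 100 = 18, m % 100 = 6 and Y % 10000 = 2013, and
# calendar.timegm() then yields 1371556800.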
def datetimeobj_YmdHMS(value):
"""Convert timestamp string to a datetime object.
    Timestamp strings like '20130618120000' can be converted by this
    function.
Args:
value: A timestamp string in the format '%Y%m%d%H%M%S'.
Returns:
A datetime object.
Raises:
ValueError: If timestamp is invalid.
Note: The timezone is assumed to be UTC/GMT.
"""
i = int(value)
S = i
M = S//100
H = M//100
d = H//100
m = d//100
Y = m//100
return datetime.datetime(
Y % 10000, m % 100, d % 100, H % 100, M % 100, S % 100, tzinfo=TZ_GMT
)
def timestamp_epoch(value):
    """Convert timestamp string to time in seconds since epoch.
    Timestamp strings like '1383470155' can be converted by this
    function.
Args:
value: A timestamp string as seconds since epoch.
Returns:
The time in seconds since epoch as an integer.
"""
return int(value)
def datetimeobj_epoch(value):
"""Convert timestamp string to a datetime object.
    Timestamp strings like '1383470155' can be converted by this
    function.
Args:
value: A timestamp string as seconds since epoch.
Returns:
A datetime object.
Raises:
ValueError: If timestamp is invalid.
"""
return datetime.datetime.utcfromtimestamp(int(value)).replace(tzinfo=TZ_GMT)
def timestamp_fmt(value, fmt):
"""Convert timestamp string to time in seconds since epoch.
    Wraps datetime.datetime.strptime(). This is slow; use the other
    timestamp_*() functions if possible.
Args:
value: A timestamp string.
fmt: A timestamp format string.
Returns:
The time in seconds since epoch as an integer.
"""
return int(calendar.timegm(
datetime.datetime.strptime(value, fmt).utctimetuple()
))
def datetimeobj_fmt(value, fmt):
"""Convert timestamp string to a datetime object.
    Wrapper for datetime.datetime.strptime(). This is slow; use the other
    datetimeobj_*() functions if possible.
Args:
value: A timestamp string.
fmt: A timestamp format string.
Returns:
A datetime object.
"""
return datetime.datetime.strptime(value, fmt)
def timestamp_any(value):
"""Convert timestamp string to time in seconds since epoch.
    Most timestamp strings are supported; in fact this wraps the
    dateutil.parser.parse() method. This is SLOW; use the other timestamp_*()
    functions if possible.
Args:
value: A timestamp string.
Returns:
The time in seconds since epoch as an integer.
"""
return int(calendar.timegm(dateutil.parser.parse(value).utctimetuple()))
def datetimeobj_any(value):
"""Convert timestamp string to a datetime object.
    Most timestamp strings are supported; in fact this is a wrapper for the
    dateutil.parser.parse() method. This is SLOW; use the other datetimeobj_*()
    functions if possible.
Args:
value: A timestamp string.
Returns:
A datetime object.
"""
return dateutil.parser.parse(value)
_timestamp_formats = {
"%d %b %Y %H:%M:%S" : timestamp_d_b_Y_H_M_S,
"%a, %d %b %Y %H:%M:%S %z": timestamp_a__d_b_Y_H_M_S_z,
"%Y%m%d%H%M%S" : timestamp_YmdHMS,
"epoch" : timestamp_epoch,
}
def timestamp(value, fmt=None):
"""Parse a datetime to a unix timestamp.
Uses fast custom parsing for common datetime formats or the slow dateutil
parser for other formats. This is a trade off between ease of use and speed
    and is very useful for fast parsing of timestamp strings whose format may
    be standard but varied or unknown prior to parsing.
Common formats include:
1 Feb 2010 12:00:00 GMT
Mon, 1 Feb 2010 22:00:00 +1000
20100201120000
1383470155 (seconds since epoch)
See the other timestamp_*() functions for more details.
Args:
value: A string representing a datetime.
fmt: A timestamp format string like for time.strptime().
Returns:
        The time in seconds since epoch as an integer for the value specified.
"""
if fmt:
return _timestamp_formats.get(fmt,
lambda v: timestamp_fmt(v, fmt)
)(value)
l = len(value)
if 19 <= l <= 24 and value[3] == " ":
# '%d %b %Y %H:%M:%Sxxxx'
try:
return timestamp_d_b_Y_H_M_S(value)
except (KeyError, ValueError, OverflowError):
pass
if 30 <= l <= 31:
# '%a, %d %b %Y %H:%M:%S %z'
try:
return timestamp_a__d_b_Y_H_M_S_z(value)
except (KeyError, ValueError, OverflowError):
pass
if l == 14:
# '%Y%m%d%H%M%S'
try:
return timestamp_YmdHMS(value)
except (ValueError, OverflowError):
pass
# epoch timestamp
try:
return timestamp_epoch(value)
except ValueError:
pass
# slow version
return timestamp_any(value)
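# Hedged usage sketch (added; relies only on the functions defined above):
#   timestamp('18 Jun 2013 12:00:00 GMT')            -> 1371556800
#   timestamp('Tue, 18 Jun 2013 22:00:00 +1000')     -> 1371556800
#   timestamp('20130618120000', fmt='%Y%m%d%H%M%S')  -> 1371556800
# Passing fmt skips the length-based detection and dispatches directly through
# _timestamp_formats to the matching fast parser.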
_datetimeobj_formats = {
"%d %b %Y %H:%M:%S" : datetimeobj_d_b_Y_H_M_S,
"%a, %d %b %Y %H:%M:%S %z": datetimeobj_a__d_b_Y_H_M_S_z,
"%Y%m%d%H%M%S" : datetimeobj_YmdHMS,
"epoch" : datetimeobj_epoch,
}
def datetimeobj(value, fmt=None):
"""Parse a datetime to a datetime object.
Uses fast custom parsing for common datetime formats or the slow dateutil
parser for other formats. This is a trade off between ease of use and speed
    and is very useful for fast parsing of timestamp strings whose format may
    be standard but varied or unknown prior to parsing.
Common formats include:
1 Feb 2010 12:00:00 GMT
Mon, 1 Feb 2010 22:00:00 +1000
20100201120000
1383470155 (seconds since epoch)
See the other datetimeobj_*() functions for more details.
Args:
        value: A string representing a datetime.
        fmt: A timestamp format string like for time.strptime().
Returns:
A datetime object.
"""
if fmt:
return _datetimeobj_formats.get(fmt,
lambda v: datetimeobj_fmt(v, fmt)
)(value)
l = len(value)
if 19 <= l <= 24 and value[3] == " ":
# '%d %b %Y %H:%M:%Sxxxx'
try:
return datetimeobj_d_b_Y_H_M_S(value)
except (KeyError, ValueError):
pass
if 30 <= l <= 31:
# '%a, %d %b %Y %H:%M:%S %z'
try:
return datetimeobj_a__d_b_Y_H_M_S_z(value)
except (KeyError, ValueError):
pass
if l == 14:
# '%Y%m%d%H%M%S'
try:
return datetimeobj_YmdHMS(value)
except ValueError:
pass
# epoch timestamp
try:
return datetimeobj_epoch(value)
except ValueError:
pass
# slow version
return datetimeobj_any(value)
# testing
if __name__ == "__main__":
import sys
import timeit
log = sys.stdout.write
times = (
datetime.datetime.now(TZ_UTC),
datetime.datetime.now(TZ_GMT),
datetime.datetime.now(TZ_LOCAL),
datetime.datetime.now(),
)
# check timezones
for t in times:
log("%s\n" % t.strftime("%Y-%m-%d %H:%M:%S %Z"))
# TODO validate values (properly)
# check speed
values = (
{
"name": "Implemented Format",
"time": "20130624201912",
"fmt" : "%Y%m%d%H%M%S"
},
{
"name": "Unimplemented Format",
"time": "2013-06-24 20:19:12",
"fmt" : "%Y-%m-%d %H:%M:%S"
}
)
tests = (
{
"name" : "GMT timestamp (strptime version)",
"test" : "int(calendar.timegm(datetime.datetime.strptime('%(time)s', '%(fmt)s').utctimetuple()))",
"setup": "import calendar, datetime",
},
{
"name" : "GMT timestamp (dateutil version)",
"test" : "int(calendar.timegm(dateutil.parser.parse('%(time)s').utctimetuple()))",
"setup": "import calendar, dateutil.parser",
},
{
"name" : "GMT timestamp (fast version)",
"test" : "timestamp('%(time)s')",
"setup": "from __main__ import timestamp",
},
{
"name" : "GMT timestamp (fast version with format hint)",
"test" : "timestamp('%(time)s', '%(fmt)s')",
"setup": "from __main__ import timestamp",
},
{
"name" : "GMT datetime object (strptime version)",
"test" : "datetime.datetime.strptime('%(time)s', '%(fmt)s').replace(tzinfo=TZ_GMT)",
"setup": "import datetime; from __main__ import TZ_GMT",
},
{
"name" : "GMT datetime object (dateutil version)",
"test" : "dateutil.parser.parse('%(time)s').replace(tzinfo=TZ_GMT)",
"setup": "import dateutil.parser; from __main__ import TZ_GMT",
},
{
"name" : "GMT datetime object (fast version)",
"test" : "datetimeobj('%(time)s')",
"setup": "from __main__ import datetimeobj",
},
{
"name" : "GMT datetime object (fast version with format hint)",
"test" : "datetimeobj('%(time)s', '%(fmt)s')",
"setup": "from __main__ import datetimeobj",
}
)
iters = 100000
for v in values:
log("%(name)s (%(fmt)s)\n" % v)
for t in tests:
log(" %(name)-52s" % t)
elapsed = timeit.timeit(t["test"] % v, t["setup"], number=iters)
log("%0.3f sec (%d loops @ %0.3f usec)\n" % (
elapsed, iters, (elapsed/iters)*1000000
))
|
python
|
'''OpenGL extension SGIS.texture_filter4
Overview (from the spec)
This extension allows 1D and 2D textures to be filtered using an
application-defined, four sample per dimension filter. (In addition to
the NEAREST and LINEAR filters defined in the original GL Specification.)
Such filtering results in higher image quality. It is defined only
for non-mipmapped filters. The filter that is specified must be
symmetric and separable (in the 2D case).
The official definition of this extension is available here:
http://oss.sgi.com/projects/ogl-sample/registry/SGIS/texture_filter4.txt
Automatically generated by the get_gl_extensions script, do not edit!
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions
from OpenGL.GL import glget
import ctypes
EXTENSION_NAME = 'GL_SGIS_texture_filter4'
GL_FILTER4_SGIS = constant.Constant( 'GL_FILTER4_SGIS', 0x8146 )
GL_TEXTURE_FILTER4_SIZE_SGIS = constant.Constant( 'GL_TEXTURE_FILTER4_SIZE_SGIS', 0x8147 )
glGetTexFilterFuncSGIS = platform.createExtensionFunction(
'glGetTexFilterFuncSGIS', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, arrays.GLfloatArray,),
doc = 'glGetTexFilterFuncSGIS( GLenum(target), GLenum(filter), GLfloatArray(weights) ) -> None',
argNames = ('target', 'filter', 'weights',),
)
glTexFilterFuncSGIS = platform.createExtensionFunction(
'glTexFilterFuncSGIS', dll=platform.GL,
extension=EXTENSION_NAME,
resultType=None,
argTypes=(constants.GLenum, constants.GLenum, constants.GLsizei, arrays.GLfloatArray,),
doc = 'glTexFilterFuncSGIS( GLenum(target), GLenum(filter), GLsizei(n), GLfloatArray(weights) ) -> None',
argNames = ('target', 'filter', 'n', 'weights',),
)
def glInitTextureFilter4SGIS():
'''Return boolean indicating whether this extension is available'''
return extensions.hasGLExtension( EXTENSION_NAME )
|
python
|
from support import print_func
import using_name
print_func("Jack")
|
python
|
"""Various utilities used throughout the code.
Here go various utilities that don't belong directly in any class, the
photometry utils module, or the SED model module.
"""
import os
import pickle
import random
import time
from contextlib import closing
import numpy as np
from scipy.special import erf
from scipy.stats import gaussian_kde, norm
from termcolor import colored
def estimate_pdf(distribution):
"""Estimates the PDF of a distribution using a gaussian KDE.
Parameters
----------
distribution: array_like
The distribution.
Returns
-------
xx: array_like
The x values of the PDF.
pdf: array_like
The estimated PDF.
"""
kde = gaussian_kde(distribution)
xmin, xmax = distribution.min(), distribution.max()
xx = np.linspace(xmin, xmax, 300)
pdf = kde(xx)
return xx, pdf
def estimate_cdf(distribution, hdr=False):
"""Estimate the CDF of a distribution."""
h, hx = np.histogram(distribution, density=True, bins=499)
cdf = np.zeros(500) # ensure the first value of the CDF is 0
if hdr:
idx = np.argsort(h)[::-1]
cdf[1:] = np.cumsum(h[idx]) * np.diff(hx)
else:
cdf[1:] = np.cumsum(h) * np.diff(hx)
return cdf
def norm_fit(x, mu, sigma, A):
"""Gaussian function."""
return A * norm.pdf(x, loc=mu, scale=sigma)
def credibility_interval(post, alpha=1.):
"""Calculate bayesian credibility interval.
Parameters:
-----------
post : array_like
The posterior sample over which to calculate the bayesian credibility
interval.
    alpha : float, optional
        Confidence level in units of sigma (alpha=1 corresponds to ~68.3%).
Returns:
--------
med : float
Median of the posterior.
low : float
Lower part of the credibility interval.
up : float
Upper part of the credibility interval.
"""
z = erf(alpha / np.sqrt(2))
lower_percentile = 100 * (1 - z) / 2
upper_percentile = 100 * (1 + z) / 2
low, med, up = np.percentile(
post, [lower_percentile, 50, upper_percentile]
)
return med, low, up
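# Added note: for alpha = 1, erf(1 / sqrt(2)) ~= 0.6827, so the percentiles
# above are ~15.87 and ~84.13, i.e. the usual 1-sigma band around the median;
# alpha = 3 widens the coverage to ~99.73%.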
def credibility_interval_hdr(xx, pdf, cdf, sigma=1.):
"""Calculate the highest density region for an empirical distribution.
Reference: Hyndman, Rob J. 1996
Parameters
----------
xx: array_like
The x values of the PDF (and the y values of the CDF).
pdf: array_like
The PDF of the distribution.
cdf: array_like
The CDF of the distribution.
sigma: float
The confidence level in sigma notation. (e.g. 1 sigma = 68%)
Returns
-------
best: float
The value corresponding to the peak of the posterior distribution.
low: float
The minimum value of the HDR.
high: float
The maximum value of the HDR.
    Note: The HDR yields more robust credible regions for multimodal
    distributions. For distributions that are symmetric about the mean it is
    identical to the usual probability regions, so using it should lead to
    more realistic errorbars and 3-sigma intervals for multimodal outputs.
"""
# Get best fit value
best = xx[np.argmax(pdf)]
z = erf(sigma / np.sqrt(2))
# Sort the pdf in reverse order
idx = np.argsort(pdf)[::-1]
# Find where the CDF reaches 100*z%
idx_hdr = np.where(cdf >= z)[0][0]
# Isolate the HDR
hdr = pdf[idx][:idx_hdr]
# Get the minimum density
hdr_min = hdr.min()
# Get CI
low = xx[pdf > hdr_min].min()
high = xx[pdf > hdr_min].max()
return best, low, high
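# Hedged usage sketch (added; mirrors how out_filler() below chains these
# helpers -- the `samples` array is hypothetical):
#   samples = np.random.default_rng(0).normal(size=5000)
#   xx, pdf = estimate_pdf(samples)
#   cdf = estimate_cdf(samples, hdr=True)  # CDF over density-sorted bins
#   best, low, high = credibility_interval_hdr(xx, pdf, cdf, sigma=1.)
# For a unimodal Gaussian sample this recovers roughly the mean and the
# +/- 1 sigma bounds.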
def display_star_fin(star, c):
"""Display stellar information."""
temp, temp_e = star.temp, star.temp_e
rad, rad_e = star.rad, star.rad_e
plx, plx_e = star.plx, star.plx_e
lum, lum_e = star.lum, star.lum_e
dist, dist_e = star.dist, star.dist_e
print(colored(f'\t\t\tGaia DR2 ID : {star.g_id}', c))
if star.tic:
print(colored(f'\t\t\tTIC : {star.tic}', c))
if star.kic:
print(colored(f'\t\t\tKIC : {star.kic}', c))
print(colored('\t\t\tGaia Effective temperature : ', c), end='')
print(colored(f'{temp:.3f} +/- {temp_e:.3f}', c))
if rad is not None:
print(colored('\t\t\tGaia Stellar radius : ', c), end='')
print(colored(f'{rad:.3f} +/- {rad_e:.3f}', c))
if lum is not None:
print(colored('\t\t\tGaia Stellar Luminosity : ', c), end='')
print(colored(f'{lum:.3f} +/- {lum_e:.3f}', c))
print(colored('\t\t\tGaia Parallax : ', c), end='')
print(colored(f'{plx:.3f} +/- {plx_e:.3f}', c))
print(colored('\t\t\tBailer-Jones distance : ', c), end='')
print(colored(f'{dist:.3f} +/- {dist_e:.3f}', c))
print(colored('\t\t\tMaximum Av : ', c), end='')
print(colored(f'{star.Av:.3f}', c))
print('')
pass
def display_star_init(star, c):
"""Display stellar information."""
print(colored('\n\t\t#####################################', c))
print(colored('\t\t## ARIADNE ##', c))
print(colored('\t\t#####################################', c))
print(colored(' spectrAl eneRgy dIstribution', c), end=' ')
print(colored('bAyesian moDel averagiNg fittEr', c))
print(colored('\n\t\t\tAuthor : Jose Vines', c))
print(colored('\t\t\tContact : jose . vines at ug . uchile . cl', c))
print(colored('\t\t\tStar : ', c), end='')
print(colored(star.starname, c))
pass
def display_routine(engine, live_points, dlogz, ndim, bound=None, sample=None,
nthreads=None, dynamic=None):
"""Display program information.
What is displayed is:
Program name
Program author
Star selected
Algorithm used (i.e. Multinest or Dynesty)
Setup used (i.e. Live points, dlogz tolerance)
"""
colors = [
'red', 'green', 'blue', 'yellow',
'grey', 'magenta', 'cyan', 'white'
]
c = random.choice(colors)
if engine == 'multinest':
engine = 'MultiNest'
if engine == 'dynesty':
engine = 'Dynesty'
print(colored('\n\t\t*** EXECUTING MAIN FITTING ROUTINE ***', c))
print(colored('\t\t\tSelected engine : ', c), end='')
print(colored(engine, c))
print(colored('\t\t\tLive points : ', c), end='')
print(colored(str(live_points), c))
print(colored('\t\t\tlog Evidence tolerance : ', c), end='')
print(colored(str(dlogz), c))
print(colored('\t\t\tFree parameters : ', c), end='')
print(colored(str(ndim), c))
if engine == 'Dynesty' or engine == 'Bayesian Model Averaging':
print(colored('\t\t\tBounding : ', c), end='')
print(colored(bound, c))
print(colored('\t\t\tSampling : ', c), end='')
print(colored(sample, c))
print(colored('\t\t\tN threads : ', c), end='')
print(colored(nthreads, c))
if dynamic:
print(colored('\t\t\tRunning the Dynamic Nested Sampler', c))
print('')
pass
def end(coordinator, elapsed_time, out_folder, engine, use_norm):
"""Display end of run information.
What is displayed is:
best fit parameters
elapsed time
Spectral type
"""
colors = [
'red', 'green', 'blue', 'yellow',
'grey', 'magenta', 'cyan', 'white'
]
c = random.choice(colors)
if use_norm:
order = np.array(['teff', 'logg', 'z', 'norm', 'rad', 'Av'])
else:
order = np.array(
['teff', 'logg', 'z', 'dist', 'rad', 'Av']
)
if engine == 'Bayesian Model Averaging':
res_dir = f'{out_folder}/BMA.pkl'
else:
res_dir = f'{out_folder}/{engine}_out.pkl'
with closing(open(res_dir, 'rb')) as jar:
out = pickle.load(jar)
star = out['star']
mask = star.filter_mask
n = int(star.used_filters.sum())
for filt in star.filter_names[mask]:
p_ = get_noise_name(filt) + '_noise'
order = np.append(order, p_)
theta = np.zeros(order.shape[0] - 1 + n)
for i, param in enumerate(order):
if param != 'loglike':
theta[i] = out['best_fit_averaged'][param]
if param == 'inflation':
for m, fi in enumerate(star.filter_names[mask]):
_p = get_noise_name(fi) + '_noise'
theta[i + m] = out['best_fit_averaged'][_p]
if engine != 'Bayesian Model Averaging':
z, z_err = out['global_lnZ'], out['global_lnZerr']
print('')
print(colored('\t\t\tFitting finished.', c))
print(colored('\t\t\tBest fit parameters are:', c))
fmt_str = ''
for i, p in enumerate(order):
p2 = p
if 'noise' in p:
continue
fmt_str += '\t\t\t'
fmt = 'f'
if p == 'norm':
p2 = '(R/D)^2'
fmt = 'e'
if p == 'z':
p2 = '[Fe/H]'
fmt_str += f'{p2} : {theta[i]:.4{fmt}} '
if not coordinator[i]:
unlo, unhi = out['uncertainties_averaged'][p]
lo, up = out['confidence_interval_averaged'][p]
fmt_str += f'+ {unhi:.4{fmt}} - {unlo:.4{fmt}} '
fmt_str += f'[{lo:.4{fmt}}, {up:.4{fmt}}]\n'
else:
fmt_str += 'fixed\n'
if not use_norm:
ad = out['best_fit_averaged']['AD']
unlo, unhi = out['uncertainties_averaged']['AD']
lo, up = out['confidence_interval_averaged']['AD']
fmt_str += f'\t\t\tAngular Diameter : {ad:.4f} '
fmt_str += f'+ {unhi:.4f} - {unlo:.4f} [{lo:.4f}, {up:.4f}]\n'
mass = out['best_fit_averaged']['grav_mass']
unlo, unhi = out['uncertainties_averaged']['grav_mass']
lo, up = out['confidence_interval_averaged']['grav_mass']
fmt_str += f'\t\t\tGrav mass : {mass:.4f} '
fmt_str += f'+ {unhi:.4f} - {unlo:.4f} [{lo:.4f}, {up:.4f}]\n'
lum = out['best_fit_averaged']['lum']
unlo, unhi = out['uncertainties_averaged']['lum']
lo, up = out['confidence_interval_averaged']['lum']
fmt_str += f'\t\t\tLuminosity : {lum:.4f} '
fmt_str += f'+ {unhi:.4f} - {unlo:.4f} [{lo:.4f}, {up:.4f}]\n'
if engine == 'Bayesian Model Averaging':
miso = out['best_fit_averaged']['iso_mass']
unlo, unhi = out['uncertainties_averaged']['iso_mass']
lo, up = out['confidence_interval_averaged']['iso_mass']
fmt_str += f'\t\t\tIso mass : {miso:.4f} '
fmt_str += f'+ {unhi:.4f} - {unlo:.4f} [{lo:.4f}, {up:.4f}]\n'
age = out['best_fit_averaged']['age']
unlo, unhi = out['uncertainties_averaged']['age']
lo, up = out['confidence_interval_averaged']['age']
fmt_str += f'\t\t\tAge (Gyr) : {age:.4f} '
fmt_str += f'+ {unhi:.4f} - {unlo:.4f} [{lo:.4f}, {up:.4f}]\n'
eep = out['best_fit_averaged']['eep']
unlo, unhi = out['uncertainties_averaged']['eep']
lo, up = out['confidence_interval_averaged']['eep']
fmt_str += f'\t\t\tEEP : {eep:.4f} '
fmt_str += f'+ {unhi:.4f} - {unlo:.4f} [{lo:.4f}, {up:.4f}]\n'
for i, p in enumerate(order):
if 'noise' not in p:
continue
unlo, unhi = out['uncertainties_averaged'][p]
lo, up = out['confidence_interval_averaged'][p]
p_ = 'Excess '
if 'SDSS' not in p and 'PS1' not in p:
p1, p2 = p.split('_')
else:
p1, p2, p3 = p.split('_')
p1 += '_' + p2
p2 = p3
fmt_str += f'\t\t\t{p_ + p1} {p2} : {theta[i]:.4f} '
fmt_str += f'+ {unhi:.4f} - {unlo:.4f} [{lo:.4f}, {up:.4f}]\n'
print(colored(fmt_str, c), end='')
spt = out['spectral_type']
print(colored('\t\t\tMamajek Spectral Type : ', c), end='')
print(colored(spt, c))
if engine != 'Bayesian Model Averaging':
print(colored('\t\t\tlog Bayesian evidence : ', c), end='')
print(colored(f'{z:.3f} +/-', c), end=' ')
print(colored(f'{z_err:.3f}', c))
else:
probs = out['weights']
for k in probs.keys():
text = f'\t\t\t{k} probability : '
print(colored(text, c), end='')
print(colored(f'{probs[k]:.4f}', c))
print(colored('\t\t\tElapsed time : ', c), end='')
print(colored(elapsed_time, c))
pass
def create_dir(path):
"""Create a directory."""
try:
os.mkdir(path)
except OSError:
err_msg = f"Creation of the directory {path:s} failed. "
err_msg += "It might already exist"
print(colored(err_msg, 'red'))
pass
else:
print(colored(f"Created the directory {path:s}", 'blue'))
pass
pass
def execution_time(start):
"""Calculate run execution time."""
end = time.time() - start
weeks, rest0 = end // 604800, end % 604800
days, rest1 = rest0 // 86400, rest0 % 86400
hours, rest2 = rest1 // 3600, rest1 % 3600
minutes, seconds = rest2 // 60, rest2 % 60
elapsed = ''
if weeks == 0:
if days == 0:
if hours == 0:
if minutes == 0:
elapsed = f'{seconds:.2f} seconds'
else:
elapsed = f'{minutes:.0f} minutes'
elapsed += f' and {seconds:.2f} seconds'
else:
elapsed = f'{hours:.0f} hours'
elapsed += f', {minutes:.0f} minutes'
elapsed += f' and {seconds:.2f} seconds'
else:
elapsed = f'{days:.0f} days'
elapsed += f', {hours:.0f} hours'
elapsed += f', {minutes:.0f} minutes'
elapsed += f' and {seconds:.2f} seconds'
else:
elapsed = f'{weeks:.0f} weeks'
elapsed += f', {days:.0f} days'
elapsed += f', {hours:.0f} hours'
elapsed += f', {minutes:.0f} minutes'
elapsed += f' and {seconds:.2f} seconds'
return elapsed
def get_noise_name(filt):
"""Retrieve parameter name for white noise."""
if filt == 'TYCHO_B_MvB':
return 'BT'
if filt == 'TYCHO_V_MvB':
return 'VT'
if filt == 'SPITZER_IRAC_36':
return 'IRAC 36'
if filt == 'SPITZER_IRAC_45':
return 'IRAC 45'
if filt == 'NGTS_I':
return 'NGTS'
if filt == 'WISE_RSR_W1':
return 'W1'
if filt == 'WISE_RSR_W2':
return 'W2'
if 'SDSS' in filt or 'PS1' in filt:
return filt
return filt.split('_')[-1]
def out_filler(samp, logdat, param, name, out, fmt='f', fixed=False,
method='averaged'):
"""Fill up the output file."""
if method not in ['averaged', 'samples']:
raise Exception('Method is wrong!')
if fixed is False:
try:
xx, pdf = estimate_pdf(samp)
cdf = estimate_cdf(samp, hdr=True)
best, lo, up = credibility_interval_hdr(xx, pdf, cdf, sigma=1)
_, lo3, up3 = credibility_interval_hdr(xx, pdf, cdf, sigma=3)
except ValueError:
wrn = f'HDR failed for parameter {param}, reverting to regular CI'
wrn += ' calculation. Be sure to check the histograms afterwards'
wrn += ' for diagnosis.'
print(colored(wrn, 'red'))
best, lo, up = credibility_interval(samp)
_, lo3, up3 = credibility_interval(samp, alpha=3)
out[f'best_fit_{method}'][param] = best
logdat += f'{name}\t{best:.4{fmt}}\t'
out[f'uncertainties_{method}'][param] = (best - lo, up - best)
logdat += f'{up - best:.4{fmt}}\t{best - lo:.4{fmt}}\t'
out[f'confidence_interval_{method}'][param] = (lo3, up3)
logdat += f'{lo:.4{fmt}}\t{up:.4{fmt}}\n'
else:
out[f'best_fit_{method}'][param] = fixed
out[f'uncertainties_{method}'][param] = np.nan
out[f'confidence_interval_{method}'][param] = np.nan
logdat += f'{name}\t{fixed:.4{fmt}}\t'
logdat += '(FIXED)\n'
return logdat
def get_max_from_kde(samp):
"""Get maximum of the given distribution."""
raise DeprecationWarning()
kde = gaussian_kde(samp)
xmin = samp.min()
xmax = samp.max()
xx = np.linspace(xmin, xmax, 1000)
kde = kde(xx)
best = xx[kde.argmax()]
return best
|
python
|
import queue
import cards
import random
#create card decks
class constructBoard:
    def __init__(self, lowLevelCards=None, midLevelCards=None, highLevelCards=None):
        # Avoid mutable default arguments: queue.Queue() defaults would be
        # created once at definition time and shared by every constructBoard
        # instance.
        lowLevelCards = lowLevelCards if lowLevelCards is not None else queue.Queue()
        midLevelCards = midLevelCards if midLevelCards is not None else queue.Queue()
        highLevelCards = highLevelCards if highLevelCards is not None else queue.Queue()
        self.lowlevelcards = lowLevelCards
        self.midlevelcards = midLevelCards
        self.highlevelcards = highLevelCards
for size in range(100):
card = cards.cards()
if card.getpoints() == 0:
lowLevelCards.put(card)
elif card.getpoints() == 1 or card.getpoints() == 2:
midLevelCards.put(card)
else:
highLevelCards.put(card)
def printboard(self):
cardslots = []
cardslots.append(self.highlevelcards.get())
image1 = cardslots[0].getimage()
cardslots.append(self.highlevelcards.get())
image2 = cardslots[1].getimage()
cardslots.append(self.highlevelcards.get())
image3 = cardslots[2].getimage()
cardslots.append(self.highlevelcards.get())
image4 = cardslots[3].getimage()
print(image1[0] + ' '+ image2[0] + ' ' + image3[0] + ' ' + image4[0] + ' ')
print(image1[1] + ' ' + image2[1] + ' ' + image3[1] + ' ' + image4[1] + ' ')
print(image1[2] + ' ' + image2[2] + ' ' + image3[2] + ' ' + image4[2] + ' ')
print(image1[3] + ' ' + image2[3] + ' ' + image3[3] + ' ' + image4[3] + ' ')
print(image1[4] + ' ' + image2[4] + ' ' + image3[4] + ' ' + image4[4] + ' ')
print(image1[5] + ' ' + image2[5] + ' ' + image3[5] + ' ' + image4[5] + ' ')
print(image1[6] + ' ' + image2[6] + ' ' + image3[6] + ' ' + image4[6] + ' ')
print(image1[7] + ' ' + image2[7] + ' ' + image3[7] + ' ' + image4[7] + ' ')
print(image1[8] + ' ' + image2[8] + ' ' + image3[8] + ' ' + image4[8] + ' ')
mtop1 = self.midlevelcards.get()
mimage1 = mtop1.getimage()
mtop2 = self.midlevelcards.get()
mimage2 = mtop2.getimage()
mtop3 = self.midlevelcards.get()
mimage3 = mtop3.getimage()
        mtop4 = self.midlevelcards.get()
mimage4 = mtop4.getimage()
print(mimage1[0] + ' ' + mimage2[0] + ' ' + mimage3[0] + ' ' + mimage4[0] + ' ')
print(mimage1[1] + ' ' + mimage2[1] + ' ' + mimage3[1] + ' ' + mimage4[1] + ' ')
print(mimage1[2] + ' ' + mimage2[2] + ' ' + mimage3[2] + ' ' + mimage4[2] + ' ')
print(mimage1[3] + ' ' + mimage2[3] + ' ' + mimage3[3] + ' ' + mimage4[3] + ' ')
print(mimage1[4] + ' ' + mimage2[4] + ' ' + mimage3[4] + ' ' + mimage4[4] + ' ')
print(mimage1[5] + ' ' + mimage2[5] + ' ' + mimage3[5] + ' ' + mimage4[5] + ' ')
print(mimage1[6] + ' ' + mimage2[6] + ' ' + mimage3[6] + ' ' + mimage4[6] + ' ')
print(mimage1[7] + ' ' + mimage2[7] + ' ' + mimage3[7] + ' ' + mimage4[7] + ' ')
print(mimage1[8] + ' ' + mimage2[8] + ' ' + mimage3[8] + ' ' + mimage4[8] + ' ')
ltop1 = self.lowlevelcards.get()
limage1 = ltop1.getimage()
ltop2 = self.lowlevelcards.get()
limage2 = ltop2.getimage()
ltop3 = self.lowlevelcards.get()
limage3 = ltop3.getimage()
ltop4 = self.lowlevelcards.get()
limage4 = ltop4.getimage()
print(limage1[0] + ' ' + limage2[0] + ' ' + limage3[0] + ' ' + limage4[0] + ' ')
print(limage1[1] + ' ' + limage2[1] + ' ' + limage3[1] + ' ' + limage4[1] + ' ')
print(limage1[2] + ' ' + limage2[2] + ' ' + limage3[2] + ' ' + limage4[2] + ' ')
print(limage1[3] + ' ' + limage2[3] + ' ' + limage3[3] + ' ' + limage4[3] + ' ')
print(limage1[4] + ' ' + limage2[4] + ' ' + limage3[4] + ' ' + limage4[4] + ' ')
print(limage1[5] + ' ' + limage2[5] + ' ' + limage3[5] + ' ' + limage4[5] + ' ')
print(limage1[6] + ' ' + limage2[6] + ' ' + limage3[6] + ' ' + limage4[6] + ' ')
print(limage1[7] + ' ' + limage2[7] + ' ' + limage3[7] + ' ' + limage4[7] + ' ')
print(limage1[8] + ' ' + limage2[8] + ' ' + limage3[8] + ' ' + limage4[8] + ' ')
def main():
board = constructBoard()
board.printboard()
main()
|
python
|
from pytest import raises
class IndexedPropertyMapper(object):
def __init__(self, desc, instance):
self.desc = desc
self.instance = instance
def __getitem__(self, item):
return self.desc.fget(self.instance, item)
def __setitem__(self, item, value):
# hmm. is this order of arguments right?
self.desc.fset(self.instance, value, item)
def __delitem__(self, item):
self.desc.fdel(self.instance, item)
class MultiIndexedPropertyMapper(object):
def __init__(self, desc, instance):
self.desc = desc
self.instance = instance
def __getitem__(self, item):
return self.desc.fget(self.instance, *item)
def __setitem__(self, item, value):
# hmm. is this order of arguments right?
self.desc.fset(self.instance, *item, value)
def __delitem__(self, item):
self.desc.fdel(self.instance, *item)
# could we allow __delete__ to invalidate all nodes?
# what does __set__ do? assign the whole mapping? that sounds ok.
# can we assign slices -> translate to multipl set calls? but we don't know.
class IndexedPropertyDescriptor(property):
def __get__(self, instance, owner):
return IndexedPropertyMapper(self, instance)
class MultiIndexedPropertyDescriptor(property):
def __get__(self, instance, owner):
return MultiIndexedPropertyMapper(self, instance)
def index(fget, *args, **kwargs):
if fget.__code__.co_argcount > 2:
return MultiIndexedPropertyDescriptor(fget, *args, **kwargs)
if fget.__code__.co_argcount == 2:
return IndexedPropertyDescriptor(fget, *args, **kwargs)
raise ValueError('index property must take at least one parameter')
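# Added note: fget.__code__.co_argcount counts `self`, so a getter taking a
# single index has argcount 2 and becomes a single-index property
# (obj.prop[i]); two or more indices select the tuple-unpacking variant used
# as obj.prop[i, j].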
def test_indexed_property():
class Thingy(object):
@index
def AddOne(self, i):
return i + 1
@index
def AddTwo(self, i, j):
return i+j
t = Thingy()
with raises(AttributeError):
t.AddOne = 123
with raises(AttributeError):
del t.AddOne
assert t.AddOne[1] == 2
assert t.AddOne[3] == 4
assert t.AddTwo[2,3] == 5
class FibonacciThingy(object):
@index
def Fib(self, item):
if item < 0:
raise KeyError('must be bigger than 0')
if item == 0 or item == 1:
return 1
return self.Fib[item - 1] + self.Fib[item - 2]
def method_fib(self, item):
if item < 0:
raise KeyError('must be bigger than 0')
if item == 0 or item == 1:
return 1
return barefaced_fib(item - 1) + barefaced_fib(item - 2)
def test_fibonacci():
t = FibonacciThingy()
with raises(KeyError):
t.Fib[-100]
assert t.Fib[0] == 1
assert t.Fib[1] == 1
assert t.Fib[6] == 13
def test_benchmark_fibonacci(benchmark):
t = FibonacciThingy()
benchmark(lambda : t.Fib[20])
def barefaced_fib(item):
if item < 0:
raise KeyError('must be bigger than 0')
if item == 0 or item == 1:
return 1
return barefaced_fib(item - 1) + barefaced_fib(item - 2)
def test_benchmark_barefaced_fib(benchmark):
benchmark(lambda : barefaced_fib(20))
|
python
|
import asyncio
import shutil
from collections import namedtuple
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime, timedelta, timezone
from shlex import split as lex
from subprocess import DEVNULL, Popen
import bottle as bt
import httpx
import peewee as pw
import toml
from waitress import serve
confdir = shutil.os.path.expanduser("~") + "/.config/twitch-py"
bt.TEMPLATE_PATH.insert(0, f"{confdir}/views")
cachedir = shutil.os.path.expanduser("~") + "/.cache/twitch-py"
db = pw.SqliteDatabase(f"{confdir}/data.db")
os_ = shutil.sys.platform.lower()
Image = namedtuple("Image", "id url")
Result = namedtuple("Result", "query model")
class App:
process: Popen = None  # Holds the Popen handle of the current stream/vod
url = "http://localhost:8080/" # Index page of local site
messages = [] # Log of events since application start
errors = {
400: "Bad Request",
404: "Not Found",
500: "Server Error",
502: "Bad Gateway",
}
@staticmethod
def display(message: str = "") -> None:
"""
Reprints terminal screen with most recent event messages
Re-centers logo and change list length based on terminal size
"""
shutil.os.system("clear")
t = shutil.get_terminal_size()
logo = "\n".join(
line.center(t.columns)
for line in """
_ _ _ _
| |___ _(_) |_ ___| |__ _ __ _ _
| __\ \ /\ / / | __/ __| '_ \ _____| '_ \| | | |
| |_ \ V V /| | || (__| | | |_____| |_) | |_| |
\__| \_/\_/ |_|\__\___|_| |_| | .__/ \__, |
|_| |___/ v1.5
""".splitlines()
)
divide = ("─" * round(t.columns / 1.5)).center(t.columns) + "\n"
print(logo, App.url.center(t.columns), sep="\n", end=divide)
(m := App.messages).append(message)
print(*[f" > {msg}" for msg in m[-min(len(m), (t.lines - 12)) :]], sep="\n")
@bt.hook("before_request")
def _connect_db() -> None:
"""
The following is run at the start of each page request (user action on webpage)
"""
db.connect()
if not any(
path in bt.request.path
for path in ["authenticate", "config", "settings", "error"]
):
Db.check_user() # Redirect to login if no user in data.db
Db.check_cache() # If no cache but user login, run initial cache from follows
@bt.hook("after_request")
def _close_db() -> None:
"""
The following is run after server fulfills page request
"""
if not db.is_closed():
db.close() # Terminate connection with data.db
class BaseModel(pw.Model):
"""
Base class for database models, where data.db is the shared database
"""
class Meta:
database = db
class User(BaseModel):
"""
Model/table for the user login. Necessary to store access token for
Twitch Helix API requests
"""
id = pw.IntegerField()
login = pw.TextField()
display_name = pw.TextField()
profile_image_url = pw.TextField()
access_token = pw.TextField()
class Streamer(BaseModel):
"""
Model/table for all Twitch streamers. Holds data for displaying content
on webpages, and boolean for whether streamer is followed by the user.
"""
id = pw.IntegerField(primary_key=True)
login = pw.TextField()
display_name = pw.TextField()
broadcaster_type = pw.TextField(default="user") # If not partner/affiliate
description = pw.TextField(default="Twitch streamer") # Default if no description
profile_image_url = pw.TextField()
followed = pw.BooleanField(default=False)
class Game(BaseModel):
"""
Holds data for presenting game names and box art. The box art stored
is a specified size that exists for all games (some sizes are incompatible)
"""
id = pw.IntegerField(primary_key=True)
name = pw.TextField()
box_art_url = pw.TextField()
class Helix:
"""
Application information to interface with the Helix API
"""
client_id = "o232r2a1vuu2yfki7j3208tvnx8uzq"
redirect_uri = "http://localhost:8080/authenticate"
app_scopes = "user:edit+user:edit:follows+user:read:follows"
endpoint = "https://api.twitch.tv/helix"
oauth = (
"https://id.twitch.tv/oauth2/authorize?client_id="
f"{client_id}&redirect_uri={redirect_uri}"
f"&response_type=token&scope={app_scopes}"
)
@staticmethod
def headers() -> dict:
"""
Prepares headers with app id and stored user-access-token from authentication
"""
return {
"Client-ID": Helix.client_id,
"Authorization": f"Bearer {User.get().access_token}",
}
@staticmethod
def get(params: str) -> list[dict]:
"""
Blueprint for http requests specifically for Helix API
Includes necessary client-id and user access token
Input `params` is used to specify API endpoint as so:
https://api.twitch.tv/helix/<params>
The response is of json format
```
{
"data": [{},{}],
"pagination":...
}
```
and the `data` key is selected, which is of type `list[dict]`
"""
try:
with httpx.Client(headers=Helix.headers(), timeout=None) as session:
resp: list[dict] = session.get(f"{Helix.endpoint}/{params}").json()[
"data"
]
return resp
except httpx.HTTPError as e:
App.display(f"Error in handling request with params {params}. Error: {e}")
bt.abort(code=502, text=f"Error in handling request with params {params}")
@staticmethod
def get_iter(params: str) -> list[dict]:
"""
Blueprint for http requests specifically for Helix API
Includes necessary client-id and user access token
Input `params` is used to specify API endpoint as so:
https://api.twitch.tv/helix/<params>
The response is of json format
```
{
"data": [{},{}],
"pagination":
{"cursor" : [0-9a-zA-Z]+}
}
```
The response's `data` field (of type `list[dict]`) is appended to `results`
The `pagination` cursor, if it exists, is used as a request parameter for a
subsequent request at the same endpoint to show the next series of results
Iterates requests with new index of results until no more data is found
"""
results, data = [], []
with httpx.Client(headers=Helix.headers(), timeout=None) as session:
while True:
resp = session.get(f"{Helix.endpoint}/{params}").json()
try:
data: list[dict] = resp["data"]
except KeyError as e:  # resp is already a dict; a missing "data" key raises KeyError
App.display(f"Error with {resp}. Caused the error {e}")
bt.abort(
code=502, text=f"Error with request {Helix.endpoint}/{params}"
)
if data == []:
break
results += data
if resp["pagination"] == {}:
return results
pagination = resp["pagination"]["cursor"]
if "after" in params:
params = params[: (params.rfind("=") + 1)] + pagination
else:
params = params + f"&after={pagination}"
return results
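# Pagination sketch (illustrative values, not part of the original module): a first call
# such as get_iter("users/follows?from_id=123&first=100") appends a non-empty cursor as
# "...&first=100&after=abc"; on later loops the text after the last "=" is swapped for
# the new cursor, and iteration stops once "data" is empty or "pagination" is {}.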
class Fetch:
@staticmethod
def user(access_token: str) -> User:
"""
Once user logs in via the twitch portal, the access token taken from
the /authentication uri is used to fetch user data and populate the 'user'
table in `data.db`.
https://api.twitch.tv/helix/users
headers contain unique user access token (required)
"""
headers = {
"Client-ID": Helix.client_id,
"Authorization": f"Bearer {access_token}",
}
try:
user: dict = httpx.get(
f"{Helix.endpoint}/users", headers=headers, timeout=None
).json()["data"][0]
except Exception as e:
App.display(f"Error occurred: {e}")
bt.abort(code=500, text="Error in fetching user data")
shutil.sys.exit()
user["access_token"] = access_token
user["id"] = int(user["id"])
return User.create(**user)
@staticmethod
def follows(id: int) -> set[int]:
"""
Fetches id numbers of user's followed channels.
https://api.twitch.tv/helix/users/follows?from_id=<user_id>
"""
resp = Helix.get_iter(f"users/follows?from_id={id}&first=100")
return {int(follow["to_id"]) for follow in resp}
@staticmethod
async def live(ids: set[int]) -> list[dict]:
"""
Input: set of user ids.
Splits ids in chunks of 100 (limit of API endpoint) and fetches stream data.
If channel is not live, data is empty, thus only live stream info is returned.
https://api.twitch.tv/helix/streams?user_id=<id1>&...&user_id=<id100>
"""
tmp = list(ids)
id_lists = [tmp[x : x + 100] for x in range(0, len(tmp), 100)]
async with httpx.AsyncClient(headers=Helix.headers(), timeout=None) as session:
stream_list: list[httpx.Response] = await asyncio.gather(
*(
session.get(
f"{Helix.endpoint}/streams?{'&'.join([f'user_id={i}' for i in i_list])}"
)
for i_list in id_lists
)
)
streams = []
for resp in stream_list:
data: list[dict] = resp.json()["data"]
if data:
streams += data
return streams
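# Chunking sketch (illustrative numbers, not part of the original module): 250 followed
# ids produce id_lists of lengths [100, 100, 50] and therefore three concurrent
# /streams requests; offline channels contribute nothing to "data", so only live
# streams end up in the returned list.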
@staticmethod
def stream_info(streams: list[dict]) -> list[dict]:
"""
From stream data, cache games and users from their ids.
Caching fetches additional data which is then appended to stream data dict
"""
async def cache():
tasks = []
for args in [("game_id", "games"), ("user_id", "users")]:
ids = {int(i) for stream in streams if (i := stream[args[0]])}
tasks.append(Db.cache(ids, mode=args[1]))
await asyncio.gather(*tasks)
asyncio.run(cache())
for stream in streams:
channel: Streamer = Streamer.get(int(stream["user_id"]))
try:
game = Game.get(int(stream["game_id"]))
stream["box_art_url"] = game.box_art_url
except ValueError:
stream[
"box_art_url"
] = "https://static-cdn.jtvnw.net/ttv-static/404_boxart.jpg"
stream["profile_image_url"] = channel.profile_image_url
stream["uptime"] = time_elapsed(stream["started_at"])
stream["thumbnail_url"] = stream["thumbnail_url"].replace(
"-{width}x{height}", ""
)
streams.sort(key=lambda stream: stream["viewer_count"], reverse=True)
return streams
class Db:
key_defaults = ["broadcaster_type", "description", "offline_image_url"]
@staticmethod
def check_user() -> bt.redirect:
"""
Check if User is logged in (table exists in data.db).
Redirect to authentication page if no user
"""
if db.table_exists("user") is False or User.get_or_none() is None:
App.display("No user found. Please log in.")
return bt.redirect(Helix.oauth)
@staticmethod
def check_cache():
"""Initial creation of database tables and caching if tables do not exist"""
if (Streamer.table_exists() and Game.table_exists()) is False:
db.create_tables([Streamer, Game])
App.display("Building cache")
follows = Fetch.follows(User.get().id)
asyncio.run(Db.cache(follows, "users"))
Streamer.update(followed=True).execute()
@staticmethod
async def cache(ids: set[int], mode: str) -> None:
"""
Caching mode: 'users' or 'games'.
If game/streamer id does not exist in database, send to caching.
https://api.twitch.tv/helix/<'games' or 'users'>?id=<id1>&id=<id2>...
"""
model = Streamer if mode == "users" else Game
tag = "box_art_url" if mode == "games" else "profile_image_url"
tmp = [i for i in ids if model.get_or_none(i) is None]
if not tmp:
return None
id_lists = [tmp[x : x + 100] for x in range(0, len(tmp), 100)]
async with httpx.AsyncClient(headers=Helix.headers(), timeout=None) as session:
resps: list[httpx.Response] = await asyncio.gather(
*(
session.get(
f"{Helix.endpoint}/{mode}?{'&'.join([f'id={i}' for i in i_list])}"
)
for i_list in id_lists
)
)
data = []
for resp in resps:
datum: list[dict] = resp.json()["data"]
if datum:
data += datum
for datum in data:
if mode == "games":
datum["box_art_url"] = datum["box_art_url"].replace(
"-{width}x{height}", "-285x380"
)
else:
for key in Db.key_defaults:
if not datum[key]: # Remove to replace with key's default
datum.pop(key)
# `tag` key different for game datum and user datum
images = [Image(datum["id"], datum[tag]) for datum in data]
def download_image(image: Image) -> None:
"""Get image data from url, write to file with `mode` directory
and datum `id` as the filename"""
data = httpx.get(image.url).content
with open(f"{cachedir}/{mode}/{image.id}.jpg", "wb") as f:
f.write(data)
with ThreadPoolExecutor() as tp:
tp.map(download_image, images)
for datum in data:
datum[tag] = f"/cache/{mode}/{datum['id']}.jpg" # Point to file path
datum["id"] = int(datum["id"])
model.create(**datum) # Discards unused keys
@staticmethod
def update_follows() -> set[int]:
"""
Fetch user's current follows and cache
Toggle channel follow if follow in database and current do not match
"""
follows = Fetch.follows(User.get().id)
asyncio.run(Db.cache(follows, "users"))
streamers: list[Streamer] = [streamer for streamer in Streamer.select()]
to_toggle = set()
for streamer in streamers:
sid = streamer.id
if (sid in follows and streamer.followed is not True) or (
sid not in follows and streamer.followed is True
):
to_toggle.add(streamer)
if to_toggle:
asyncio.run(Db.toggle_follow(to_toggle))
return follows
@staticmethod
async def toggle_follow(streamers: set[Streamer]) -> None:
"""Send http POST or DELETE based on value of follow after toggling"""
url = f"{Helix.endpoint}/users/follows"
async def send(session: httpx.AsyncClient, data: dict, streamer: Streamer):
Streamer.update(followed=not streamer.followed).where(
Streamer.id == streamer.id
).execute()
if streamer.followed is True:
App.display(f"Unfollowing {streamer.display_name}")
await session.delete(url, params=data)
else:
App.display(f"Following {streamer.display_name}")
await session.post(url, params=data)
async with httpx.AsyncClient(headers=Helix.headers(), timeout=None) as session:
tasks = []
for streamer in streamers:
data = {"to_id": str(streamer.id), "from_id": str(User.get().id)}
tasks.append(send(session, data, streamer))
await asyncio.gather(*tasks)
@bt.route("/")
def index():
"""Index of web application. Displays live streams of user's follows"""
follows = Db.update_follows()
streams = Fetch.stream_info(asyncio.run(Fetch.live(follows)))
return bt.template("index.tpl", User=User.get(), streams=streams)
@bt.route("/authenticate")
def authenticate():
"""
User is prompted with login portal. After login, uri redirect includes
access token. Javascript in `authenticate.tpl` grabs this token which is
used to fetch user information which is then cached along with token.
"""
if access_token := bt.request.query.get("access_token"):
User.create_table()
user = Fetch.user(access_token)
App.display(f"Logged in as {user.display_name}")
return bt.redirect("/")
return bt.template("authenticate.tpl")
@bt.route("/<channel>")
def channel(channel, mode=None, data=None):
"""Profile page of channel"""
try:
channel: Streamer = Streamer.get(
(Streamer.display_name == channel) | (Streamer.login == channel)
)
except pw.DoesNotExist:
bt.abort(code=404, text="User does not exist")
date = {"start": "", "end": ""}
if bt.request.query.get("follow"):
asyncio.run(Db.toggle_follow({channel}))
bt.redirect(f"/{channel.login}")
elif bt.request.query.get("watch"):
watch_video(channel.login)
return """<script>setTimeout(function () { window.history.back() });</script>"""
elif bt.request.query.get("vod"):
mode = "vod"
vods = Helix.get_iter(f"videos?user_id={channel.id}&type=archive")
data = process_data(vods, mode)
elif bt.request.query.get("clips"):
mode = "clip"
start = bt.request.query.get("start") + "T00:00:00Z"
end = bt.request.query.get("end") + "T00:00:00Z"
clips = Helix.get(
f"clips?broadcaster_id={channel.id}&first=100&started_at={start}&ended_at={end}"
)
data = process_data(clips, mode="clip")
data = sorted(data, key=lambda info: info["view_count"], reverse=True)
date = {"start": start[:-10], "end": end[:-10]}
elif url := bt.request.query.get("video"):
watch_video(mode="vod", url=url)
return """<script>setTimeout(function () { window.history.back() });</script>"""
elif bt.request.query.get("close"):
bt.redirect(f"/{channel.login}")
return bt.template("channel.tpl", channel=channel, mode=mode, data=data, date=date)
@bt.route("/search")
def search():
"""
List results that match search query string and cache results based on id
For categories, display data from database based on id
For channels, display data from database as well as request data from endpoint
"""
query = bt.request.query.q
t = bt.request.query.t
mode, model, count = (
("games", Game, 10) if t == "categories" else ("users", Streamer, 5)
)
search_results = Helix.get(f"search/{t}?query={query}&first={count}")
ids = {int(result["id"]) for result in search_results}
asyncio.run(Db.cache(ids, mode=mode))
if t == "categories":
results = model.select().where(model.id.in_(ids))
else:
results = [
Result(result, model.get_by_id(int(result["id"])))
for result in search_results
]
return bt.template("search.tpl", query=query, mode=mode, results=results)
@bt.route("/following")
def following():
"""Read data.db for users with `followed == True`"""
Db.update_follows()
follows = (
Streamer.select()
.where(Streamer.followed == True)
.order_by(Streamer.display_name)
)
return bt.template("following.tpl", follows=follows)
@bt.route("/categories/<game_id>")
def browse(game_id="all"):
"""
`/all` View list of games by viewer count
`/<game_id>` View top streams under game category
"""
if game_id == "all":
return bt.redirect("/top/games")
else:
try:
game: Game = Game.get(int(game_id))
streams = Helix.get(f"streams?first=50&game_id={game_id}")
data = Fetch.stream_info(streams)
return bt.template("top.tpl", data=data, t="channels_filter", game=game)
except httpx.HTTPError:
bt.abort(code=404, text=f"Cannot find streams for game id {game_id}")
@bt.route("/top/<t>")
def top(t):
"""
`/games` View list of top games by total viewer count
`/streams` View list of top streams across platform
"""
if t == "channels":
top_streams = Helix.get("streams?first=50")
data = Fetch.stream_info(top_streams)
elif t == "games":
games = [int(g["id"]) for g in Helix.get("games/top?first=100")]
asyncio.run(Db.cache(set(games), mode="games"))
data = list(Game.select().where(Game.id.in_(games)))
data.sort(key=lambda x: games.index(x.id))
else:
bt.abort(code=400, text="Not a valid type for /top")
return bt.template("top.tpl", data=data, t=t)
@bt.route("/settings")
def settings():
"""
Settings page to view current settings, open settings file,
clear cache, and log out.
"""
command = lex(f"xdg-open {confdir}/static/settings.toml")
if bt.request.query.get("open"):
Popen(command)
return bt.redirect("/settings")
elif bt.request.query.get("cache"):
App.display("Clearing cache...")
db.drop_tables([Streamer, Game])
shutil.os.system(f"rm -f {cachedir}/games/* {cachedir}/users/*")
return bt.redirect("/settings")
elif bt.request.query.get("logout"):
App.display("Logging out...")
db.drop_tables([User, Streamer, Game])
return bt.redirect("/settings")
try:
config = toml.load(f"{confdir}/static/settings.toml")[f"{os_}"]
except toml.TomlDecodeError as e:
Popen(command)
bt.abort(code=404, text="Could not parse settings.toml")
return bt.template("settings.tpl", config=config)
@bt.route("/static/<filename:path>")
def send_static(filename):
"""Serve files located in configuration directory"""
return bt.static_file(filename, root=f"{confdir}/static/")
@bt.route("/cache/<filename:path>")
def cache(filename):
"""Serve images cached in ~/.cache/twitch-py"""
return bt.static_file(filename, root=f"{cachedir}/")
@bt.error(400)
def error400(error):
return bt.template("error_page.tpl", code=App.errors[400], error=error)
@bt.error(404)
def error404(error):
return bt.template("error_page.tpl", code=App.errors[404], error=error)
@bt.error(500)
def error500(error):
return bt.template("error_page.tpl", code=App.errors[500], error=error)
@bt.error(502)
def error502(error):
return bt.template("error_page.tpl", code=App.errors[502], error=error)
def time_elapsed(start: str, d="") -> str:
"""Use 'started_at' key and current time to calculated time since"""
start = datetime.strptime(start, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=timezone.utc)
current = datetime.now(tz=timezone.utc)
elapsed = round((current - start).total_seconds())
delta = str(timedelta(seconds=elapsed))
if "d" in delta:
d = delta[: (delta.find("d") - 1)] + "d"
h, m, s = delta.split(" ")[-1].split(":")
return f"{d}{h}h{m}m"
def watch_video(channel: str = "", mode: str = "live", url: str = "") -> None:
"""
Save the process handle of the running video as an App attribute for later termination.
Passes through player and arg settings from `settings.toml`.
"""
c = toml.load(f"{confdir}/static/settings.toml")[f"{os_}"]
if c["multi"] is False and App.process is not None:
App.process.terminate()
if mode == "live":
App.display(f"Launching stream twitch.tv/{channel}")
command = f'streamlink -l none -p {c["app"]} -a "{c["args"]}" \
--twitch-disable-ads --twitch-low-latency twitch.tv/{channel} best'
else:
App.display(f"Launching video: {url}")
command = f'{c["app"]} {c["args"]} --really-quiet {url}'
p = Popen(lex(command), stdout=DEVNULL)
if c["multi"] is False:
App.process = p
def process_data(data: list[dict], mode: str) -> list[dict]:
"""
Format data of vod/clip for presenting. For clips, cache game data
and fetch relevant vod with timestamp of clip.
"""
if mode == "vod":
for vod in data:
vod["thumbnail_url"] = vod["thumbnail_url"].replace(
"%{width}x%{height}", "480x270"
)
if not vod["thumbnail_url"]:
vod[
"thumbnail_url"
] = "https://vod-secure.twitch.tv/_404/404_processing_320x180.png"
vod["created_at"] = time_elapsed(vod["created_at"])
if mode == "clip":
for clip in data:
clip.setdefault(
"box_art_url", "https://static-cdn.jtvnw.net/ttv-static/404_boxart.jpg"
)
clip.setdefault("game_name", "Streaming")
clip["time_since"] = time_elapsed(clip["created_at"])
clip["thumbnail_url"] = clip["thumbnail_url"].rsplit("-", 1)[0] + ".jpg"
asyncio.run(
Db.cache(
{int(gid) for clip in data if (gid := clip["game_id"])}, mode="games"
)
)
for clip in data:
try:
game: Game = Game.get(int(clip["game_id"]))
clip["box_art_url"] = game.box_art_url
clip["game_name"] = game.name
except ValueError:
pass
asyncio.run(vod_from_clip(data))
return data
async def vod_from_clip(clips: list[dict]) -> list[dict]:
"""
Fetch vod clip was taken from if it exists. Calculate timestamp of clip in
vod using formatted date strings.
"""
to_fetch = [vod_id for clip in clips if (vod_id := clip["video_id"])]
async with httpx.AsyncClient(headers=Helix.headers(), timeout=None) as session:
vod_data = await asyncio.gather(
*(
session.get(f"{Helix.endpoint}/videos?id={vod_id}")
for vod_id in to_fetch
)
)
vods = [resp.json()["data"][0] for resp in vod_data]
for clip in clips:
if clip["video_id"]:
clip["vod"] = vods.pop(0) # Consume vod if vod exists for clip
vod_id, timestamp = clip["video_id"], clip["created_at"]
vod_start = datetime.strptime(
clip["vod"]["created_at"], "%Y-%m-%dT%H:%M:%SZ"
).replace(tzinfo=timezone.utc)
timestamp = datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%SZ").replace(
tzinfo=timezone.utc
)
elapsed = round((timestamp - vod_start).total_seconds() - 61)
if "h" not in clip["vod"]["duration"]:
clip["vod"]["duration"] = f"0h{clip['vod']['duration']}"
minutes, seconds = divmod(elapsed, 60)
hours, minutes = divmod(minutes, 60)
clip[
"vod_link"
] = f"http://www.twitch.tv/videos/{vod_id}/?t={hours}h{minutes}m{seconds}s"
else:
clip["vod_link"] = None
return clips
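# Worked example (hypothetical clip, not part of the original module): a clip created
# exactly 30 minutes after its vod started gives elapsed = 1800 - 61 = 1739, hence
# hours=0, minutes=28, seconds=59 and a vod_link ending in "?t=0h28m59s"; the 61 second
# offset presumably rewinds playback to just before the clipped moment.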
def install(arg: str) -> None:
"""Run the latest installation script without having to clone repo if app installed"""
commands = [
"curl -sL -o twitch-install.sh https://raw.githubusercontent.com/RaeedAhmed/twitch-py/master/install.sh",
"chmod +x twitch-install.sh",
f"./twitch-install.sh -{arg}",
"rm twitch-install.sh",
]
for command in commands:
Popen(lex(command)).wait()
if __name__ == "__main__":
docs = """Usage: twitch-py [COMMAND]
-h, --help Display help for commands
-c, --clear-cache Clear cached data while preserving login
-s, --settings Open settings file to edit
--update Install twitch-py from latest git repo
--uninstall Remove all associated files from system
"""
arg = shutil.sys.argv[1:]
if not arg:
App.display("Launching server...")
try:
serve(app=bt.app(), host="localhost", threads=16, port=8080)
except KeyboardInterrupt:
pass
except httpx.HTTPError as e:
App.display(f"Error: {e}. Retrying...")
bt.redirect(bt.request.path)
finally:
App.display("Exiting...")
elif len(arg) > 1:
print("Too many arguments. Use -h for help")
elif arg[0] in ["-h", "--help", "help"]:
print(docs)
elif arg[0] in ["-c", "--clear-cache"]:
try:
App.display("Clearing cache...")
db.drop_tables([Streamer, Game])
shutil.os.system(f"rm -f {cachedir}/games/* {cachedir}/users/*")
except pw.OperationalError:
App.display("Database or cache does not exist")
elif arg[0] in ["--update", "update"]:
install("d")
elif arg[0] in ["--uninstall", "uninstall"]:
install("u")
elif arg[0] in ["-s", "--settings"]:
cmd = lex(f"xdg-open {confdir}/static/settings.toml")
Popen(cmd)
else:
print("Command not recognized. Use -h for help")
print(docs)
shutil.sys.exit()
|
python
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/ForgottenPassword2.ui'
#
# Created by: PyQt5 UI code generator 5.15.6
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ForgottenPassword2Screen(object):
def setupUi(self, ForgottenPassword2Screen):
ForgottenPassword2Screen.setObjectName("ForgottenPassword2Screen")
ForgottenPassword2Screen.resize(1340, 720)
ForgottenPassword2Screen.setSizeGripEnabled(False)
self.widget = QtWidgets.QWidget(ForgottenPassword2Screen)
self.widget.setGeometry(QtCore.QRect(0, 0, 1340, 720))
self.widget.setStyleSheet("background-color: rgb(69, 69, 69);")
self.widget.setObjectName("widget")
self.Title = QtWidgets.QLabel(self.widget)
self.Title.setGeometry(QtCore.QRect(612, 20, 116, 51))
self.Title.setStyleSheet("font: 36pt \"Sans Serif\"; color:rgb(239, 239, 239)")
self.Title.setObjectName("Title")
self.TabBar = QtWidgets.QWidget(self.widget)
self.TabBar.setGeometry(QtCore.QRect(0, 80, 1340, 80))
self.TabBar.setStyleSheet("background-color: rgb(239, 239, 239);")
self.TabBar.setObjectName("TabBar")
self.LoginTab = QtWidgets.QPushButton(self.TabBar)
self.LoginTab.setGeometry(QtCore.QRect(10, 5, 200, 70))
self.LoginTab.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.LoginTab.setStyleSheet("border: 2px solid;\n"
"border-radius: 20px;\n"
"border-color:rgb(69, 69, 69);\n"
"background-color: rgb(69, 69, 69);\n"
"font: 36pt \"Sans Serif\"; color:rgb(239, 239, 239);\n"
"")
self.LoginTab.setObjectName("LoginTab")
self.SignUpTab = QtWidgets.QPushButton(self.TabBar)
self.SignUpTab.setGeometry(QtCore.QRect(220, 5, 200, 70))
self.SignUpTab.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.SignUpTab.setStyleSheet("border: 2px solid;\n"
"border-radius: 20px;\n"
"border-color:rgb(69, 69, 69);\n"
"background-color: rgb(69, 69, 69);\n"
"font: 36pt \"Sans Serif\"; color:rgb(239, 239, 239);\n"
"")
self.SignUpTab.setObjectName("SignUpTab")
self.ForgottenPasswordTab = QtWidgets.QPushButton(self.TabBar)
self.ForgottenPasswordTab.setGeometry(QtCore.QRect(430, 5, 200, 70))
self.ForgottenPasswordTab.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.ForgottenPasswordTab.setStyleSheet("border: 2px solid;\n"
"border-radius: 20px;\n"
"border-color:rgb(69, 69, 69);\n"
"background-color: rgb(239, 239, 239);\n"
"font: 12pt \"Sans Serif\"; color:rgb(69, 69, 69);\n"
"")
self.ForgottenPasswordTab.setObjectName("ForgottenPasswordTab")
self.ResetPasswordTab = QtWidgets.QPushButton(self.TabBar)
self.ResetPasswordTab.setGeometry(QtCore.QRect(640, 5, 200, 70))
self.ResetPasswordTab.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.ResetPasswordTab.setStyleSheet("border: 2px solid;\n"
"border-radius: 20px;\n"
"border-color:rgb(69, 69, 69);\n"
"background-color: rgb(69, 69, 69);\n"
"font: 20pt \"Sans Serif\"; color:rgb(239, 239, 239);\n"
"")
self.ResetPasswordTab.setObjectName("ResetPasswordTab")
self.MainWidget = QtWidgets.QWidget(self.widget)
self.MainWidget.setGeometry(QtCore.QRect(10, 170, 1320, 540))
self.MainWidget.setStyleSheet("background-color: rgb(239, 239, 239);\n"
"border-radius: 20px;")
self.MainWidget.setObjectName("MainWidget")
self.VerificationCodeText = QtWidgets.QLabel(self.MainWidget)
self.VerificationCodeText.setGeometry(QtCore.QRect(300, 200, 301, 61))
self.VerificationCodeText.setStyleSheet("font: 25pt \"Sans Serif\"; color:rgb(69, 69, 69);\n"
"background-color: rgb(239, 239, 239); padding: 5px;")
self.VerificationCodeText.setObjectName("VerificationCodeText")
self.VerificationCodeInput = QtWidgets.QLineEdit(self.MainWidget)
self.VerificationCodeInput.setGeometry(QtCore.QRect(720, 200, 261, 60))
self.VerificationCodeInput.setStyleSheet("background-color: rgb(239, 239, 239);\n"
"color: rgb(69, 69, 69);\n"
"font: 18pt \"Sans Serif\";\n"
"border: 2px solid;\n"
"border-radius: 20px;\n"
"border-color:rgb(69, 69, 69);")
self.VerificationCodeInput.setText("")
self.VerificationCodeInput.setCursorPosition(0)
self.VerificationCodeInput.setObjectName("VerificationCodeInput")
self.SubmitButton = QtWidgets.QPushButton(self.MainWidget)
self.SubmitButton.setGeometry(QtCore.QRect(570, 440, 200, 70))
self.SubmitButton.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.SubmitButton.setStyleSheet("border: 2px solid;\n"
"border-radius: 20px;\n"
"border-color:rgb(69, 69, 69);\n"
"background-color: rgb(69, 69, 69);\n"
"font: 36pt \"Sans Serif\"; color:rgb(239, 239, 239);\n"
"")
self.SubmitButton.setObjectName("SubmitButton")
self.VerificationText = QtWidgets.QLabel(self.MainWidget)
self.VerificationText.setGeometry(QtCore.QRect(305, 60, 730, 61))
self.VerificationText.setStyleSheet("font: 25pt \"Sans Serif\"; color:rgb(69, 69, 69);\n"
"background-color: rgb(239, 239, 239); padding: 5px;")
self.VerificationText.setObjectName("VerificationText")
self.retranslateUi(ForgottenPassword2Screen)
QtCore.QMetaObject.connectSlotsByName(ForgottenPassword2Screen)
def retranslateUi(self, ForgottenPassword2Screen):
_translate = QtCore.QCoreApplication.translate
ForgottenPassword2Screen.setWindowTitle(_translate("ForgottenPassword2Screen", "Visualising the Riemann Hypothesis - Forgotten Password"))
self.Title.setText(_translate("ForgottenPassword2Screen", "Login"))
self.LoginTab.setText(_translate("ForgottenPassword2Screen", "Login"))
self.SignUpTab.setText(_translate("ForgottenPassword2Screen", "Sign Up"))
self.ForgottenPasswordTab.setText(_translate("ForgottenPassword2Screen", "Forgotten Password"))
self.ResetPasswordTab.setText(_translate("ForgottenPassword2Screen", "Reset Password"))
self.VerificationCodeText.setText(_translate("ForgottenPassword2Screen", "<html><head/><body><p align=\"right\">Verification Code:</p></body></html>"))
self.VerificationCodeInput.setPlaceholderText(_translate("ForgottenPassword2Screen", "Enter Verification Code"))
self.SubmitButton.setText(_translate("ForgottenPassword2Screen", "Submit"))
self.VerificationText.setText(_translate("ForgottenPassword2Screen", "<html><head/><body><p align=\"center\">A Verification Code has been sent to your email</p></body></html>"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
ForgottenPassword2Screen = QtWidgets.QDialog()
ui = Ui_ForgottenPassword2Screen()
ui.setupUi(ForgottenPassword2Screen)
ForgottenPassword2Screen.show()
sys.exit(app.exec_())
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test for NCHW[x]c convolution"""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.util import get_const_tuple
import pytest
from common import get_all_backend
def _transform_data(data, bn):
# NCHW -> NCHW[x]c
batch_size, channel, height, width = data.shape
data = np.reshape(data, (batch_size, channel//bn, bn, height, width))
data = np.transpose(data, (0, 1, 3, 4, 2))
return data
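# Shape sketch (illustrative values, not part of the original test): with bn = 8, an
# NCHW tensor of shape (1, 32, 56, 56) is reshaped to (1, 4, 8, 56, 56) and transposed
# to the NCHW8c layout (1, 4, 56, 56, 8).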
def _transform_kernel(kernel, ic_bn, oc_bn):
# OIHW -> OIHW[x]i[x]o
out_channel, in_channel, kh, kw = kernel.shape
kernel = np.reshape(kernel, (out_channel//oc_bn, oc_bn, in_channel//ic_bn, ic_bn//4, kh, kw, 4))
kernel = np.transpose(kernel, (0, 2, 4, 5, 3, 1, 6))
return kernel
def verify_group_conv2d_NCHWc_int8(batch, in_channel, groups, in_size, num_filter, kernel, stride,
padding, dilation=1, add_bias=False, add_relu=False, dtype="int32"):
assert dilation == 1, "conv2d_NCHWc does not support dilation for now."
print("Workload: (%d, %d, %d, %d, %d, %d, %d, %d)" %
(batch, in_channel, groups, in_size, num_filter, kernel, stride, padding))
in_height = in_width = in_size
# for testing functionality,
# we choose arbitrary block size that can divide the channel,
# regardless of the performance.
oc_block = 1
for bn in range(16, 0, -1):
if num_filter % bn == 0:
oc_block = bn
break
ic_block = 8
autotvm.GLOBAL_SCOPE.silent = True
A = te.placeholder((batch, in_channel//ic_block, in_height, in_width, ic_block), name='A', dtype='uint8')
W = te.placeholder((num_filter//oc_block, in_channel//ic_block//groups, kernel, kernel, ic_block//4, oc_block, 4), name='W', dtype='int8')
@memoize("topi.tests.test_topi_conv2d_NCHWc_int8.verify_conv2d_NCHWc_int8")
def get_ref_data():
a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype("uint8")
w_np = np.random.uniform(size=(num_filter, in_channel//groups, kernel, kernel)).astype("int8")
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding, groups)
return _transform_data(a_np, ic_block), _transform_kernel(w_np, ic_block, oc_block), \
_transform_data(c_np, oc_block)
a_np, w_np, c_np = get_ref_data()
def check_device(device):
ctx = tvm.context(device, 0)
if not ctx.exist:
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
C = topi.x86.conv2d_NCHWc(A, W, (stride, stride), (padding, padding),
(dilation, dilation),
'NCHW%dc'%ic_block,
"NCHW%dc"%oc_block,
dtype)
s = topi.x86.schedule_conv2d_NCHWc([C])
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), ctx)
func = tvm.build(s, [A, W, C], device,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d" %
(batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation))
# print(tvm.lower(s, [A, W, C], simple_mode=True))
func(a, w, c)
tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-3)
# for device in ["llvm"]:
for device in ["llvm -mcpu=skylake-avx512"]:
with autotvm.tophub.context(device): # load tophub pre-tuned parameters
check_device(device)
autotvm.GLOBAL_SCOPE.silent = False
@pytest.mark.skip
def test_conv2d_NCHWc():
# ResNet50 workloads
verify_group_conv2d_NCHWc_int8(1, 256, 32, 224, 64, 7, 2, 3)
if __name__ == "__main__":
# The test requires Skylake and newer Intel machines to generate the correct
# instruction. This test directly calls the topi operator, requiring correct
# kernel shape. For older generation of Intel machines, the kernel needs to
# be 6D. This test tests 7D kernel, that can only work on Skylake+ machines.
# So, disabling the test.
# test_conv2d_NCHWc()
pass
|
python
|
# -*- coding: utf-8 -*-
import re
from pybuilder.core import init
from pybuilder.core import task
from pybuilder.core import depends
from pybuilder.errors import BuildFailedException
from pybuilder.pluginhelper.external_command import ExternalCommandBuilder
from pybuilder.utils import assert_can_execute
@init
def init_radon(project):
""" initialize radon task properties
"""
project.set_property_if_unset('radon_break_build_average_complexity_threshold', None)
project.set_property_if_unset('radon_break_build_complexity_threshold', None)
project.plugin_depends_on('radon')
@task('radon', description='execute radon cyclomatic complexity')
@depends('prepare')
def radon(project, logger, reactor):
""" execute radon cyclomatic complexity
"""
set_verbose_property(project)
command = get_command(project, reactor)
# assert_can_execute(command.parts, prerequisite='radon', caller='complexity')
result = command.run_on_production_source_files(logger, include_dirs_only=True)
if not verify_result(result, logger, command):
return
complexity_data = get_complexity(project, result, logger)
if not verify_complexity(complexity_data):
return
process_complexity(project, complexity_data)
def get_command(project, reactor):
""" return radon command
"""
command = ExternalCommandBuilder('radon', project, reactor)
command.use_argument('cc')
command.use_argument('-a')
command.use_argument('-s')
return command
def set_verbose_property(project):
""" set verbose property
"""
verbose = project.get_property('verbose')
project.set_property('radon_verbose_output', verbose)
def verify_result(result, logger, command):
""" return True if result contains lines, False otherwise
"""
if not result.report_lines:
logger.warn(f"Command {command.as_string} produced no output")
return False
if len(result.error_report_lines) > 0:
logger.error(f"Command {command.as_string} produced errors, see {result.error_report_file}")
return False
return True
def get_complexity(project, result, logger):
""" return complexity info and if verbose log contents of result
"""
complexity_data = {
'average': None,
'highest': {
'name': None,
'score': 0
}
}
regex_line = r'[A-Z] \d+:\d+ (?P<name>.*) - [A-Z] \((?P<score>\d+)\)'
for line in result.report_lines[:-1]:
line = line.strip()
match = re.match(regex_line, line)
if match:
score = float(match.group('score'))
if score > complexity_data['highest']['score']:
complexity_data['highest']['score'] = score
complexity_data['highest']['name'] = match.group('name')
average_complexity = result.report_lines[-1].strip()
logger.info(average_complexity)
regex_average = r'Average complexity: [A-Z] \((?P<average>.*)\)'
match = re.match(regex_average, average_complexity)
if match:
complexity_data['average'] = float(match.group('average'))
return complexity_data
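# Illustrative radon output (assumed, not produced by this plugin): regex_line matches
# report lines such as "F 10:0 process_complexity - B (6)", capturing
# name="process_complexity" and score="6", while the final summary line is expected to
# look like "Average complexity: A (2.5)" and is matched by regex_average.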
def verify_complexity(complexity_data):
""" return True if complexity structure is valid, False otherwise
"""
if complexity_data['average'] is None:
return False
if complexity_data['highest']['name'] is None:
return False
return True
def process_complexity(project, complexity_data):
""" process complexity
"""
average_complexity_threshold = project.get_property('radon_break_build_average_complexity_threshold')
if average_complexity_threshold:
average = complexity_data['average']
if float(average) > average_complexity_threshold:
raise BuildFailedException(f'average complexity {average} is greater than {average_complexity_threshold}')
complexity_threshold = project.get_property('radon_break_build_complexity_threshold')
if complexity_threshold:
highest_score = complexity_data['highest']['score']
if float(highest_score) > complexity_threshold:
name = complexity_data['highest']['name']
raise BuildFailedException(f'{name} complexity {highest_score} is greater than {complexity_threshold}')
|
python
|
"""Class hierarchy for base gates."""
import math
from dataclasses import dataclass
from functools import singledispatch, reduce
from numbers import Number
from typing import Tuple, Union, Callable, Dict, Optional, Iterable, Any, List
import sympy
from typing_extensions import Protocol
import numpy as np
from ...utils import SCHEMA_VERSION
from . import _builtin_gates
Parameter = Union[sympy.Symbol, Number]
def serialize_expr(expr):
return str(expr)
def _make_symbols_map(symbol_names):
return {name: sympy.Symbol(name) for name in symbol_names}
def deserialize_expr(expr_str, symbol_names):
symbols_map = _make_symbols_map(symbol_names)
return sympy.sympify(expr_str, locals=symbols_map)
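# Round-trip sketch (illustrative, not part of the original module):
# serialize_expr(sympy.sympify("theta * alpha")) yields a plain string such as
# "alpha*theta", and deserialize_expr("alpha*theta", ["alpha", "theta"]) rebuilds the
# same expression with those names bound to sympy.Symbol instances.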
class Gate(Protocol):
"""Quantum gate representable by a matrix, translatable to other frameworks
and backends."""
@property
def name(self) -> str:
"""Globally unique name of the gate.
Name is used in textual representation and dispatching in conversion between
frameworks. Defining different gates with the same name as built-in ones
is discouraged."""
raise NotImplementedError()
@property
def params(self) -> Tuple[Parameter, ...]:
"""Value of parameters bound to this gate.
Length of `params` should be equal to number of parameters in gate's initializer.
In particular, nonparametric gates should always return ().
Examples:
- an `H` gate has no params
- a `RX(np.pi)` gate has a single param with value of `np.pi`
- a `RX(sympy.Symbol("theta"))` gate has a single symbolic param `theta`
- a `RX(sympy.sympify("theta * alpha"))` gate has a single symbolic expression param `theta*alpha`
We need it for translations to other frameworks and for serialization.
"""
raise NotImplementedError()
@property
def free_symbols(self):
"""Unbound symbols in the gate matrix.
Examples:
- an `H` gate has no free symbols
- a `RX(np.pi)` gate has no free symbols
- a `RX(sympy.Symbol("theta"))` gate has a single free symbol `theta`
- a `RX(sympy.sympify("theta * alpha"))` gate has two free symbols, `alpha` and `theta`
- a `RX(sympy.sympify("theta * alpha")).bind({sympy.Symbol("theta"): 0.42})` gate has one free symbol, `alpha`
"""
symbols = set(
symbol
for param in self.params
if isinstance(param, sympy.Expr)
for symbol in param.free_symbols
)
return sorted(symbols, key=str)
@property
def num_qubits(self) -> int:
"""Number of qubits this gate acts on.
We need it because matrix is computed lazily, and we don't want to create matrix
just to know the number of qubits.
"""
raise NotImplementedError()
@property
def matrix(self) -> sympy.Matrix:
"""Unitary matrix describing gate's action on state vector.
We need it to be able to implement .propagate() on the operation class.
"""
raise NotImplementedError()
def controlled(self, num_control_qubits: int) -> "Gate":
raise NotImplementedError()
@property
def dagger(self) -> "Gate":
raise NotImplementedError()
def bind(self, symbols_map: Dict[sympy.Symbol, Parameter]) -> "Gate":
raise NotImplementedError()
def __call__(self, *qubit_indices: int) -> "GateOperation":
"""Apply this gate on qubits in a circuit."""
return GateOperation(self, qubit_indices)
def to_dict(self):
return {
"name": self.name,
**(
{"params": list(map(serialize_expr, self.params))}
if self.params
else {}
),
**(
{"free_symbols": sorted(map(str, self.free_symbols))}
if self.free_symbols
else {}
),
}
def _gate_from_dict(dict_, custom_gate_defs):
"""Prototype implementation of circuit deserialization"""
gate_ref = _builtin_gates.builtin_gate_by_name(dict_["name"])
if gate_ref is not None:
# ATM we don't have a better way to check if the serialized gate was parametric
# or not
if isinstance(gate_ref, MatrixFactoryGate):
return gate_ref
else:
return gate_ref(
*[
deserialize_expr(param, dict_.get("free_symbols", []))
for param in dict_["params"]
]
)
if dict_["name"] == CONTROLLED_GATE_NAME:
wrapped_gate = _gate_from_dict(dict_["wrapped_gate"], custom_gate_defs)
return ControlledGate(wrapped_gate, dict_["num_control_qubits"])
if dict_["name"] == DAGGER_GATE_NAME:
wrapped_gate = _gate_from_dict(dict_["wrapped_gate"], custom_gate_defs)
return Dagger(wrapped_gate)
gate_def = next(
(
gate_def
for gate_def in custom_gate_defs
if gate_def.gate_name == dict_["name"]
),
None,
)
if gate_def is None:
raise ValueError(
f"Custom gate definition for {dict_['name']} missing from serialized dict"
)
# Materialize the names: a bare map iterator would be exhausted after the first param
symbol_names = [serialize_expr(symbol) for symbol in gate_def.params_ordering]
return gate_def(
*[deserialize_expr(param, symbol_names) for param in dict_["params"]]
)
# TODO:
# - controlled gate
# - dagger
@dataclass(frozen=True)
class GateOperation:
gate: Gate
qubit_indices: Tuple[int, ...]
def to_dict(self):
return {
"type": "gate_operation",
"gate": self.gate.to_dict(),
"qubit_indices": list(self.qubit_indices),
}
@classmethod
def from_dict(cls, dict_, custom_gate_defs):
return cls(
gate=_gate_from_dict(dict_["gate"], custom_gate_defs),
qubit_indices=tuple(dict_["qubit_indices"]),
)
def __str__(self):
return f"{self.gate}({','.join(map(str, self.qubit_indices))})"
GATE_OPERATION_DESERIALIZERS = {"gate_operation": GateOperation.from_dict}
def _gate_operation_from_dict(dict_, custom_gate_defs):
# Add deserializers here when we need to support custom, non-gate operations
return GATE_OPERATION_DESERIALIZERS[dict_["type"]](dict_, custom_gate_defs)
@singledispatch
def _sub_symbols(parameter, symbols_map: Dict[sympy.Symbol, Parameter]) -> Parameter:
raise NotImplementedError()
@_sub_symbols.register
def _sub_symbols_in_number(
parameter: Number, symbols_map: Dict[sympy.Symbol, Parameter]
) -> Number:
return parameter
@_sub_symbols.register
def _sub_symbols_in_expression(
parameter: sympy.Expr, symbols_map: Dict[sympy.Symbol, Parameter]
) -> sympy.Expr:
return parameter.subs(symbols_map)
@_sub_symbols.register
def _sub_symbols_in_symbol(
parameter: sympy.Symbol, symbols_map: Dict[sympy.Symbol, Parameter]
) -> Parameter:
return symbols_map.get(parameter, parameter)
def _all_attrs_equal(obj, other_obj, attrs):
return all(getattr(obj, attr) == getattr(other_obj, attr) for attr in attrs)
@dataclass(frozen=True)
class MatrixFactoryGate:
"""`Gate` protocol implementation with a deferred matrix construction.
Most built-in gates are instances of this class.
It requires the gate definition to be present during deserialization, so it's not
easily applicable for gates defined in Orquestra steps.
Keeping a `matrix_factory` instead of a plain gate matrix allows us to defer matrix
construction to _after_ parameter binding. This saves unnecessary work in scenarios
where we construct a quantum circuit and immediately bind parameter values. When done
multiple times, e.g. for every gate in each optimization step, this can lead to major
performance issues.
Args:
name: Name of this gate. Implementers of new gates should make sure that the names
are unique.
matrix_factory: a callable mapping arbitrary number of parameters into gate
matrix. Implementers of new gates should make sure the returned matrices are
square and of dimension being 2 ** `num_qubits`.
params: gate parameters - either concrete values or opaque symbols.
Will be passed to `matrix_factory` when `matrix` property is requested.
num_qubits: number of qubits this gate acts on.
"""
name: str
matrix_factory: Callable[..., sympy.Matrix]
params: Tuple[Parameter, ...]
num_qubits: int
is_hermitian: bool = False
@property
def matrix(self) -> sympy.Matrix:
"""Unitary matrix defining action of this gate.
This is a computed property using `self.matrix_factory` called
with parameters bound to this gate.
"""
return self.matrix_factory(*self.params)
def bind(self, symbols_map) -> "MatrixFactoryGate":
new_symbols = tuple(_sub_symbols(param, symbols_map) for param in self.params)
return MatrixFactoryGate(
name=self.name,
matrix_factory=self.matrix_factory,
params=new_symbols,
num_qubits=self.num_qubits,
)
def controlled(self, num_controlled_qubits: int) -> Gate:
return ControlledGate(self, num_controlled_qubits)
@property
def dagger(self) -> Gate:
return self if self.is_hermitian else Dagger(self)
def __str__(self):
return (
f"{self.name}({', '.join(map(str,self.params))})"
if self.params
else self.name
)
def __eq__(self, other):
if type(self) != type(other):
return False
if not _all_attrs_equal(
self, other, set(self.__dataclass_fields__) - {"params"}
):
return False
if len(self.params) != len(other.params):
return False
return all(
_are_matrix_elements_equal(p1, p2)
for p1, p2 in zip(self.params, other.params)
)
# Normally, we'd use the default implementations by inheriting from the Gate protocol.
# We can't do that because of __init__ arg default value issues, this is
# the workaround.
free_symbols = Gate.free_symbols
__call__ = Gate.__call__
to_dict = Gate.to_dict
CONTROLLED_GATE_NAME = "Control"
@dataclass(frozen=True)
class ControlledGate(Gate):
wrapped_gate: Gate
num_control_qubits: int
@property
def name(self):
return CONTROLLED_GATE_NAME
@property
def num_qubits(self):
return self.wrapped_gate.num_qubits + self.num_control_qubits
@property
def matrix(self):
return sympy.Matrix.diag(
sympy.eye(2 ** self.num_qubits - 2 ** self.wrapped_gate.num_qubits),
self.wrapped_gate.matrix,
)
@property
def params(self):
return self.wrapped_gate.params
def controlled(self, num_control_qubits: int) -> "ControlledGate":
return ControlledGate(
wrapped_gate=self.wrapped_gate,
num_control_qubits=self.num_control_qubits + num_control_qubits,
)
@property
def dagger(self) -> "ControlledGate":
return ControlledGate(
wrapped_gate=self.wrapped_gate.dagger,
num_control_qubits=self.num_control_qubits,
)
def bind(self, symbols_map) -> "Gate":
return self.wrapped_gate.bind(symbols_map).controlled(self.num_control_qubits)
def to_dict(self):
return {
"name": self.name,
"wrapped_gate": self.wrapped_gate.to_dict(),
"num_control_qubits": self.num_control_qubits,
}
DAGGER_GATE_NAME = "Dagger"
@dataclass(frozen=True)
class Dagger(Gate):
wrapped_gate: Gate
@property
def matrix(self) -> sympy.Matrix:
return self.wrapped_gate.matrix.adjoint()
@property
def params(self) -> Tuple[Parameter, ...]:
return self.wrapped_gate.params
@property
def num_qubits(self) -> int:
return self.wrapped_gate.num_qubits
@property
def name(self):
return DAGGER_GATE_NAME
def controlled(self, num_control_qubits: int) -> Gate:
return self.wrapped_gate.controlled(num_control_qubits).dagger
def bind(self, symbols_map) -> "Gate":
return self.wrapped_gate.bind(symbols_map).dagger
@property
def dagger(self) -> "Gate":
return self.wrapped_gate
def to_dict(self):
return {
"name": self.name,
"wrapped_gate": self.wrapped_gate.to_dict(),
}
def _n_qubits(matrix):
n_qubits = math.floor(math.log2(matrix.shape[0]))
if 2 ** n_qubits != matrix.shape[0] or 2 ** n_qubits != matrix.shape[1]:
raise ValueError("Gate's matrix has to be square with dimension 2^N")
return n_qubits
def _matrix_to_json(matrix: sympy.Matrix):
return [
[serialize_expr(element) for element in matrix.row(row_i)]
for row_i in range(matrix.shape[0])
]
def _matrix_from_json(
json_rows: List[List[str]], symbols_names: Iterable[str]
) -> sympy.Matrix:
return sympy.Matrix(
[
[deserialize_expr(element, symbols_names) for element in json_row]
for json_row in json_rows
]
)
@dataclass(frozen=True)
class FixedMatrixFactory:
"""Can be passed as `matrix_factory` when a gate matrix isn't lazily evaluated."""
matrix: sympy.Matrix
params_ordering: Tuple[Parameter, ...]
def __call__(self, *gate_params):
return self.matrix.subs(
{symbol: arg for symbol, arg in zip(self.params_ordering, gate_params)}
)
def __eq__(self, other):
if type(self) != type(other):
return False
if self.params_ordering != other.params_ordering:
return False
if not _are_matrices_equal(self.matrix, other.matrix):
return False
return True
@dataclass(frozen=True)
class CustomGateDefinition:
"""Use this class to define a non-built-in gate.
User-defined gates are treated differently than the built-in ones,
because the built-in ones are defined in `zquantum.core` library, and so
we can assume that the definitions will be available during circuit deserialization.
User-provided gates can be defined in one repo (e.g. Orquestra step), serialized,
and passed to another project for deserialization. The other project must have access
to gate details, e.g. the gate matrix. This class is designed to keep track of
the gate details needed for deserialization.
Instances of this class are serialized by the Circuit objects, additionally to
Circuit operations.
"""
gate_name: str
matrix: sympy.Matrix
params_ordering: Tuple[sympy.Symbol, ...]
def __post_init__(self):
n_qubits = _n_qubits(self.matrix)
object.__setattr__(self, "_n_qubits", n_qubits)
def __call__(self, *params):
return MatrixFactoryGate(
self.gate_name,
FixedMatrixFactory(self.matrix, self.params_ordering),
params,
self._n_qubits,
)
def to_dict(self):
return {
"gate_name": self.gate_name,
"matrix": _matrix_to_json(self.matrix),
"params_ordering": list(map(serialize_expr, self.params_ordering)),
}
@classmethod
def from_dict(cls, dict_):
symbols = [sympy.Symbol(term) for term in dict_.get("params_ordering", [])]
return cls(
gate_name=dict_["gate_name"],
matrix=_matrix_from_json(dict_["matrix"], dict_.get("params_ordering", [])),
params_ordering=tuple(symbols),
)
def __eq__(self, other):
if type(self) != type(other):
return False
if self.gate_name != other.gate_name:
return False
if self.params_ordering != other.params_ordering:
return False
if not _are_matrices_equal(self.matrix, other.matrix):
return False
return True
def _are_matrix_elements_equal(element, another_element):
"""Determine if two elements from gates' matrices are equal.
This is to be used in __eq__ method when comparing matrices elementwise.
Args:
element: first value to compare. It can be float, complex or some sympy expression.
another_element: second value to compare.
"""
difference = sympy.N(sympy.expand(element) - sympy.expand(another_element))
try:
return np.allclose(
float(sympy.re(difference)) + 1j * float(sympy.im(difference)), 0
)
except TypeError:
return False
def _are_matrices_equal(matrix, another_matrix):
return all(
_are_matrix_elements_equal(element, another_element)
for element, another_element in zip(matrix, another_matrix)
)
def _circuit_size_by_operations(operations):
return (
0
if not operations
else max(
qubit_index
for operation in operations
for qubit_index in operation.qubit_indices
)
+ 1
)
def _bind_operation(op: GateOperation, symbols_map) -> GateOperation:
return op.gate.bind(symbols_map)(*op.qubit_indices)
CIRCUIT_SCHEMA = SCHEMA_VERSION + "-circuit"
class Circuit:
"""ZQuantum representation of a quantum circuit."""
def __init__(
self,
operations: Optional[Iterable[GateOperation]] = None,
n_qubits: Optional[int] = None,
custom_gate_definitions: Optional[Iterable[CustomGateDefinition]] = None,
):
self._operations = list(operations) if operations is not None else []
self._n_qubits = (
n_qubits
if n_qubits is not None
else _circuit_size_by_operations(self._operations)
)
self._custom_gate_definitions = (
list(custom_gate_definitions) if custom_gate_definitions else []
)
@property
def operations(self):
"""Sequence of quantum gates to apply to qubits in this circuit."""
return self._operations
@property
def custom_gate_definitions(self):
return self._custom_gate_definitions
@property
def n_qubits(self):
"""Number of qubits in this circuit.
Not every qubit has to be used by a gate.
"""
return self._n_qubits
@property
def free_symbols(self):
"""Set of all the sympy symbols used as params of gates in the circuit."""
return reduce(
set.union,
(operation.gate.free_symbols for operation in self._operations),
set(),
)
def __eq__(self, other: "Circuit"):
if not isinstance(other, type(self)):
return False
if self.n_qubits != other.n_qubits:
return False
if list(self.operations) != list(other.operations):
return False
return True
def __add__(self, other: Union["Circuit", GateOperation]):
return _append_to_circuit(other, self)
def bind(self, symbols_map: Dict[sympy.Symbol, Any]):
"""Create a copy of the current circuit with the parameters of each gate bound to
the values provided in the input symbols map
Args:
symbols_map: A map of the symbols/gate parameters to new values
"""
return type(self)(
operations=[_bind_operation(op, symbols_map) for op in self.operations],
n_qubits=self.n_qubits,
)
def to_dict(self):
"""Creates a dictionary representing a circuit.
The dictionary is serializable to JSON.
Returns:
A mapping with keys:
- "schema"
- "n_qubits"
- "symbolic_params"
- "gates"
"""
return {
"schema": CIRCUIT_SCHEMA,
"n_qubits": self.n_qubits,
**(
{
"operations": [
operation.to_dict() for operation in self.operations
],
}
if self.operations
else {}
),
**(
{
"custom_gate_definitions": [
gate_def.to_dict() for gate_def in self.custom_gate_definitions
]
}
if self.custom_gate_definitions
else {}
),
}
@classmethod
def from_dict(cls, dict_):
defs = [
CustomGateDefinition.from_dict(def_dict)
for def_dict in dict_.get("custom_gate_definitions", [])
]
return cls(
operations=[
_gate_operation_from_dict(op_dict, defs)
for op_dict in dict_.get("operations", [])
],
n_qubits=dict_["n_qubits"],
custom_gate_definitions=defs,
)
def __repr__(self):
return f"{type(self).__name__}(operations=[{', '.join(map(str, self.operations))}], n_qubits={self.n_qubits}, custom_gate_definitions={self.custom_gate_definitions})"
@singledispatch
def _append_to_circuit(other, circuit: Circuit):
raise NotImplementedError()
@_append_to_circuit.register
def _append_operation(other: GateOperation, circuit: Circuit):
n_qubits_by_operation = max(other.qubit_indices) + 1
return type(circuit)(
operations=[*circuit.operations, other],
n_qubits=max(circuit.n_qubits, n_qubits_by_operation),
)
@_append_to_circuit.register
def _append_circuit(other: Circuit, circuit: Circuit):
return type(circuit)(
operations=[*circuit.operations, *other.operations],
n_qubits=max(circuit.n_qubits, other.n_qubits),
)
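# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module).
# Assuming gate builders such as ``X`` and ``RX`` are defined earlier in this
# library and produce GateOperation objects when called with qubit indices,
# a circuit could be composed, bound, and round-tripped through a dict
# roughly like this:
#
#     theta = sympy.Symbol("theta")
#     circuit = Circuit([X(0)]) + RX(theta)(1)   # __add__ accepts operations and circuits
#     bound = circuit.bind({theta: 0.5})         # new circuit with theta substituted
#     restored = Circuit.from_dict(circuit.to_dict())
#     assert restored == circuit and circuit.n_qubits == 2
# ---------------------------------------------------------------------------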
|
python
|
import pygame, sys
from pygame.locals import *
import random
pygame.init() #initializing the pygame library
#creating sample questions
data = [
{
'question': 'Who is the president of America?',
'right-answer': 'Barack Obama',
'option1' : 'George Washington',
'option2' : 'Paul Kagame',
'option3' : 'Barack Obama'
},
{
'question': 'Who created Facebook?',
'right-answer': 'Mark Zuckerberg',
'option1': 'Bill Gates',
'option2': 'Mark Zuckerberg',
'option3': 'Steve Jobs'
},
{
'question': 'Who is the richest person on earth?',
'right-answer': 'Bill Gates',
'option1': 'Bill Gates',
'option2': 'Jack Ma',
'option3': 'Peter Davinch'
},
{
'question': 'What is the capital of United Kingdom?',
'right-answer': 'London',
'option1': 'Manchester',
'option2': 'Arsenal',
'option3': 'London'
}
]
# preparing the surface
WINDOWWIDTH = 600
WINDOWHEIGHT = 500
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT)) #setting the main window size
pygame.display.set_caption('Smarter')
#storing colors to be used
BACKGROUNDCOLOR = (16, 78, 139)
WHITE = (255, 255, 255)
ORANGE = (255, 147, 51)
NAVYBLUE = (0, 0, 128)
YELLOW = (255, 221, 51)
#necessary variable constants
STARTED = False #controls the start of the game
STATE = 0 #stores the index of the first question
QUESTIONRECTWIDTH = None #holds the width of the Rectangle around the question
QUESTIONRECTHEIGHT = None # holds the height of the Rectangle around the question
WINDOWSPACING = 0 # determines the space to leave between the rectangle around the question and the main window
QUESTIONRECTANGLE = None #stores the drawn rectangle object around the question
OPTIONSLIST = [] #stores informations about the options
def drawingQuestion(textl):
""" This function draws the question to the screen """
global QUESTIONRECTWIDTH, QUESTIONRECTHEIGHT, QUESTIONRECTANGLE, WINDOWSPACING
fontObj = pygame.font.SysFont('verdana', 20)
textSurfaceObj = fontObj.render(textl, True, WHITE)
textRectObj = textSurfaceObj.get_rect()
QUESTIONRECTWIDTH = textRectObj.width + 20 # the width of the text plus 20. we want the rectangle's width around the question to be greater than the question's width
QUESTIONRECTHEIGHT = textRectObj.height + 20 # same with the height: we want the height of the rectangle to be greater than the question height.
WINDOWSPACING = (WINDOWWIDTH - QUESTIONRECTWIDTH) / 2 # calculates the space to leave between the main window and the rectangle around the question.
QUESTIONRECTANGLE = pygame.Rect(WINDOWSPACING, 50, QUESTIONRECTWIDTH, QUESTIONRECTHEIGHT)
pygame.draw.rect(DISPLAYSURF, WHITE, QUESTIONRECTANGLE, 2)
DISPLAYSURF.blit(textSurfaceObj, ((QUESTIONRECTANGLE.topleft[0] + 10), (QUESTIONRECTANGLE.topleft[1] + 10)))
def drawingOptions(options):
""" This function draws the question's options to the screen. This function's parameter is a list."""
global QUESTIONRECTWIDTH, QUESTIONRECTHEIGHT, OPTIONSLIST
current_height = 0 #this helps leave a space between the rectangles around the options.
counter = 0
random.shuffle(options) #randomly rearranging the question's options
for option in options:
fontObj = pygame.font.SysFont('verdana', 15)
optionSurfaceObj = fontObj.render(option, True, YELLOW)
optionRectObj = optionSurfaceObj.get_rect()
textwidth = optionRectObj.width
textheight = optionRectObj.height
spacing_width = (QUESTIONRECTWIDTH - textwidth) / 2 #calculating the width to leave between the rectangle around the option and the text.
spacing_height = (QUESTIONRECTHEIGHT - textheight) / 2 #calculating the height to leave between the rectangle around the option and the text.
if current_height == 0:
option_rectangle = pygame.Rect(5, 200, QUESTIONRECTWIDTH, QUESTIONRECTHEIGHT)
if counter == 0:
OPTIONSLIST.append({'option1':
{
'x': (option_rectangle.topleft[0], option_rectangle.topright[0]),
'y': (option_rectangle.topleft[1], option_rectangle.bottomleft[1]),
'rectangle' : option_rectangle,
'text' : option
}
})
counter += 1
pygame.draw.rect(DISPLAYSURF, WHITE, option_rectangle, 1)
DISPLAYSURF.blit(optionSurfaceObj, ((option_rectangle.topleft[0] + spacing_width), (option_rectangle.topleft[1] + spacing_height)))
current_height = option_rectangle.bottomleft[1]
else:
current_height += 10
option_rectangle = pygame.Rect(5, current_height, QUESTIONRECTWIDTH, QUESTIONRECTHEIGHT)
if counter == 1:
OPTIONSLIST.append({'option2':
{
'x': (option_rectangle.topleft[0], option_rectangle.topright[0]),
'y': (option_rectangle.topleft[1], option_rectangle.bottomleft[1]),
'rectangle' : option_rectangle,
'text' : option
}
})
counter += 1
else:
OPTIONSLIST.append({'option3':
{
'x': (option_rectangle.topleft[0], option_rectangle.topright[0]),
'y': (option_rectangle.topleft[1], option_rectangle.bottomleft[1]),
'rectangle' : option_rectangle,
'text' : option
}
})
counter = 0
# print option_rectangle
pygame.draw.rect(DISPLAYSURF, WHITE, option_rectangle, 1)
DISPLAYSURF.blit(optionSurfaceObj, ((option_rectangle.topleft[0] + spacing_width), (option_rectangle.topleft[1] + spacing_height)))
current_height = option_rectangle.bottomleft[1]
#the game loop
while True:
if STARTED == False:
DISPLAYSURF.fill(BACKGROUNDCOLOR)
drawingQuestion(data[STATE]['question']) #drawing the question on the screen
drawingOptions([data[STATE]['option1'], data[STATE]['option2'], data[STATE]['option3']]) #drawing the options on the screen
STARTED = True
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
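# ---------------------------------------------------------------------------
# Illustration (added, not part of the original script): OPTIONSLIST stores the
# screen coordinates and text of each option rectangle, so answer checking
# could be wired into the event loop roughly like this (hypothetical sketch):
#
#     elif event.type == MOUSEBUTTONDOWN:
#         for entry in OPTIONSLIST:
#             for info in entry.values():
#                 if info['rectangle'].collidepoint(event.pos):
#                     if info['text'] == data[STATE]['right-answer']:
#                         STATE = (STATE + 1) % len(data)   # advance to the next question
#                     OPTIONSLIST = []
#                     STARTED = False                       # force a redraw
# ---------------------------------------------------------------------------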
|
python
|
# example of setting up, running, and graphing landau damping in 3D
import os
import py_platypus as plat
from py_platypus.utils.params import Parameters as Parameters
from py_platypus.vis.plotter import Plotter as Plotter
if __name__ == "__main__":
# override default parameters
params = {
"dimensions": 3,
"nppc": 4,
"landau": { # defaults for Landau damping
"amplitude": 0.8,
"mode": 3 # number of density peaks
},
"print_every": 1, # print current step at every step
"save_every": 1, # save data at every time step
"runtime": 1
}
# set up and run Landau damping simulation in 3D
plat.run_sim.landau("landau_3d", 3,
param_dict=params
)
# load parameters from a json file
param_json = os.path.join(plat.PLATYPUS_HOME, "py_platypus/out/landau_3d/params.json")
params = Parameters(3, load_file=param_json)
# create instance of plotting class and plot the energy
# for the three dimension case, only the energy can be plotted
plotter = Plotter("landau_3d", params)
plotter.plot_energy()
|
python
|
#from app import Util
import aiml
import sys
def formatOutPut(text):
text = text.replace('\\t','\t')
text = text.replace('\\n','\n')
return text
def stripCommand(text):
if(text[0] == '/'):
return text[1:]
return text
def main():
# create and configure the bot's knowledge base
ctfbot = aiml.Kernel()
ctfbot.learn("resources/knowledgebase.aiml")
while True:
question = stripCommand(input("> "))
if question == 'quit':
return 0
response = ctfbot.respond(question)
# print( Util.formatOutPut(response))
print(formatOutPut(response))
if __name__ == '__main__':
sys.exit(int(main() or 0))
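# ---------------------------------------------------------------------------
# For illustration only: resources/knowledgebase.aiml is not shown here, but a
# minimal AIML category the kernel could learn would look roughly like:
#
#     <category>
#         <pattern>WHAT IS A CTF</pattern>
#         <template>A capture-the-flag security competition.\n</template>
#     </category>
#
# A literal "\n" in a template is what formatOutPut() above converts into a
# real newline before printing.
# ---------------------------------------------------------------------------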
|
python
|
from trackstats.models import Domain, Metric
# Domains
Domain.objects.INVESTMENT = Domain.objects.register(
ref='investment',
name='investment'
)
# Metrics, these are associated with a domain
Metric.objects.INVESTMENT_COUNT = Metric.objects.register(
domain=Domain.objects.INVESTMENT,
ref='investment_count',
name='Number of investments in the system')
Metric.objects.INVESTMENT_WON_COUNT = Metric.objects.register(
domain=Domain.objects.INVESTMENT,
ref='investment_won_count',
name='Number of investments in the system at stage won')
Metric.objects.INVESTMENT_VERIFY_WIN_COUNT = Metric.objects.register(
domain=Domain.objects.INVESTMENT,
ref='investment_verify_win_count',
name='Number of investments in the system at stage verify win')
Metric.objects.INVESTMENT_ACTIVE_COUNT = Metric.objects.register(
domain=Domain.objects.INVESTMENT,
ref='investment_active_count',
name='Number of investments in the system at stage active')
Metric.objects.INVESTMENT_PIPELINE_COUNT = Metric.objects.register(
domain=Domain.objects.INVESTMENT,
ref='investment_pipeline_count',
name='Number of investments in the system not at stage verify win or win')
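# ---------------------------------------------------------------------------
# Illustrative sketch (assumes the standard django-trackstats recording API,
# which is not shown in this file): once registered, a metric is typically
# recorded against a period, e.g. from a scheduled task:
#
#     from trackstats.models import Period, StatisticByDate
#
#     StatisticByDate.objects.record(
#         metric=Metric.objects.INVESTMENT_COUNT,
#         value=Investment.objects.count(),   # hypothetical Investment model
#         period=Period.DAY,
#     )
# ---------------------------------------------------------------------------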
|
python
|
import logging
import json
import transaction
from pyramid.view import view_config
from pyramid.request import Response
__author__ = 'max'
log = logging.getLogger(__name__)
MODULE_DIR = "newsbomb_recommends.views"
@view_config(route_name='generic', renderer='json')
def api(request):
try:
module = request.matchdict["module"]
method = request.matchdict["method"]
if request.method == "GET":
params = json.loads(request.params.get("params", "{}"))
elif request.method == "POST":
params = request.params.mixed()
log.warning("params:%s" % params)
# Allow cross domain call for AJAX
request.response = Response()
request.response.headerlist = []
request.response.headerlist.extend(
(
('Access-Control-Allow-Origin', '*'),
('Content-Type', 'application/json; charset=UTF-8')
)
)
module_path = "%s.%s" % (MODULE_DIR, module)
# import module
mod = __import__(module_path)
components = module_path.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
func = getattr(mod, method, None)
if func:
result = None
with transaction.manager:
result = func(**params)
if method == "verify_email":
return result
return {"status": "success", "data": result}
else:
return {"status": "error", "data": ["No such method: %s." % method]}
# except ValidationError as ex:
# return {"status": "error", "data": [str(ex)]}
except Exception as ex:
log.exception("%s, %s, %s" % (module, method, ex))
return {"status": "exception", "data": [str(ex)]}
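# ---------------------------------------------------------------------------
# Illustration (added, not part of the original view): assuming the 'generic'
# route exposes {module} and {method} as matchdict keys, a call such as
#
#     GET /recommend/get_recommendations?params={"user_id": 42}
#
# would import newsbomb_recommends.views.recommend and invoke
# get_recommendations(user_id=42) inside a transaction, returning
# {"status": "success", "data": ...} as JSON. Both the route pattern and the
# target module/function names here are hypothetical.
# ---------------------------------------------------------------------------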
#EOF
|
python
|
"""
Define the abstract Game class for providing a structure/ interface for agent environments.
Notes:
- Base implementation done.
- Documentation 15/11/2020
"""
from abc import ABC, abstractmethod
import typing
import numpy as np
from utils.game_utils import GameState
class Game(ABC):
"""
This class specifies the base Game class. To define your own game, subclass this class and implement the
functions below. This works when the game is either single-player or two-player/ adversarial. Note that
the implementations of this class have to be stateless; all state information can be stored in GameState objects.
Optionally, one can also subclass Gym.Env for single-player games, and make use of the existing logic in
Games/gym/GymGame.py or Games/atari/AtariGame.py.
See Games/gym/GymGame.py for an example implementation of a single-player game.
See Games/hex/HexGame.py for an example implementation of a two-player game.
"""
def __init__(self, n_players: int = 1) -> None:
"""
Initialize the base variables for the Game class.
:param n_players: int The number of players/ adversaries within the implementation of Game (either 1 or 2)
:raise NotImplementedError: Error raised for n_players larger than 2.
"""
self.n_players = n_players
self.n_symmetries = 1
if self.n_players > 2:
raise NotImplementedError(f"Environments for more than 2 agents are not yet supported, {n_players} > 2")
@abstractmethod
def getInitialState(self) -> GameState:
"""
Initialize the environment and get the root-state wrapped in a GameState data structure.
:return: GameState Data structure containing the specifics of the current environment state.
"""
@abstractmethod
def getDimensions(self) -> typing.Tuple[int, ...]:
"""
Get the raw observation dimensions visible for a learning algorithm.
:return: tuple of integers representing the dimensions of observation data.
"""
@abstractmethod
def getActionSize(self) -> int:
"""
Get the number of atomic actions in the environment.
:return: int The number of atomic actions in the environment.
"""
@abstractmethod
def getNextState(self, state: GameState, action: int, **kwargs) -> typing.Tuple[GameState, float]:
"""
Perform an action in the environment and observe the transition and reward.
:param state: GameState Data structure containing the specifics of the current environment state.
:param action: int Integer action to perform on the environment.
:return: tuple containing the next environment state in a GameState object, along with a float reward.
"""
@abstractmethod
def getLegalMoves(self, state: GameState) -> np.ndarray:
"""
Determine the legal moves at the provided environment state.
:param state: GameState Data structure containing the specifics of the current environment state.
:return: np.ndarray Array of length |action_space| with 0s for illegal and 1s for legal moves.
"""
@abstractmethod
def getGameEnded(self, state: GameState, **kwargs) -> typing.Union[float, int]:
"""
Determine whether the given state is a terminal state.
:param state: GameState Data structure containing the specifics of the current environment state.
:return: float or int Always returns 0 until the game ends, then a terminal reward is returned.
"""
@abstractmethod
def buildObservation(self, state: GameState) -> np.ndarray:
"""
Compute some representation of the GameState, to be used as the input of a neural network.
:param state: GameState Data structure containing the specifics of the current environment state.
:return: np.ndarray Some game-specific representation of the current environment state.
"""
@abstractmethod
def getSymmetries(self, state: GameState, pi: np.ndarray) -> typing.List:
"""
@DEPRECATED: a future version will replace state with GameHistory to get symmetries over observation trajectories.
Compute every possible symmetry of the provided environment state with correctly oriented pi-vectors.
:param state: GameState Data structure containing the specifics of the current environment state.
:param pi: np.ndarray Raw move probability vector of size |action-space|.
:return: A list of the form [(state, pi)] where each tuple is a symmetrical form of the state and the
corresponding pi vector. This can be used for diversifying training examples.
"""
@abstractmethod
def getHash(self, state: GameState) -> typing.Union[str, bytes, int]:
"""
Compute a hashable representation of the provided environment state, h: StateSpace -> Universe
:param state: GameState Data structure containing the specifics of the current environment state.
:return: Some hashable datatype representing the provided GameState.
"""
def close(self, state: GameState) -> None:
"""
Clean up any variables within the environment/ class, if necessary.
:param state: GameState Data structure containing the specifics of the current environment state.
"""
def render(self, state: GameState):
"""
Base method for generating a visual rendering of the game implementation.
:param state: GameState Data structure containing the specifics of the current environment state.
:raises NotImplementedError: Error raised if the child class did not implement a rendering method.
"""
raise NotImplementedError(f"Render method not implemented for Game: {self}")
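# ---------------------------------------------------------------------------
# Minimal implementation sketch (illustrative only; the GameState constructor
# implied below is assumed, not taken from utils.game_utils):
#
#     class GuessParityGame(Game):
#         """Single-player toy game: pick action 0 (even) or 1 (odd)."""
#
#         def getInitialState(self): ...        # wrap an observation in a GameState
#         def getDimensions(self): return (1,)
#         def getActionSize(self): return 2
#         def getNextState(self, state, action, **kwargs): ...  # return (next_state, reward)
#         def getLegalMoves(self, state): return np.ones(2)
#         def getGameEnded(self, state, **kwargs): ...          # 0 until terminal, then the reward
#         def buildObservation(self, state): ...
#         def getSymmetries(self, state, pi): return [(state, pi)]
#         def getHash(self, state): ...
#
# Every abstract method above must be overridden before the subclass can be
# instantiated.
# ---------------------------------------------------------------------------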
|
python
|
x="There are %d types of people." % 10
binary="binary"
do_not="don't"
y="Those who know %s and those who %s." % (binary,do_not)
print(x)
print(y)
print("I said: %r" % x)
print("I also said: '%s'." % y)
hilarious=False
joke_evaluation="Isn't that joke so funny?! %r"
print(joke_evaluation % hilarious)
w="This is the left side of..."
e="a string with a right side"
print(w+e)
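# For comparison (added note, not in the original exercise): the same output
# can be produced with str.format or f-strings, e.g.
#     print("There are {} types of people.".format(10))
#     print(f"I said: {x!r}")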
|
python
|
# -*- encoding: utf-8 -*-
from bs4 import Tag
from datetime import datetime
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from scrapers.helpers import jsonify_seminars
#########################################################
# This part will seriously depend on the structure of the LAS page
def get_date_time_start_end(data, _time):
year = " {} ".format(datetime.now().year)
start = parse(data + year + _time)
end = start + relativedelta(hours=+1)
return start, end
def get_seminar_info(seminar):
try:
speaker = seminar('b')[0].getText().strip()
if speaker != 'NO SEMINAR':
title = seminar('i')[1].getText().strip()
abstract = "<br/> ".join(map(lambda s: s.getText().strip(),
seminar('i')[2:]))
return (speaker + ' - ' + title, abstract +
"<br/><br/><i>There will be tea in room 606 from 4:15-4:45pm</i>.")
except (IndexError, TypeError, AttributeError):
# log the error
pass
return None, None
def clean_couple(couple):
start, end = get_date_time_start_end(couple[0].string, "4:45 pm")
location = "UCL, first floor of 25 Gordon Street, room D103"
# TODO: Get both using regexp instead of creating this horrible warning
if couple[1].getText().count('Title') > 1:
start = start + relativedelta(hours=-1, minutes=-30)
title = "WARN: two seminars"
description = "There are probably two seminars. <a href='http://www.homepages.ucl.ac.uk/~ucahsze/seminars.html' target='_blank'>Click here for additional information</a>."
else:
title, description = get_seminar_info(couple[1])
if title is not None:
seminar = {
'start': start,
'end': end,
'title': title,
'description': description,
'location': location
}
else:
seminar = None
return seminar
def admissible_couples(couple):
return (type(couple[0]) == Tag) and (couple[0].name == "dt") and (
type(couple[1]) == Tag) and (couple[1].name == "dd")
def get_event_list(soup):
data = soup.dl.contents
couples = filter(admissible_couples, ((data[i], data[i + 1])
for i in range(len(data) - 2)))
# We can accept empty abstracts but not empty titles
events = filter(lambda ev: ev is not None, map(clean_couple, couples))
return events
def get_nts(last_update=None):
"""Number Theory Seminar"""
return jsonify_seminars(
"http://www.homepages.ucl.ac.uk/~ucahsze/seminars.html", get_event_list,
last_update=last_update)
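# ---------------------------------------------------------------------------
# Behaviour note (illustrative, the input format is an assumption about the
# scraped page): with the helpers above,
#     get_date_time_start_end("Tuesday 10 October", "4:45 pm")
# parses the date against the current year and returns a (start, end) pair one
# hour apart, e.g. (2023-10-10 16:45, 2023-10-10 17:45) when run in 2023.
# ---------------------------------------------------------------------------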
|
python
|
#============================== IBM Code Challenge =============================
# mat_gui.py
#
# Creates GUI for user to interact with
#
# Description:
# This generates a GUI that allows the user to enter two matrices, select
# a mathematical operation, and displays the operation result to the user.
# The class implements the __init__ function to construct the GUI and breaks
# each component out into a new function to help compartmentalize the
# construction.
#
# Todo:
# Add save/restore callback and link to menu items
#===============================================================================
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import numpy as np
import re
import pdb
from mat_widgets import *
from mat_operation import *
class MatOpGUI(QMainWindow):
#===========================================================================
# Define class constants
#===========================================================================
# Define the list of mathematical operations the user can perform on the
# two operations.
OPERATIONS = ['Multiply',
'Sum of Column of Product',
'Product of Column of Product',
'Cumulative Sum Along Column of Product',
'Cumulative Product Along Column of Product',
'Sum of Row of Product',
'Product of Row of Product',
'Cumulative Sum Along Row of Product',
'Cumulative Product Along Row of Product',
'Min of Product',
'Max of Product',
'Mean of Product',
'Median of Product',
'Total Sum of Product',
'Total Product of Product']
# Define the operations that, when selected, will cause the operations
# selection row/column entry field to appear to the user.
OPS_TO_MAKE_ENTRY_VISIBLE = ['Sum of Column of Product',
'Product of Column of Product',
'Sum of Row of Product',
'Product of Row of Product']
# Define the operations that will act on a row of the resultant matrix.
# This will be used to determine the placeholder text of a line edit field
# for entering a row/column, to help the user.
OPS_ON_ROW = ['Sum of Row of Product',
'Product of Row of Product']
#===========================================================================
# Initialization function
#===========================================================================
def __init__(self):
"""
Initialization function for the MatOpGUI class. This will construct the
GUI, primarily through the __createGUI method, and display it to the user.
"""
# Call the super class init function to make sure this generates properly
super().__init__()
# -- Define Instance Variables -----------------------------------------
# Define variables for GUI properties
self.__fontFamily = 'Calibri'
self.__fontColor = QColor(250,250,250)
self.__guiColor = QColor(162, 62, 72) # Official GUI color
# Define counter for the number of operations performed
self.__opCounter = 0
# -- Set Window Properties ---------------------------------------------
self.setAcceptDrops(True)
self.setWindowTitle('Matrix Operations')
self.resize(800,400)
self.setWindowIcon(QIcon('imgs/icon.png'))
# -- Create and Show the GUI -------------------------------------------
# Create and show the GUI
self.__createGUI()
self.show()
def frame(func):
"""
Wrapper function for creating frames in a widget. This allows a function
to simply add content to a frame without handling the process of creating
grids and frames. This will automatically use a QGridLayout.
"""
def wrapper(self, pos, *args, grid = None, gridMargin = 0, gridSpacing = 0,
bgcolor = QColor(255,255,255,255), frameShape = None,
frameShadow = None, lineWidth = 0, **kwargs):
# Create the QFrame and QGridLayout
kwargs['frame'] = QFrame(frameShape = frameShape, frameShadow = frameShadow, lineWidth = lineWidth)
kwargs['grid'] = QGridLayout(margin = gridMargin, spacing = gridSpacing)
# Call the wrapped function
func(self, *args, **kwargs)
# Set the background color of the frame
kwargs['frame'].setAutoFillBackground(True)
p = kwargs['frame'].palette()
p.setColor(kwargs['frame'].backgroundRole(), bgcolor)
kwargs['frame'].setPalette(p)
# Set the grid created in this function as the frame's layout and
# add the frame to the parent's grid at the position provided
kwargs['frame'].setLayout(kwargs['grid'])
if grid is not None:
grid.addWidget(kwargs['frame'], *pos)
else:
self.grid.addWidget(kwargs['frame'], *pos)
# Return the frame
return kwargs['frame']
return wrapper
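# Illustrative note (added): with this decorator, a method only needs to
# populate kwargs['grid']; the wrapper supplies the QFrame/QGridLayout and
# attaches the finished frame to the parent grid. A hypothetical example:
#
#     @frame
#     def __createExampleFrame(self, *args, **kwargs):
#         kwargs['grid'].addWidget(QLabel('hello'), 0, 0)
#
#     self.__createExampleFrame((0, 0), gridMargin=5, bgcolor=QColor('white'))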
#===========================================================================
# Level 0: Top level GUI creation and menu bar
#===========================================================================
def __createGUI(self):
"""
Highest level function used to create the GUI components
"""
# -- Define Top-level Components ---------------------------------------
self.widget = QWidget(self) # The central widget in the main window
self.grid = QGridLayout() # The layout manager of the central widget
self.__createMenuBar() # The menu bar
# -- Main Gui Components -----------------------------------------------
self.__headerBar = self.__createHeaderBar((0,0), gridMargin = 5, gridSpacing = 15, bgcolor = self.__guiColor)
self.__contentFrame = self.__createContentFrame((1,0), gridMargin = 5, gridSpacing = 5)
# -- Setup the Grid ----------------------------------------------------
self.grid.setContentsMargins(0,0,0,0)
self.grid.setSpacing(0)
self.grid.setRowStretch(1,1)
# -- Set the Main Widget Properties ------------------------------------
self.widget.setLayout(self.grid)
self.setCentralWidget(self.widget)
def __createMenuBar(self):
"""
Creates the menu bar of the GUI and adds various menus and items to
perform tasks.
"""
# Use the PyQt menu construct. This is particularly important for Macs
# because it will keep the menubar with the GUI window rather than
# placing it at the top of the screen, as is usual for Macs. We don't
# want this to happen because Macs take control of the menus if you have
# it up there and can cause unexpected results.
self.menuBar().setNativeMenuBar(False)
# -- File Menu ---------------------------------------------------------
fileMenu = self.menuBar().addMenu('File')
fileMenu.setTearOffEnabled(True)
# Save Menu Item
saveMenuItem = QAction('Save', fileMenu, shortcut = 'Ctrl+S')
saveMenuItem.triggered.connect(self.__save)
fileMenu.addAction(saveMenuItem)
# Load Menu Item
loadMenuItem = QAction('Load', fileMenu, shortcut = 'Ctrl+L')
loadMenuItem.triggered.connect(self.__askForFileAndLoad)
fileMenu.addAction(loadMenuItem)
# -- Options Menu ------------------------------------------------------
optionsMenu = self.menuBar().addMenu('Options')
optionsMenu.setTearOffEnabled(True)
# Clear Menu Item
clearMenuItem = QAction('Clear All', optionsMenu, shortcut = 'Ctrl+A')
clearMenuItem.triggered.connect(self.__clearAll)
optionsMenu.addAction(clearMenuItem)
optionsMenu.addSeparator()
# Quit Menu Item
quitMenuItem = QAction('Quit', optionsMenu, shortcut = 'Ctrl+Q')
quitMenuItem.triggered.connect(self.close)
optionsMenu.addAction(quitMenuItem)
#===========================================================================
# Level 1: Header and Main Content Frame
#===========================================================================
@frame
def __createHeaderBar(self, *args, **kwargs):
"""
Create the large header bar at the top of the GUI. This just adds a nice,
convenient banner at the top for branding.
"""
# Create the Matrix Operations Label, configure it, and add it to the grid
matOpLabel = QLabel('Matrix Operations')
configureQLabel(matOpLabel, font = self.__fontFamily, font_size = 20,
font_color = self.__fontColor, alignment = Qt.AlignCenter)
kwargs['grid'].addWidget(matOpLabel, 0, 1)
@frame
def __createContentFrame(self, *args, **kwargs):
"""
Create the main content of the GUI. This is a second frame below the header.
This calls several sub-functions that create specific elements of the main
content frame.
"""
# Create the frame at the top for entering the name of the run
runNameFrame = self.__createRunNameFrame(
(0,0,1,2), grid = kwargs['grid'], gridMargin = 5, gridSpacing = 5,
)
# Set the tool tip for this frame to help the user out
runNameFrame.setToolTip('Optionally choose a name for your run.')
# -- Create Matrix Input Frames ----------------------------------------
# Create the two frames which allows the user to input the two matrices
self.__matrixAFrame = self.__createMatrixAInputFrame(
(1,0), grid = kwargs['grid'], gridMargin = 5, gridSpacing = 5,
frameShape = QFrame.StyledPanel, frameShadow = QFrame.Sunken, lineWidth = 0,
)
self.__matrixBFrame = self.__createMatrixBInputFrame(
(1,1), grid = kwargs['grid'], gridMargin = 5, gridSpacing = 5,
frameShape = QFrame.StyledPanel, frameShadow = QFrame.Sunken, lineWidth = 0,
)
# Set the tool tips for this frame to help the user out.
self.__matrixAFrame.setToolTip((
'Enter values for Matrix A here. You can change the matrix size to\n'
'a max of 10x10 and also randomly generate values for the matrix.'
))
self.__matrixBFrame.setToolTip((
'Enter values for Matrix B here. You can change the matrix size to\n'
'a max of 10x10 and also randomly generate values for the matrix.'
))
# -- Create Operation Selection Frame ----------------------------------
# Create the frame below the two matrices for selecting the matrix
# operation to perform
opSelectFrame = self.__createOperationSelectionFrame(
(2,0,1,2), grid = kwargs['grid'], gridMargin = 0, gridSpacing = 5
)
# Set the tool tip for this frame to help the user out
opSelectFrame.setToolTip((
'Select an operation to perform from the dropdown list. Some\n'
'operations act on a single row/column of a matrix. Hit Go!\n'
'to perform the operation.'
))
# -- Create Output Text Box --------------------------------------------
# Create the output text box.
self.__outputTextBox = QTextEdit()
# Make it so user's can't modify the text
self.__outputTextBox.setTextInteractionFlags(Qt.TextSelectableByMouse | Qt.TextSelectableByKeyboard)
# Do not allow text wrapping (to prevent the output from becoming too
# confusing).
self.__outputTextBox.setLineWrapMode(QTextEdit.NoWrap)
# Update the font to a monospaced font.
font = self.__outputTextBox.currentFont()
font.setPointSize(8)
font.setFamily('courier')
self.__outputTextBox.setFont(font)
# Make it a least 400 pixels tall
self.__outputTextBox.setMinimumHeight(400)
# Make the text box initially invisible until data needs to be displayed
# so as not to confuse the user.
self.__outputTextBox.setVisible(False)
# Add to the grid
kwargs['grid'].addWidget(self.__outputTextBox, 3, 0, 1, 2)
# -- Set Grid Properties -----------------------------------------------
kwargs['grid'].setRowStretch(1, 1)
kwargs['grid'].setColumnStretch(0, 1)
kwargs['grid'].setColumnStretch(1, 1)
#===========================================================================
# Level 2: Name, Matrix, Options, and Output Frames
#===========================================================================
@frame
def __createRunNameFrame(self, *args, **kwargs):
"""
Create the frame which allows users to enter the name of the run
"""
# Create the QLabel giving direction to the user
kwargs['grid'].addWidget(QLabel('Name Your Run'), 0, 0)
# Create the line edit for the user to enter the name
self.__nameLineEdit = QLineEdit()
self.__nameLineEdit.setPlaceholderText('Enter run name...')
kwargs['grid'].addWidget(self.__nameLineEdit, 0, 1)
# Set grid properties
kwargs['grid'].setColumnStretch(1, 1)
@frame
def __createMatrixAInputFrame(self, *args, **kwargs):
"""
Create the input frame for defining Matrix A. This has a label at the
top demarking this as Matrix A. It has a sub-frame for changing the size
of the frame, a table for defining the matrix, and a sub-frame for choosing
to randomly generate the matrix.
"""
# Create the label at the top of this frame, labeling this as Matrix A
sectionLabel = QLabel('Matrix A')
configureQLabel(sectionLabel, font = self.__fontFamily, font_size = 16,
alignment = Qt.AlignCenter)
kwargs['grid'].addWidget(sectionLabel, 0, 0)
# Create section for specifying the matrix size
self.__createMatrixASizeFrame(
(1,0), grid = kwargs['grid'], gridMargin = 0, gridSpacing = 0
)
# Create section for inputing the matrix. Default to a 3x3 matrix.
self.__matrixAInputTable = QTableWidget(3, 3)
font = self.__matrixAInputTable.horizontalHeader().font()
font.setWeight(QFont.Bold)
self.__matrixAInputTable.setAlternatingRowColors(True)
self.__matrixAInputTable.horizontalHeader().setFont(font)
self.__matrixAInputTable.verticalHeader().setFont(font)
for row in range(3):
for col in range(3):
self.__matrixAInputTable.setItem(row, col, QTableWidgetItem(''))
kwargs['grid'].addWidget(self.__matrixAInputTable, 2, 0)
# Create section for random matrix generation
self.__createMatrixARandFrame(
(3,0), grid = kwargs['grid'], gridMargin = 0, gridSpacing = 0
)
# Set the grid properties
kwargs['grid'].setRowStretch(2,1)
@frame
def __createMatrixBInputFrame(self, *args, **kwargs):
"""
Create the input frame for defining Matrix B. This has a label at the
top demarking this as Matrix B. It has a sub-frame for changing the size
of the frame, a table for defining the matrix, and a sub-frame for choosing
to randomly generate the matrix.
"""
# Create the label at the top of this frame, labeling this as Matrix B
sectionLabel = QLabel('Matrix B')
configureQLabel(sectionLabel, font = self.__fontFamily, font_size = 16,
alignment = Qt.AlignCenter)
kwargs['grid'].addWidget(sectionLabel, 0, 0)
# Create section for specifying the matrix size
self.__createMatrixBSizeFrame(
(1,0), grid = kwargs['grid'], gridMargin = 0, gridSpacing = 0
)
# Create section for inputing the matrix
self.__matrixBInputTable = QTableWidget(3, 3)
font = self.__matrixBInputTable.horizontalHeader().font()
font.setWeight(QFont.Bold)
self.__matrixBInputTable.setAlternatingRowColors(True)
self.__matrixBInputTable.horizontalHeader().setFont(font)
self.__matrixBInputTable.verticalHeader().setFont(font)
for row in range(3):
for col in range(3):
self.__matrixBInputTable.setItem(row, col, QTableWidgetItem(''))
kwargs['grid'].addWidget(self.__matrixBInputTable, 2, 0)
# Create section for random matrix generation
self.__createMatrixBRandFrame(
(3,0), grid = kwargs['grid'], gridMargin = 0, gridSpacing = 0
)
# Set the grid properties
kwargs['grid'].setRowStretch(2,1)
@frame
def __createOperationSelectionFrame(self, *args, **kwargs):
"""
Create the frame which allows the user select the math operation to
perform.
"""
kwargs['grid'].addWidget(QLabel('Select the Operation:'), 2, 0)
# Create the dropdown list of operations
self.__opSelectComboBox = QComboBox()
self.__opSelectComboBox.addItems(MatOpGUI.OPERATIONS)
self.__opSelectComboBox.currentIndexChanged.connect(self.__opSelectChanged)
kwargs['grid'].addWidget(self.__opSelectComboBox, 2, 1)
# Create the row/column entry field, for operations which return a
# result from just a single column/row. This will be where the user
# enters the row/column the return the result from. This will intially
# be invisible as the default matrix operation is to multiply the two
# together, which does not require this widget to exist. When an operation
# is selected that will require this widget, it will be shown to the user.
self.__opEntryField = QLineEdit()
self.__opEntryField.setVisible(False)
kwargs['grid'].addWidget(self.__opEntryField, 2, 2)
# Create the Go! button
self.__goButton = QPushButton('Go!')
self.__goButton.clicked.connect(self.__goButtonClicked)
kwargs['grid'].addWidget(self.__goButton, 2, 3)
# Set the grid properties
kwargs['grid'].setColumnStretch(1,1)
#===========================================================================
# Level 3: Matrix Size and Random Generation Collapsable Frames
#===========================================================================
# == Matrix A ==============================================================
@frame
def __createMatrixASizeFrame(self, *args, **kwargs):
"""
Create a frame with a collapsable section for allowing the user to change
the size of the matrix. This is just a text box for entering both the row
and column and a button to change the size.
"""
# Create a collapsable section to add the various widgets to. This will
# make the GUI output a bit cleaner and only show this to the user if
# they need to see it.
self.__matrixASizeCollapsable = CollapsableSection('Matrix Size', True)
# Create the row size entry
self.__matrixARowSize = QLineEdit('3')
self.__matrixARowSize.setMaximumWidth(30)
self.__matrixARowSize.setPlaceholderText('Row')
self.__matrixASizeCollapsable.addWidget(self.__matrixARowSize, 0, 0)
# Create the 'X' label
self.__matrixASizeCollapsable.addWidget(QLabel('X'), 0, 1)
# Create the col size entry
self.__matrixAColSize = QLineEdit('3')
self.__matrixAColSize.setMaximumWidth(30)
self.__matrixAColSize.setPlaceholderText('Col')
self.__matrixASizeCollapsable.addWidget(self.__matrixAColSize, 0, 2)
# Create the Set Size button
self.__matrixASizeButton = QPushButton('Set Size')
self.__matrixASizeButton.clicked.connect(self.__matrixASetSizeClicked)
self.__matrixASizeCollapsable.addWidget(self.__matrixASizeButton, 0, 3)
# Set the grid properties
self.__matrixASizeCollapsable.setColumnStretch(4,1)
kwargs['grid'].addWidget(self.__matrixASizeCollapsable, 1, 0)
@frame
def __createMatrixARandFrame(self, *args, **kwargs):
"""
Create a frame with a collapsable section for allowing the user to randomly
populate the matrix. The collapsable section has a section for defining
the range to use and for selecting to generate either decimals or integers.
Finally there's a button to actually generate the matrix content.
"""
# Create a collapsable section to add the various widgets to. This will
# make the GUI output a bit cleaner and only show this to the user if
# they need to see it.
self.__matrixARandCollapsable = CollapsableSection('Random Generation', True)
# -- Create range section ----------------------------------------------
self.__matrixARandCollapsable.addWidget(QLabel('Range:'), 0, 0)
# Create the minimum line edit
self.__matrixAMinRandRange = QLineEdit('0.0')
self.__matrixAMinRandRange.setMaximumWidth(50)
self.__matrixAMinRandRange.setPlaceholderText('min')
self.__matrixARandCollapsable.addWidget(self.__matrixAMinRandRange, 0, 1)
# Create the '-' label
self.__matrixARandCollapsable.addWidget(QLabel('-', alignment = Qt.AlignCenter), 0, 2)
# Create the maximum line edit
self.__matrixAMaxRandRange = QLineEdit('1.0')
self.__matrixAMaxRandRange.setMaximumWidth(50)
self.__matrixAMaxRandRange.setPlaceholderText('max')
self.__matrixARandCollapsable.addWidget(self.__matrixAMaxRandRange, 0, 3, 1, 2)
# -- Create number type section ----------------------------------------
self.__matrixARandCollapsable.addWidget(QLabel('Type:'), 1, 0)
# Create the button group for the number type radio buttons
self.__matrixARandButtonGroup = QButtonGroup()
# Create the 'decimal' radio button
decimalButton = QRadioButton('Decimal')
decimalButton.setChecked(True)
self.__matrixARandButtonGroup.addButton(decimalButton, 0)
self.__matrixARandCollapsable.addWidget(decimalButton, 1, 1, 1, 3)
# Create the 'integer' radio button
integerButton = QRadioButton('Integer')
self.__matrixARandButtonGroup.addButton(integerButton, 1)
self.__matrixARandCollapsable.addWidget(integerButton, 1, 4, 1, 1)
# -- Create generation button ------------------------------------------
self.__matrixARandGenButton = QPushButton('Generate')
self.__matrixARandGenButton.clicked.connect(self.__matrixARandGenClicked)
self.__matrixARandCollapsable.addWidget(self.__matrixARandGenButton, 2, 0, 1, 5)
# Set the grid properties
self.__matrixARandCollapsable.setColumnStretch(5, 1)
kwargs['grid'].addWidget(self.__matrixARandCollapsable, 3, 0)
# == Matrix B ==============================================================
@frame
def __createMatrixBSizeFrame(self, *args, **kwargs):
"""
Create a frame with a collapsable section for allowing the user to change
the size of the matrix. This is just a text box for entering both the row
and column and a button to change the size.
"""
# Create a collapsable section to add the various widgets to. This will
# make the GUI output a bit cleaner and only show this to the user if
# they need to see it.
self.__matrixBSizeCollapsable = CollapsableSection('Matrix Size', True)
# Create the row size entry
self.__matrixBRowSize = QLineEdit('3')
self.__matrixBRowSize.setMaximumWidth(30)
self.__matrixBRowSize.setPlaceholderText('Row')
self.__matrixBSizeCollapsable.addWidget(self.__matrixBRowSize, 0, 0)
# Create the 'X' label
self.__matrixBSizeCollapsable.addWidget(QLabel('X'), 0, 1)
# Create the col size entry
self.__matrixBColSize = QLineEdit('3')
self.__matrixBColSize.setMaximumWidth(30)
self.__matrixBColSize.setPlaceholderText('Col')
self.__matrixBSizeCollapsable.addWidget(self.__matrixBColSize, 0, 2)
# Create the Set Size button
self.__matrixBSizeButton = QPushButton('Set Size')
self.__matrixBSizeButton.clicked.connect(self.__matrixBSetSizeClicked)
self.__matrixBSizeCollapsable.addWidget(self.__matrixBSizeButton, 0, 3)
# Set the grid properties
self.__matrixBSizeCollapsable.setColumnStretch(4,1)
kwargs['grid'].addWidget(self.__matrixBSizeCollapsable, 1, 0)
@frame
def __createMatrixBRandFrame(self, *args, **kwargs):
"""
Create a frame with a collapsable section for allowing the user to randomly
populate the matrix. The collapsable section has a section for defining
the range to use and for selecting to generate either decimals or integers.
Finally there's a button to actually generate the matrix content.
"""
# Create a collapsable section to add the various widgets to. This will
# make the GUI output a bit cleaner and only show this to the user if
# they need to see it.
self.__matrixBRandCollapsable = CollapsableSection('Random Generation', True)
# -- Create range section ----------------------------------------------
self.__matrixBRandCollapsable.addWidget(QLabel('Range:'), 0, 0)
# Create the minimum line edit
self.__matrixBMinRandRange = QLineEdit('0.0')
self.__matrixBMinRandRange.setMaximumWidth(50)
self.__matrixBMinRandRange.setPlaceholderText('min')
self.__matrixBRandCollapsable.addWidget(self.__matrixBMinRandRange, 0, 1)
# Create the '-' label
self.__matrixBRandCollapsable.addWidget(QLabel('-', alignment = Qt.AlignCenter), 0, 2)
# Create the maximum line edit
self.__matrixBMaxRandRange = QLineEdit('1.0')
self.__matrixBMaxRandRange.setMaximumWidth(50)
self.__matrixBMaxRandRange.setPlaceholderText('max')
self.__matrixBRandCollapsable.addWidget(self.__matrixBMaxRandRange, 0, 3, 1, 2)
# -- Create number type section ----------------------------------------
self.__matrixBRandCollapsable.addWidget(QLabel('Type:'), 1, 0)
# Create the button group for the number type radio buttons
self.__matrixBRandButtonGroup = QButtonGroup()
# Create the 'decimal' radio button
decimalButton = QRadioButton('Decimal')
decimalButton.setChecked(True)
self.__matrixBRandButtonGroup.addButton(decimalButton, 0)
self.__matrixBRandCollapsable.addWidget(decimalButton, 1, 1, 1, 3)
# Create the 'integer' radio button
integerButton = QRadioButton('Integer')
self.__matrixBRandButtonGroup.addButton(integerButton, 1)
self.__matrixBRandCollapsable.addWidget(integerButton, 1, 4, 1, 1)
# -- Create generation button ------------------------------------------
self.__matrixBRandGenButton = QPushButton('Generate')
self.__matrixBRandGenButton.clicked.connect(self.__matrixBRandGenClicked)
self.__matrixBRandCollapsable.addWidget(self.__matrixBRandGenButton, 2, 0, 1, 5)
# Set the grid properties
self.__matrixBRandCollapsable.setColumnStretch(5, 1)
kwargs['grid'].addWidget(self.__matrixBRandCollapsable, 3, 0)
#===========================================================================
# Widget Callbacks and Events
#===========================================================================
def dragEnterEvent(self, event):
"""Callback for a drag enter event"""
# If something was dragged into this window, set it as a move event
event.setDropAction(Qt.MoveAction)
# If the event has a URL to a file, check if only one file is being dropped
# in and that file has a .matop extension. If it meets those conditions,
# accept it, otherwise, ignore it.
if event.mimeData().hasUrls():
if len(event.mimeData().urls()) > 1:
event.ignore()
elif not event.mimeData().urls()[0].toLocalFile().endswith('.matop'):
event.ignore()
else:
event.accept()
# Ignore everything else
else:
event.ignore()
def dropEvent(self, event):
"""Callback for file drop event to load a file"""
for url in event.mimeData().urls():
filename = url.toLocalFile()
self.__load(filename)
def __save(self):
"""Callback for saving the output data"""
# Ask for the file to save to
outfile, _ = QFileDialog.getSaveFileName(self, 'Select a file to save to', QDir.currentPath(), 'MatOp (*.matop)')
# If a file was provided, grab all the text from the output text area and
# write it to that file.
if outfile:
with open(outfile, 'w') as file:
file.write(self.__outputTextBox.toPlainText())
def __askForFileAndLoad(self):
"""Callback for loading from a file, after asking the user for the file"""
# Ask for the file to load from
filename, _ = QFileDialog.getOpenFileName(self, 'Select a file to load', QDir.currentPath(), 'MatOp (*.matop)')
if filename:
self.__load(filename)
def __load(self, filename):
"""Callback for loading from a file, given one is provided"""
# Load the file's content
with open(filename, 'r') as file:
content = file.readlines()
content = ''.join(content)
# Set the textbox output to the loaded content
self.__outputTextBox.setText(content)
# Now use regex to scan through the content and figure out the operation
# counter, so it can be set.
matches = re.findall(r'Operation (?P<counter>\d+)', content)
self.__opCounter = max(map(int, matches)) if matches else 0
# And finally, set the textbox output to visible
self.__outputTextBox.setVisible(True)
def __clearAll(self):
"""
Callback for clearing all the input/output of the GUI. This is connected
to the "Clear All" menu item.
"""
# Clear the table for Matrix A. This is done by removing all rows/columns,
# setting them to the correct amount, then redefining the widget items in
# the table.
rowNum = self.__matrixAInputTable.rowCount()
colNum = self.__matrixAInputTable.columnCount()
self.__matrixAInputTable.setRowCount(0)
self.__matrixAInputTable.setRowCount(rowNum)
self.__matrixAInputTable.setColumnCount(0)
self.__matrixAInputTable.setColumnCount(colNum)
for row in range(rowNum):
for col in range(colNum):
self.__matrixAInputTable.setItem(row, col, QTableWidgetItem(''))
# Clear the table for Matrix B in the same way as Matrix A.
rowNum = self.__matrixBInputTable.rowCount()
colNum = self.__matrixBInputTable.columnCount()
self.__matrixBInputTable.setRowCount(0)
self.__matrixBInputTable.setRowCount(rowNum)
self.__matrixBInputTable.setColumnCount(0)
self.__matrixBInputTable.setColumnCount(colNum)
for row in range(rowNum):
for col in range(colNum):
self.__matrixBInputTable.setItem(row, col, QTableWidgetItem(''))
# Clear out the output text box and set the operation counter to zero again.
self.__outputTextBox.setText('')
self.__opCounter = 0
def __opSelectChanged(self):
"""
Callback for when the user has selected a new math operation to perform
from the dropdown list. This exists because for some operations, the user
needs to add a row or column to perform the operation on. The text box
for entering this should only be displayed when it is necessary.
"""
# Check if the new selection is in the operations that makes the entry
# field appear. If it is, set it as visible, then set the placeholder
# text to the appropriate text directing them to input a row or a column
# as appropriate. Otherwise, just make the entry field invisible.
if self.__opSelectComboBox.currentText() in MatOpGUI.OPS_TO_MAKE_ENTRY_VISIBLE:
self.__opEntryField.setVisible(True)
if self.__opSelectComboBox.currentText() in MatOpGUI.OPS_ON_ROW:
self.__opEntryField.setPlaceholderText('Enter a row...')
else:
self.__opEntryField.setPlaceholderText('Enter a column...')
else:
self.__opEntryField.setVisible(False)
# Finally, clear the entry field so they can see the placeholder text and
# to reset the field.
self.__opEntryField.clear()
def __goButtonClicked(self):
"""
Callback to execute when the Go! button is clicked to perform the mathematical
operation. A variety of error checking is performed that may result in early
termination of this method. In every case where the function returns early,
it will output a messagebox to the user with a message detailing the nature
of the problem.
"""
# -- Perform Error Checking --------------------------------------------
# If the entry field is visible for specifying the row/column for operations
# that act only on a single row/column, make sure the user input a value
# for it. If no value is found, then let the user know they need to input
# one.
if self.__opEntryField.isVisible():
opEntryFieldText = self.__opEntryField.text()
opRowOrCol = 'Row' if self.__opSelectComboBox.currentText() in MatOpGUI.OPS_ON_ROW else 'Column'
# Verify the size is not an empty string
if not opEntryFieldText:
QMessageBox.critical(self, f'Invalid Operation {opRowOrCol}', f'{opRowOrCol} for the matrix operation is not provided.')
return None
# Verify the input is a valid number
try:
opEntryFieldFloat = float(opEntryFieldText)
opEntryFieldInt = int(opEntryFieldFloat)
except:
QMessageBox.critical(self, f'Invalid Operation {opRowOrCol}', f'{opRowOrCol} of {opEntryFieldText} for the matrix operation is not a valid number.')
return None
# Make sure row input is an integer
if opEntryFieldFloat != opEntryFieldInt:
QMessageBox.critical(self, f'Invalid Operation {opRowOrCol}', f'{opRowOrCol} of {opEntryFieldText} for the matrix operation is not a integer.')
return None
# -- Get Matrices from Table -------------------------------------------
# This will get the two matrices from the table operate on. If either one
# is None, that means a valid matrix was not defined in the table and an
# error was already shown to the user. In that case, just return.
matrixA = self.__getMatrix(self.__matrixAInputTable, 'A')
if matrixA is None: return
matrixB = self.__getMatrix(self.__matrixBInputTable, 'B')
if matrixB is None: return
# -- Create Matrix Operation Object ------------------------------------
# This process is not optimal as it makes a new MatrixOperation object
# every time. A better process would be to keep a record of all previously
# generated MatrixOperation objects and pull from that history.
try:
matop = MatrixOperation(self.__nameLineEdit.text(), matrixA, matrixB)
except MatrixOperationError as e:
QMessageBox.critical(self, 'Invalid Matrices', str(e))
return
# -- Perform Additional Error Checking ---------------------------------
# Now that the matrices are found, one more error check can be performed,
# which is to verify that the row/column provided for the operation is
# within range, based on the matrix sizes. Of course, only check this if
# it is necessary.
if self.__opEntryField.isVisible():
if self.__opSelectComboBox.currentText() in MatOpGUI.OPS_ON_ROW:
upperOpLimit = matop.productRows
else:
upperOpLimit = matop.productCols
if opEntryFieldInt < 1 or upperOpLimit < opEntryFieldInt:
QMessageBox.critical(self, f'Invalid Operation {opRowOrCol}', f'{opRowOrCol} {opEntryFieldText} for the matrix is out of bounds [1,{upperOpLimit}].')
return None
# -- Get Matrix Operation Result ---------------------------------------
# Call the right function based on the user's requested operation
if self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[0]:
result = matop.product
elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[1]:
result = matop.getProductColSum(opEntryFieldInt - 1)
elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[2]:
result = matop.getProductColProd(opEntryFieldInt - 1)
elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[3]:
result = matop.getProductColCumSum()
elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[4]:
result = matop.getProductColCumProd()
elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[5]:
result = matop.getProductRowSum(opEntryFieldInt - 1)
elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[6]:
result = matop.getProductRowProd(opEntryFieldInt - 1)
elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[7]:
result = matop.getProductRowCumSum()
elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[8]:
result = matop.getProductRowCumProd()
elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[9]:
result = matop.getProductTotalMin()
elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[10]:
result = matop.getProductTotalMax()
elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[11]:
result = matop.getProductTotalMean()
elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[12]:
result = matop.getProductTotalMedian()
elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[13]:
result = matop.getProductTotalSum()
elif self.__opSelectComboBox.currentText() == MatOpGUI.OPERATIONS[14]:
result = matop.getProductTotalProd()
else:
# If this point is reached, somehow the text of the combo box doesn't
# match any text added to it. This point should never be reached, but
# if it is, present an error to the user. This should not be the user's
# fault and there would be nothing they could do to fix it, but better
# to provide some sort of feedback to the user about the issue.
QMessageBox.critical(self, 'Invalid Operation Selection', 'Invalid Operation Selection: '+self.__opSelectComboBox.currentText())
return
# -- Print Output ------------------------------------------------------
# Make the output text area visible if it is not
self.__outputTextBox.setVisible(True)
# Increment the operation counter
self.__opCounter += 1
# Construct and print the header for the operation
header = '\n\n' if self.__opCounter > 1 else ''
header += '=' * 80 + '\n'
header += f'= Operation {self.__opCounter}'
if self.__nameLineEdit.text():
header += ': ' + self.__nameLineEdit.text() + ' '
header += '\n'
header += '=' * 80 + '\n'
self.__outputTextBox.append(header)
# Output the matrices being multiplied
self.__outputTextBox.append('Matrix A:\n')
self.__outputTextBox.append(str(matrixA) + '\n')
self.__outputTextBox.append('Matrix B:\n')
self.__outputTextBox.append(str(matrixB) + '\n')
# Output the operation result
self.__outputTextBox.append(self.__opSelectComboBox.currentText() + ' Result:\n')
self.__outputTextBox.append(str(result))
# == Matrix A ==============================================================
def __matrixASetSizeClicked(self):
"""
Callback for when the set size button is clicked to change the size input
for matrix A. This will update the QTableWidget's rows and columns to be
the appropriate size based on the user's inputs. Some error checking is
performed to ensure the user's inputs are valid. If a problem is found,
this will return early with a messagebox indicating the nature of the issue.
"""
# TODO: save/restore the values already entered so they don't get erased
# when the size changes.
# -- Perform Error Checking --------------------------------------------
# Validate the provided row. If it's invalid, return
rowNum = self.__validateSize(self.__matrixARowSize, 'A', 'Row')
if rowNum is None: return
# Set the text to the returned value, which should guarantee the input
# always looks like an integer.
self.__matrixARowSize.setText(str(rowNum))
# Validate the provided column. If it's invalid, return
colNum = self.__validateSize(self.__matrixAColSize, 'A', 'Col')
if colNum is None: return
# Set the text to the returned value, which should guarantee the input
# always looks like an integer.
self.__matrixAColSize.setText(str(colNum))
# -- Update matrix size ------------------------------------------------
self.__matrixAInputTable.setRowCount(0)
self.__matrixAInputTable.setRowCount(rowNum)
self.__matrixAInputTable.setColumnCount(0)
self.__matrixAInputTable.setColumnCount(colNum)
for row in range(rowNum):
for col in range(colNum):
self.__matrixAInputTable.setItem(row, col, QTableWidgetItem(''))
def __matrixARandGenClicked(self):
"""
Callback for when the generate button is clicked to generate a random
matrix for matrix A. After some basic error checking, this just generates
a random matrix, based on the inputs provided by the user (such as whether
to generate decimals or integers, and what range to use).
If an error is found, such as an invalid range value input by the user,
a messagebox will be displayed with information about the issue and
the function will return.
"""
# -- Perform Error Checking --------------------------------------------
# Validate the minimum range value
minRangeLimit = self.__validateRange(
self.__matrixAMinRandRange, 'A', 'Min', self.__matrixARandButtonGroup.checkedId() == 1
)
if minRangeLimit is None: return
# Set the text to the returned value.
self.__matrixAMinRandRange.setText(str(minRangeLimit))
# Validate the maximum range value
maxRangeLimit = self.__validateRange(
self.__matrixAMaxRandRange, 'A', 'Max', self.__matrixARandButtonGroup.checkedId() == 1
)
if maxRangeLimit is None: return
# Set the text to the returned value.
self.__matrixAMaxRandRange.setText(str(maxRangeLimit))
# -- Populate the matrix with random values ----------------------------
# Get the matrix size
rowNum = self.__matrixAInputTable.rowCount()
colNum = self.__matrixAInputTable.columnCount()
# Generate the matrix
if self.__matrixARandButtonGroup.checkedId() == 0: # Decimal
matrix = (np.random.rand(rowNum, colNum) * (maxRangeLimit - minRangeLimit)) + minRangeLimit
else: # Integer
matrix = np.random.randint(minRangeLimit, maxRangeLimit, size = (rowNum, colNum))
# Finally, populate the table with the generated matrix
self.__setMatrix(self.__matrixAInputTable, matrix)
# == Matrix B ==============================================================
def __matrixBSetSizeClicked(self):
"""
Callback for when the set size button is clicked to change the size input
for matrix B. This will update the QTableWidget's rows and columns to be
the appropriate size based on the user's inputs. Some error checking is
performed to ensure the user's inputs are valid. If a problem is found,
this will return early with a messagebox indicating the nature of the issue.
"""
# TODO: save/restore the values already entered so they don't get erased
# when the size changes.
# -- Perform Error Checking --------------------------------------------
# Validate the provided row. If it's invalid, return
rowNum = self.__validateSize(self.__matrixBRowSize, 'B', 'Row')
if rowNum is None: return
# Set the text to the returned value, which should guarantee the input
# always looks like an integer.
self.__matrixBRowSize.setText(str(rowNum))
# Validate the provided column. If it's invalid, return
colNum = self.__validateSize(self.__matrixBColSize, 'B', 'Col')
if colNum is None: return
# Set the text to the returned value, which should guarantee the input
# always looks like an integer.
self.__matrixBColSize.setText(str(colNum))
# -- Update matrix size ------------------------------------------------
self.__matrixBInputTable.setRowCount(0)
self.__matrixBInputTable.setRowCount(rowNum)
self.__matrixBInputTable.setColumnCount(0)
self.__matrixBInputTable.setColumnCount(colNum)
for row in range(rowNum):
for col in range(colNum):
self.__matrixBInputTable.setItem(row, col, QTableWidgetItem(''))
def __matrixBRandGenClicked(self):
"""
Callback for when the generate button is clicked to generate a random
matrix for matrix B. After some basic error checking, this just generates
a random matrix, based on the inputs provided by the user (such as whether
to generate decimals or integers, and what range to use).
If an error is found, such as an invalid range value input by the user,
a messagebox will be displayed with information about the issue and
the function will return.
"""
# -- Perform Error Checking --------------------------------------------
# Validate the minimum range value
minRangeLimit = self.__validateRange(
self.__matrixBMinRandRange, 'B', 'Min', self.__matrixBRandButtonGroup.checkedId() == 1
)
if minRangeLimit is None: return
# Set the text to the returned value.
self.__matrixBMinRandRange.setText(str(minRangeLimit))
# Validate the maximum range value
maxRangeLimit = self.__validateRange(
self.__matrixBMaxRandRange, 'B', 'Max', self.__matrixBRandButtonGroup.checkedId() == 1
)
if maxRangeLimit is None: return
# Set the text to the returned value.
self.__matrixBMaxRandRange.setText(str(maxRangeLimit))
# -- Populate the matrix with random values ----------------------------
# Get the matrix size
rowNum = self.__matrixBInputTable.rowCount()
colNum = self.__matrixBInputTable.columnCount()
# Generate the matrix
if self.__matrixBRandButtonGroup.checkedId() == 0: # Decimal
matrix = (np.random.rand(rowNum, colNum) * (maxRangeLimit - minRangeLimit)) + minRangeLimit
else: # Integer
matrix = np.random.randint(minRangeLimit, maxRangeLimit, size = (rowNum, colNum))
# Finally, populate the table with the generated matrix
self.__setMatrix(self.__matrixBInputTable, matrix)
#===========================================================================
# Utilities
#===========================================================================
def __setMatrix(self, table, matrix):
"""
Set the QTableWidget cells with the content from a numpy matrix. Note
that the table and matrix should have the same dimensions.
Input:
table: A QTableWidget object to set the cell values of.
matrix: A numpy array which has values to store in the table.
"""
# No error checking is performed here to confirm that the table and matrix
# have the correct size. Since this is an internal function, it is assumed
# the calling functions are already making sure this isn't an issue.
# In addition, if an issue were found, there'd be no easy way to handle it
# as it wouldn't be the user's fault.
for row in range(np.shape(matrix)[0]):
for col in range(np.shape(matrix)[1]):
# Get the item at the current row/column of the table and set the
# text to the value in the matrix.
item = table.item(row, col)
item.setText(str(matrix[row,col]))
def __getMatrix(self, table, matrixName):
"""
Extract a numpy array from a QTableWidget. The output array will have the
same size as the table. If the table does not have a valid value in it, a
messagebox will be shown to the user with information about the problem and
the method will return early with None.
Input:
table: The QTableWidget object to pull data from for constructing the
numpy array.
matrixName: A string, either 'A' or 'B'. Used to populate the error
message displayed to the user in the event of an issue.
Output:
Returns a numpy array of the same dimensions as the table and with values
from the table. The values are set as floats by default. If the table
has invalid entries (either because it's empty or not a float), None
will be returned.
"""
# Extract the row and column number of the table
rowNum = table.rowCount()
colNum = table.columnCount()
# Create a matrix to return, initially all zeros. Make it all floating type.
result = np.zeros((rowNum, colNum), dtype = np.float64)
for row in range(rowNum):
for col in range(colNum):
value = table.item(row, col).text()
# Verify the value is not an empty string
if not value:
row += 1
col += 1
QMessageBox.critical(self, 'Invalid Matrix Entry', f'Value for cell ({row}, {col}) of matrix {matrixName} is not provided.')
return None
# Verify the input is a valid number
try:
num = float(value)
except:
row += 1
col += 1
QMessageBox.critical(self, 'Invalid Matrix Entry', f'Value of {value} for cell ({row}, {col}) of matrix {matrixName} is not a valid number.')
return None
# If no issues, store the number in the matrix
result[row,col] = num
return result
def __validateSize(self, lineEdit, matrix, direction):
"""
Utility function for verifying the size provided by the user in a text box
Input:
lineEdit: The QLineEdit object that has data in it about the size to
extract.
matrix: A string, either 'A' or 'B'. Used to populate the error
message displayed to the user in the event of an issue.
direction: A string, either 'Row' or 'Column'. Used to populate the
error message displayed to the user in the event of an issue.
Output:
Returns the size pulled from the QLineEdit widget as an integer. If
an error is found (e.g., nothing was provided or the input was not
an int), then None is returned and a messagebox is presented to the
user with information about the nature of the issue.
"""
# Pull out the size from the line edit field
sizeNum = lineEdit.text()
# Verify the size is not an empty string
if not sizeNum:
QMessageBox.critical(self, f'Invalid {direction} Size', f'{direction} size for matrix {matrix} is not provided.')
return None
# Verify the input is a valid number
try:
sizeNumFloat = float(sizeNum)
sizeNumInt = int(sizeNumFloat)
except (ValueError, OverflowError):
QMessageBox.critical(self, f'Invalid {direction} Size', f'{direction} size of {sizeNum} for matrix {matrix} is not a valid number.')
return None
# Make sure row input is an integer
if sizeNumFloat != sizeNumInt:
QMessageBox.critical(self, f'Invalid {direction} Size', f'{direction} size of {sizeNum} for matrix {matrix} is not an integer.')
return None
# Make sure row input is in valid range
if sizeNumInt < 1 or 10 < sizeNumInt:
QMessageBox.critical(self, f'Invalid {direction} Size', f'{direction} size of {sizeNum} for matrix {matrix} is outside valid range of [1,10].')
return None
return sizeNumInt
def __validateRange(self, lineEdit, matrix, end, isInt):
"""
Utility function for verifying the range provided by the user in a text box
Input:
lineEdit: The QLineEdit object that has data in it about the range to
extract.
matrix: A string, either 'A' or 'B'. Used to populate the error
message displayed to the user in the event of an issue.
end: A string, either 'Min' or 'Max'. Used to populate the
error message displayed to the user in the event of an issue.
isInt: A boolean indicating if the output is supposed to be an integer
or a decimal.
Output:
Returns the range pulled from the QLineEdit widget as an integer, or
float. If an error is found (e.g., nothing was provided or the input
was not an int as requested), then None is returned and a messagebox
is presented to the user with information about the nature of the issue.
"""
# Pull out the range from the line edit field
rangeLimit = lineEdit.text()
# Verify the limit is not an empty string
if not rangeLimit:
QMessageBox.critical(self, f'Invalid {end} Range', f'{end} range limit for matrix {matrix} is not provided.')
return None
try:
rangeLimitFloat = float(rangeLimit)
rangeLimitInt = int(rangeLimitFloat)
except (ValueError, OverflowError):
QMessageBox.critical(self, f'Invalid {end} Range', f'{end} range limit of {rangeLimit} for matrix {matrix} is not a valid number.')
return None
# Make sure the range value is an integer, if it's supposed to be
if isInt and rangeLimitInt != rangeLimitFloat:
QMessageBox.critical(self, f'Invalid {end} Range', f'{end} range limit of {rangeLimit} for matrix {matrix} is not an integer, but integer was selected.')
return None
return rangeLimitInt if isInt else rangeLimitFloat
|
python
|
"""
Uses docspec to parse docstrings to markdown.
Intended for use with static site generators where further linting / linking / styling is done downstream.
Loosely based on Numpy-style docstrings.
Automatically infers types from signature typehints. Explicitly documented types are NOT supported in docstrings.
"""
import logging
import docspec
import docspec_python
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
line_break_chars = ['-', '_', '!', '|', '>', ':']
def _is_property(mem):
if mem.decorations is not None:
for dec in mem.decorations:
if dec.name == 'property':
return True
return False
def _is_setter(mem):
if mem.decorations is not None:
for dec in mem.decorations:
if 'setter' in dec.name:
return True
return False
def extract_text_block(splits_enum, splits, indented_block=False, is_hint_block=False):
"""
Parses a block of text and decides whether or not to wrap.
Return if iters finish or on end of indentation (optional) or on start of new heading
"""
block = []
while True:
# feed
lookahead_idx, next_line = next(splits_enum)
# return if indented block and next is not indented (but don't get tripped up with empty lines)
if indented_block and not next_line.startswith(' ') and not next_line.strip() == '':
return lookahead_idx, next_line, '\n'.join(block)
# return if the next next-line would be a new heading
elif lookahead_idx < len(splits) and splits[lookahead_idx].startswith('---'):
return lookahead_idx, next_line, '\n'.join(block)
# return if inside a hint block and the end of the hint block has been encountered
elif is_hint_block and next_line.strip().startswith(':::'):
return lookahead_idx, next_line, '\n'.join(block)
# be careful with stripping content for lines with intentional breaks, e.g. indented bullets...
# if parsing indented blocks, strip the first four spaces
if indented_block:
next_line = next_line[4:]
# code blocks
if next_line.strip().startswith('```'):
code_block = next_line.strip() + '\n'
while True:
lookahead_idx, next_line = next(splits_enum)
if indented_block:
next_line = next_line[4:]
code_block += next_line + '\n'
if next_line.startswith('```'):
break
block.append(code_block)
# tip blocks
elif next_line.strip().startswith(':::'):
hint_in = '\n' + next_line.strip() + '\n\n'
# unpacks hint block
lookahead_idx, next_line, hint_block = extract_text_block(splits_enum,
splits,
indented_block=indented_block,
is_hint_block=True)
# next line will be closing characters, i.e. ':::', insert manually to add newline
block.append(hint_in + hint_block + '\n:::')
# if no block content exists yet
elif not len(block):
block.append(next_line)
# keep blank lines
elif next_line.strip() == '':
block.append('')
# don't wrap if the previous line is blank
elif block[-1] == '':
block.append(next_line)
# don't wrap if the line starts with a bullet point, picture, or table character
elif next_line.strip()[0] in line_break_chars:
block.append(next_line)
# or if the previous line ends with a bullet point, picture, or table character
elif block[-1].strip()[-1] in line_break_chars:
block.append(next_line)
# otherwise wrap
else:
# should be safe to strip text when wrapping
block[-1] += ' ' + next_line.strip()
# return if iters exhausted
if lookahead_idx == len(splits):
return lookahead_idx, next_line, '\n'.join(block)
def process_member(member, lines, config, class_name=None):
# this method only processes functions and classes
if not isinstance(member, (docspec.Function, docspec.Class)):
return
# don't process private members
if (member.name.startswith('_') and not member.name == '__init__') or _is_setter(member):
return
# keep track of the arguments and their types for automatically building function parameters later-on
arg_types_map = {}
# escape underscores in class / method / function names
member_name = member.name.replace('_', '\\_')
if class_name is not None:
class_name_esc = class_name.replace('_', '\\_')
# if a class definition use the class template
if isinstance(member, docspec.Class):
# when the class is passed-in directly its name is captured in the member_name
lines.append(config['class_name_template'].format(class_name=class_name_esc))
# if the class __init__, then display the class name and .__init__
elif class_name and member.name == '__init__':
lines.append(config['function_name_template'].format(function_name=f'{class_name_esc}'))
# if a class property
elif class_name is not None and _is_property(member):
lines.append(config['class_property_template'].format(prop_name=f'{class_name_esc}.{member_name}'))
# if a class method
elif class_name is not None:
lines.append(config['function_name_template'].format(function_name=f'{class_name_esc}.{member_name}'))
# otherwise a function
else:
lines.append(config['function_name_template'].format(function_name=member_name))
# process the member's signature if a method or a function - classes won't have args
if hasattr(member, 'args') and not _is_property(member):
# prepare the signature string - use member.name instead of escaped versions
if class_name is not None and member.name == '__init__':
signature = f'{class_name}('
elif class_name is not None:
signature = f'{class_name}.{member.name}('
else:
signature = f'{member.name}('
# the spacer is used for lining up wrapped lines
spacer = len(signature)
# unpack the arguments and add
for idx, arg in enumerate(member.args):
# ignore self parameter
if arg.name == 'self':
continue
# param name
param_name = arg.name
# add to the arg_types_map map using the function / method name and param name
arg_types_map[param_name] = arg.datatype
# if the argument type is KeywordRemainder then add the symbols
if arg.type.name == 'KeywordRemainder':
param_name = '**' + param_name
# first argument is wedged against bracket
# except for classes where self parameters are ignored and second argument is wedged
if idx == 0 or class_name is not None and idx == 1:
signature += param_name
# other arguments start on a new line
else:
signature += f'{" " * spacer}{param_name}'
# add default values where present
if arg.default_value is not None:
signature += f'={arg.default_value}'
# if not the last argument, add a comma
if idx != len(member.args) - 1:
signature += ',\n'
# close the signature
signature += ')'
# add the return type if present
if member.return_type is not None:
signature += f'\n{" " * spacer}-> {member.return_type}'
# set into the template
signature = config['signature_template'].format(signature=signature)
lines.append(signature)
# process the docstring
if member.docstring is not None:
# split the docstring at new lines
splits = member.docstring.split('\n')
# iter the docstring with a lookahead index
splits_enum = enumerate(splits, start=1)
try:
# skip and go straight to headings if no introductory text
if len(splits) > 1 and splits[1].startswith('---'):
lookahead_idx, next_line = next(splits_enum)
# otherwise, look for introductory text
else:
lookahead_idx, next_line, text_block = extract_text_block(splits_enum, splits)
if len(text_block):
lines.append(text_block)
# look for headings
while lookahead_idx < len(splits):
# break if not a heading
if not splits[lookahead_idx].startswith('---'):
raise ValueError('Parser out of lockstep with headings.')
heading = next_line.strip()
lines.append(config['heading_template'].format(heading=heading))
# skip the underscore line
next(splits_enum)
# if not param-type headings - just extract the text blocks
if heading not in ['Parameters', 'Returns', 'Yields', 'Raises']:
lookahead_idx, next_line, text_block = extract_text_block(splits_enum, splits)
if len(text_block):
lines.append(text_block)
# otherwise iterate the parameters and their indented arguments
else:
# initial prime to move from heading to parameter name
lookahead_idx, next_line = next(splits_enum)
# Iterate nested parameters
while True:
# this parser doesn't process typehints, use typehints in function declarations instead
if ' ' in next_line.strip() or ':' in next_line.strip():
raise ValueError('Parser does not support types in docstrings. Use type-hints instead.')
# extract the parameter name
param_name = next_line.strip()
# process the indented parameter description
lookahead_idx, next_line, param_description = extract_text_block(splits_enum,
splits,
indented_block=True)
# only include type information for Parameters
if heading == 'Parameters':
param_type = arg_types_map[param_name]
param = config['param_template'].format(name=param_name,
type=param_type,
description=param_description)
else:
param = config['return_template'].format(name=param_name,
description=param_description)
lines.append(param)
# break if a new heading found
if lookahead_idx == len(splits) or splits[lookahead_idx].startswith('---'):
break
# catch exhausted enum
except StopIteration:
pass
def parse(module_name: str,
module: docspec_python.Module,
config: dict):
lines = []
# frontmatter
if config['frontmatter_template'] is not None:
lines.append(config['frontmatter_template'])
# module name
lines.append(config['module_name_template'].format(module_name=module_name).replace('_', '\\_'))
# module docstring
if module.docstring is not None:
lines.append(module.docstring.strip().replace('\n', ' '))
if config['toc_template'] is not None:
lines.append(config['toc_template'])
# iterate the module's members
for member in module.members:
# ignores module-level variables
if isinstance(member, docspec.Data):
continue
# process functions
elif isinstance(member, docspec.Function):
process_member(member, lines, config)
# process classes and nested methods
elif isinstance(member, docspec.Class):
class_name = member.name
process_member(member, lines, config, class_name)
for nested_member in member.members:
process_member(nested_member, lines, config, class_name)
return lines
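# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): how parse() might be
# driven from docspec-python. The template strings below are illustrative
# placeholders; only the config keys themselves are taken from the code above.
if __name__ == '__main__':
    example_config = {
        'frontmatter_template': None,
        'toc_template': None,
        'module_name_template': '# {module_name}\n',
        'class_name_template': '## class {class_name}\n',
        'class_property_template': '#### {prop_name}\n',
        'function_name_template': '#### {function_name}\n',
        'signature_template': '{signature}\n',
        'heading_template': '**{heading}**\n',
        'param_template': '- `{name}` (`{type}`): {description}\n',
        'return_template': '- `{name}`: {description}\n',
    }
    for mod in docspec_python.load_python_modules(modules=['json']):
        print('\n'.join(parse(mod.name, mod, example_config)))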
|
python
|
from django.shortcuts import render, get_object_or_404, redirect
from django.utils import timezone
from django.urls import reverse_lazy
from status import models, forms
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def add_comment(request,pk):
post = get_object_or_404(models.Post, pk=pk)
if request.method == 'POST':
form = forms.CommentForm(request.POST)
if form.is_valid():
comment = form.save(commit=False)
comment.post = post
comment.save()
return redirect('post_detail', pk=post.pk)
else:
form = forms.CommentForm()
return render(request, 'status/comment_form.html', {'form':form})
@login_required
def comment_approval(request,pk):
comment = get_object_or_404(models.Comment,pk=pk)
comment.approve()
return redirect('post_detail', pk=comment.post.pk)
@login_required
def comment_remove(request,pk):
comment = get_object_or_404(models.Comment,pk=pk)
post_pk = comment.post.pk
comment.delete()
return redirect('post_detail', pk=post_pk)
|
python
|
def countingSort(arr):
counter = [0]*100 # as per the constraint that arr[i] < 100
for num in arr:
counter[num] += 1
result = []  # avoid shadowing the built-in sorted()
for num, cnt in enumerate(counter):
result += [num]*cnt
return result
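# Hedged usage sketch: a quick self-check of countingSort against the built-in
# sorted(), assuming inputs respect the 0 <= arr[i] < 100 constraint above.
if __name__ == '__main__':
    sample = [63, 25, 73, 1, 98, 73, 56, 84, 86, 57]
    assert countingSort(sample) == sorted(sample)
    print(countingSort(sample))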
|
python
|
#
# relu paddle model generator
#
import numpy as np
from save_model import saveModel
import sys
def relu(name: str, x):
import paddle as pdpd
pdpd.enable_static()
node_x = pdpd.static.data(name='x', shape=x.shape, dtype='float32')
out = pdpd.nn.functional.relu(node_x)
cpu = pdpd.static.cpu_places(1)
exe = pdpd.static.Executor(cpu[0])
# startup program will call initializer to initialize the parameters.
exe.run(pdpd.static.default_startup_program())
outs = exe.run(
feed={'x': x},
fetch_list=[out])
saveModel(name, exe, feedkeys=['x'], fetchlist=[out],
inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])
return outs[0]
def main():
data = np.array([-2, 0, 1]).astype('float32')
relu("relu", data)
if __name__ == "__main__":
main()
|
python
|
import os
import os.path
from dataclasses import dataclass
from os import path
import sys
from typing import List
import requests
from bs4 import BeautifulSoup
import re
import win32console # needs pywin32
import time
_stdin = win32console.GetStdHandle(win32console.STD_INPUT_HANDLE)
def input_def(prompt, default=''):
keys = []
for c in str(default):
evt = win32console.PyINPUT_RECORDType(win32console.KEY_EVENT)
evt.Char = c
evt.RepeatCount = 1
evt.KeyDown = True
keys.append(evt)
_stdin.WriteConsoleInput(keys)
return input(prompt)
@dataclass
class Scene:
title: str
performers: List[str]
number: int
@dataclass
class Movie:
title: str
year: str
date: str
scenes: List[Scene]
def get_scene_performers(div):
try:
return list(map(lambda performer: performer.text, div.parent.find_all('a')))
except AttributeError:
return []
def get_movie_data(url):
r = requests.get(url)
soup = BeautifulSoup(r.text, features="html.parser")
title = soup.find('h1').text.strip().split("\n")[0].strip()
try:
year = soup.find('div', {'class': 'item-info'}).find('small').text
except AttributeError:
year = ""
studio = soup.find('div', {'class': 'item-info'}).find('a', {'label': 'Studio'}).text
date = "" # todo
scene_rows = list(map(lambda row: row.parent, soup.find_all('div', {'class': 'col-sm-6 m-b-1'})))
scenes = []
for i, row in enumerate(scene_rows):
scene_title = row.find('a', {'label': 'Scene Title'}).text.strip()
scene_performers = get_scene_performers(row.find(text=re.compile(r'Starring:')))
scenes.append(Scene(scene_title, scene_performers, i + 1))
#print(title)
# print(year)
#print(studio)
#print(scenes)
return Movie(title, year, date, scenes)
def determine_files(folder):
return list(filter(lambda element: path.isfile(path.join(folder, element)), os.listdir(folder)))
def handle_file(folder, index, file, data):
index = ask_index(index, file)
if index >= 0:
new_filename = get_new_filename(index, file, data)
rename(folder, file, new_filename)
def ask_index(index, filename):
#answer = input_def('ENTER SCENE NUMBER FOR "' + filename + '"\nIndex: ', index + 1)
answer = input_def('ENTER SCENE NUMBER FOR "' + filename + '" (0 to skip)\nIndex: ', "")
time.sleep(0.5)
return int(answer) - 1
def get_new_filename(index, file, data):
scene = data.scenes[index]
filename = '{title}{year} - Scene {index:02d}{performer}{scene_title}{ext}'.format(
title=data.title,
year=build_year_string(data.year),
index=scene.number,
performer=build_performer_string(scene.performers),
scene_title=' - ' + scene.title,
ext=path.splitext(file)[-1]
)
filename = re.sub(r'[<>/\\|?*]', '', filename)
filename = re.sub(r'"', '\'', filename)
filename = re.sub(r':', ' -', filename)
return filename
def build_year_string(year):
if year == "":
return ""
else:
return f" {year}"
def build_performer_string(performers):
if len(performers) > 2:
return ' - ' + ', '.join(performers[:-1]) + ' & ' + str(performers[-1])
elif len(performers) == 2:
return ' - ' + ' & '.join(performers)
elif len(performers) == 1:
return ' - ' + performers[0]
else:
return ''
def rename(folder, file, new_filename):
os.rename(path.join(folder, file), path.join(folder, new_filename))
print(f'Rename {file} to {new_filename}')
if __name__ == '__main__':
folder = sys.argv[1]
#movie_url = sys.argv[2]
files = determine_files(folder)
files.sort()
for index, file in enumerate(files):
print()
movie_url = input(f'URL for {file}: ')
data = get_movie_data(movie_url)
handle_file(folder, index, file, data)
|
python
|
import numpy as np
def get_box_from_point(x,y,kernel,pad,stride):
kernel_x = kernel[0]
kernel_y = kernel[1]
pad_x = pad[0]
pad_y = pad[1]
stride_x = stride[0]
stride_y = stride[1]
x_min = (x - 1) * stride_x + 1 - pad_x
x_max = (x - 1) * stride_x - pad_x + kernel_x
y_min = (y - 1) * stride_y + 1 - pad_y
y_max = (y - 1) * stride_y - pad_y + kernel_y
return x_min, y_min, x_max, y_max
def get_convd_size(W, H, kernel, pad, stride):
kernel_x = kernel[0]
kernel_y = kernel[1]
pad_x = pad[0]
pad_y = pad[1]
stride_x = stride[0]
stride_y = stride[1]
H_res = int((H + 2 * pad_y - kernel_y)/stride_y) + 1
W_res = int((W + 2 * pad_x - kernel_x)/stride_x) + 1
return W_res, H_res
def get_original_size(W, H, kernel, pad, stride):
kernel_x = kernel[0]
kernel_y = kernel[1]
pad_x = pad[0]
pad_y = pad[1]
stride_x = stride[0]
stride_y = stride[1]
H_res = (H - 1) * stride_y + kernel_y - 2 * pad_y
W_res = (W - 1) * stride_x + kernel_x - 2 * pad_x
return W_res, H_res
def single_map_value(value_rec, kernel, pad, stride):
W = value_rec.shape[0]
H = value_rec.shape[1]
W_ori, H_ori = get_original_size(W, H, kernel, pad, stride)
res_rec = np.full([W_ori, H_ori],0.)
for i in range(W):
for j in range(H):
tmp_v = value_rec[i, j]
x_min, y_min, x_max, y_max = get_box_from_point(i, j, kernel, pad, stride)
give_v = (tmp_v+0.)/((x_max + 1 - x_min)*(y_max + 1 - y_min))
for p in range(x_min, x_max+1):
for q in range(y_min, y_max+1):
if p >= 0 and p < W_ori and q >=0 and q < H_ori:
res_rec[p, q] += give_v
return res_rec
def multiple_map_value(value_rec, params_list):
tmp_res = value_rec
for params in params_list:
kernel = params[0]
pad = params[1]
stride = params[2]
tmp_res = single_map_value(tmp_res, kernel, pad, stride)
return tmp_res
# tst_area = np.full([40,40],0.)
# tst_area[20,20] = 1.
# res_area = single_map_value(value_rec=tst_area, kernel=[5, 5], pad=[2,2], stride=[1,1])
# for i in range(res_area.shape[0]):
# for j in range(res_area.shape[1]):
# print i,j,res_area[i, j]
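# Hedged runnable variant of the commented-out test above: spread a single
# activation back over the receptive field of a 5x5 / pad 2 / stride 1 layer.
if __name__ == '__main__':
    tst_area = np.full([40, 40], 0.)
    tst_area[20, 20] = 1.
    res_area = single_map_value(value_rec=tst_area, kernel=[5, 5], pad=[2, 2], stride=[1, 1])
    # Away from the borders the unit of "value" is conserved, spread over the 5x5 window.
    print(res_area.shape, round(res_area.sum(), 6))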
|
python
|
from django.urls import path
from . import views
urlpatterns = [
path("", views.OrderDetailsView.as_view()),
path("<uuid:order_id>", views.OrderDetailsView.as_view()),
path("status/<str:order_status_value>", views.OrderStatusView.as_view()),
path("incoming", views.IncomingOrders.as_view()),
path("statuses", views.OrderStatusesList.as_view()),
path("current", views.OrderCurrent.as_view()),
path("list", views.OrderList.as_view()),
path("complaint", views.ComplaintView.as_view()),
]
|
python
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import argparse
from typing import Callable, Union, List, Tuple
import numpy as np
import cv2
import scipy.ndimage
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import paddlehub.vision.segmentation_transforms as T
from paddlehub.module.module import moduleinfo, runnable, serving
from modnet_mobilenetv2_matting.mobilenetv2 import MobileNetV2
import modnet_mobilenetv2_matting.processor as P
@moduleinfo(
name="modnet_mobilenetv2_matting",
type="CV",
author="paddlepaddle",
summary="modnet_mobilenetv2_matting is a matting model",
version="1.0.0"
)
class MODNetMobilenetV2(nn.Layer):
"""
The MODNet implementation based on PaddlePaddle.
The original article refers to
Zhanghan Ke, et, al. "Is a Green Screen Really Necessary for Real-Time Portrait Matting?"
(https://arxiv.org/pdf/2011.11961.pdf).
Args:
hr_channels(int, optional): The channels of the high resolution branch. Default: 32.
pretrained(str, optional): The path of the pretrained model. Default: None.
"""
def __init__(self, hr_channels:int = 32, pretrained=None):
super(MODNetMobilenetV2, self).__init__()
self.backbone = MobileNetV2()
self.pretrained = pretrained
self.head = MODNetHead(
hr_channels=hr_channels, backbone_channels=self.backbone.feat_channels)
self.blurer = GaussianBlurLayer(1, 3)
self.transforms = P.Compose([P.LoadImages(), P.ResizeByShort(), P.ResizeToIntMult(), P.Normalize()])
if pretrained is not None:
model_dict = paddle.load(pretrained)
self.set_dict(model_dict)
print("load custom parameters success")
else:
checkpoint = os.path.join(self.directory, 'modnet-mobilenetv2.pdparams')
model_dict = paddle.load(checkpoint)
self.set_dict(model_dict)
print("load pretrained parameters success")
def preprocess(self, img: Union[str, np.ndarray] , transforms: Callable, trimap: Union[str, np.ndarray] = None):
data = {}
data['img'] = img
if trimap is not None:
data['trimap'] = trimap
data['gt_fields'] = ['trimap']
data['trans_info'] = []
data = self.transforms(data)
data['img'] = paddle.to_tensor(data['img'])
data['img'] = data['img'].unsqueeze(0)
if trimap is not None:
data['trimap'] = paddle.to_tensor(data['trimap'])
data['trimap'] = data['trimap'].unsqueeze((0, 1))
return data
def forward(self, inputs: dict):
x = inputs['img']
feat_list = self.backbone(x)
y = self.head(inputs=inputs, feat_list=feat_list)
return y
def predict(self, image_list: list, trimap_list: list = None, visualization: bool =False, save_path: str = "modnet_mobilenetv2_matting_output"):
self.eval()
result = []
with paddle.no_grad():
for i, im_path in enumerate(image_list):
trimap = trimap_list[i] if trimap_list is not None else None
data = self.preprocess(img=im_path, transforms=self.transforms, trimap=trimap)
alpha_pred = self.forward(data)
alpha_pred = P.reverse_transform(alpha_pred, data['trans_info'])
alpha_pred = (alpha_pred.numpy()).squeeze()
alpha_pred = (alpha_pred * 255).astype('uint8')
alpha_pred = P.save_alpha_pred(alpha_pred, trimap)
result.append(alpha_pred)
if visualization:
if not os.path.exists(save_path):
os.makedirs(save_path)
img_name = str(time.time()) + '.png'
image_save_path = os.path.join(save_path, img_name)
cv2.imwrite(image_save_path, alpha_pred)
return result
@serving
def serving_method(self, images: list, trimaps:list = None, **kwargs):
"""
Run as a service.
"""
images_decode = [P.base64_to_cv2(image) for image in images]
if trimaps is not None:
trimap_decoder = [cv2.cvtColor(P.base64_to_cv2(trimap), cv2.COLOR_BGR2GRAY) for trimap in trimaps]
else:
trimap_decoder = None
outputs = self.predict(image_list=images_decode, trimap_list= trimap_decoder, **kwargs)
serving_data = [P.cv2_to_base64(outputs[i]) for i in range(len(outputs))]
results = {'data': serving_data}
return results
@runnable
def run_cmd(self, argvs: list):
"""
Run as a command.
"""
self.parser = argparse.ArgumentParser(
description="Run the {} module.".format(self.name),
prog='hub run {}'.format(self.name),
usage='%(prog)s',
add_help=True)
self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
self.arg_config_group = self.parser.add_argument_group(
title="Config options", description="Run configuration for controlling module behavior, not required.")
self.add_module_config_arg()
self.add_module_input_arg()
args = self.parser.parse_args(argvs)
if args.trimap_path is not None:
trimap_list = [args.trimap_path]
else:
trimap_list = None
results = self.predict(image_list=[args.input_path], trimap_list=trimap_list, save_path=args.output_dir, visualization=args.visualization)
return results
def add_module_config_arg(self):
"""
Add the command config options.
"""
self.arg_config_group.add_argument(
'--output_dir', type=str, default="modnet_mobilenetv2_matting_output", help="The directory to save output images.")
self.arg_config_group.add_argument(
'--visualization', type=bool, default=True, help="whether to save output as images.")
def add_module_input_arg(self):
"""
Add the command input options.
"""
self.arg_input_group.add_argument('--input_path', type=str, help="path to image.")
self.arg_input_group.add_argument('--trimap_path', type=str, default=None, help="path to image.")
class MODNetHead(nn.Layer):
"""
Segmentation head.
"""
def __init__(self, hr_channels: int, backbone_channels: int):
super().__init__()
self.lr_branch = LRBranch(backbone_channels)
self.hr_branch = HRBranch(hr_channels, backbone_channels)
self.f_branch = FusionBranch(hr_channels, backbone_channels)
def forward(self, inputs: paddle.Tensor, feat_list: list):
pred_semantic, lr8x, [enc2x, enc4x] = self.lr_branch(feat_list)
pred_detail, hr2x = self.hr_branch(inputs['img'], enc2x, enc4x, lr8x)
pred_matte = self.f_branch(inputs['img'], lr8x, hr2x)
if self.training:
logit_dict = {
'semantic': pred_semantic,
'detail': pred_detail,
'matte': pred_matte
}
return logit_dict
else:
return pred_matte
class FusionBranch(nn.Layer):
def __init__(self, hr_channels: int, enc_channels: int):
super().__init__()
self.conv_lr4x = Conv2dIBNormRelu(
enc_channels[2], hr_channels, 5, stride=1, padding=2)
self.conv_f2x = Conv2dIBNormRelu(
2 * hr_channels, hr_channels, 3, stride=1, padding=1)
self.conv_f = nn.Sequential(
Conv2dIBNormRelu(
hr_channels + 3, int(hr_channels / 2), 3, stride=1, padding=1),
Conv2dIBNormRelu(
int(hr_channels / 2),
1,
1,
stride=1,
padding=0,
with_ibn=False,
with_relu=False))
def forward(self, img: paddle.Tensor, lr8x: paddle.Tensor, hr2x: paddle.Tensor):
lr4x = F.interpolate(
lr8x, scale_factor=2, mode='bilinear', align_corners=False)
lr4x = self.conv_lr4x(lr4x)
lr2x = F.interpolate(
lr4x, scale_factor=2, mode='bilinear', align_corners=False)
f2x = self.conv_f2x(paddle.concat((lr2x, hr2x), axis=1))
f = F.interpolate(
f2x, scale_factor=2, mode='bilinear', align_corners=False)
f = self.conv_f(paddle.concat((f, img), axis=1))
pred_matte = F.sigmoid(f)
return pred_matte
class HRBranch(nn.Layer):
"""
High Resolution Branch of MODNet
"""
def __init__(self, hr_channels: int, enc_channels:int):
super().__init__()
self.tohr_enc2x = Conv2dIBNormRelu(
enc_channels[0], hr_channels, 1, stride=1, padding=0)
self.conv_enc2x = Conv2dIBNormRelu(
hr_channels + 3, hr_channels, 3, stride=2, padding=1)
self.tohr_enc4x = Conv2dIBNormRelu(
enc_channels[1], hr_channels, 1, stride=1, padding=0)
self.conv_enc4x = Conv2dIBNormRelu(
2 * hr_channels, 2 * hr_channels, 3, stride=1, padding=1)
self.conv_hr4x = nn.Sequential(
Conv2dIBNormRelu(
2 * hr_channels + enc_channels[2] + 3,
2 * hr_channels,
3,
stride=1,
padding=1),
Conv2dIBNormRelu(
2 * hr_channels, 2 * hr_channels, 3, stride=1, padding=1),
Conv2dIBNormRelu(
2 * hr_channels, hr_channels, 3, stride=1, padding=1))
self.conv_hr2x = nn.Sequential(
Conv2dIBNormRelu(
2 * hr_channels, 2 * hr_channels, 3, stride=1, padding=1),
Conv2dIBNormRelu(
2 * hr_channels, hr_channels, 3, stride=1, padding=1),
Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride=1, padding=1),
Conv2dIBNormRelu(hr_channels, hr_channels, 3, stride=1, padding=1))
self.conv_hr = nn.Sequential(
Conv2dIBNormRelu(
hr_channels + 3, hr_channels, 3, stride=1, padding=1),
Conv2dIBNormRelu(
hr_channels,
1,
1,
stride=1,
padding=0,
with_ibn=False,
with_relu=False))
def forward(self, img: paddle.Tensor, enc2x: paddle.Tensor, enc4x: paddle.Tensor, lr8x: paddle.Tensor):
img2x = F.interpolate(
img, scale_factor=1 / 2, mode='bilinear', align_corners=False)
img4x = F.interpolate(
img, scale_factor=1 / 4, mode='bilinear', align_corners=False)
enc2x = self.tohr_enc2x(enc2x)
hr4x = self.conv_enc2x(paddle.concat((img2x, enc2x), axis=1))
enc4x = self.tohr_enc4x(enc4x)
hr4x = self.conv_enc4x(paddle.concat((hr4x, enc4x), axis=1))
lr4x = F.interpolate(
lr8x, scale_factor=2, mode='bilinear', align_corners=False)
hr4x = self.conv_hr4x(paddle.concat((hr4x, lr4x, img4x), axis=1))
hr2x = F.interpolate(
hr4x, scale_factor=2, mode='bilinear', align_corners=False)
hr2x = self.conv_hr2x(paddle.concat((hr2x, enc2x), axis=1))
pred_detail = None
if self.training:
hr = F.interpolate(
hr2x, scale_factor=2, mode='bilinear', align_corners=False)
hr = self.conv_hr(paddle.concat((hr, img), axis=1))
pred_detail = F.sigmoid(hr)
return pred_detail, hr2x
class LRBranch(nn.Layer):
"""
Low Resolution Branch of MODNet
"""
def __init__(self, backbone_channels: int):
super().__init__()
self.se_block = SEBlock(backbone_channels[4], reduction=4)
self.conv_lr16x = Conv2dIBNormRelu(
backbone_channels[4], backbone_channels[3], 5, stride=1, padding=2)
self.conv_lr8x = Conv2dIBNormRelu(
backbone_channels[3], backbone_channels[2], 5, stride=1, padding=2)
self.conv_lr = Conv2dIBNormRelu(
backbone_channels[2],
1,
3,
stride=2,
padding=1,
with_ibn=False,
with_relu=False)
def forward(self, feat_list: list):
enc2x, enc4x, enc32x = feat_list[0], feat_list[1], feat_list[4]
enc32x = self.se_block(enc32x)
lr16x = F.interpolate(
enc32x, scale_factor=2, mode='bilinear', align_corners=False)
lr16x = self.conv_lr16x(lr16x)
lr8x = F.interpolate(
lr16x, scale_factor=2, mode='bilinear', align_corners=False)
lr8x = self.conv_lr8x(lr8x)
pred_semantic = None
if self.training:
lr = self.conv_lr(lr8x)
pred_semantic = F.sigmoid(lr)
return pred_semantic, lr8x, [enc2x, enc4x]
class IBNorm(nn.Layer):
"""
Combine Instance Norm and Batch Norm into One Layer
"""
def __init__(self, in_channels: int):
super().__init__()
self.bnorm_channels = in_channels // 2
self.inorm_channels = in_channels - self.bnorm_channels
self.bnorm = nn.BatchNorm2D(self.bnorm_channels)
self.inorm = nn.InstanceNorm2D(self.inorm_channels)
def forward(self, x):
bn_x = self.bnorm(x[:, :self.bnorm_channels, :, :])
in_x = self.inorm(x[:, self.bnorm_channels:, :, :])
return paddle.concat((bn_x, in_x), 1)
class Conv2dIBNormRelu(nn.Layer):
"""
Convolution + IBNorm + Relu
"""
def __init__(self,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
padding: int = 0,
dilation:int = 1,
groups: int = 1,
bias_attr: paddle.ParamAttr = None,
with_ibn: bool = True,
with_relu: bool = True):
super().__init__()
layers = [
nn.Conv2D(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias_attr=bias_attr)
]
if with_ibn:
layers.append(IBNorm(out_channels))
if with_relu:
layers.append(nn.ReLU())
self.layers = nn.Sequential(*layers)
def forward(self, x: paddle.Tensor):
return self.layers(x)
class SEBlock(nn.Layer):
"""
SE Block Proposed in https://arxiv.org/pdf/1709.01507.pdf
"""
def __init__(self, num_channels: int, reduction:int = 1):
super().__init__()
self.pool = nn.AdaptiveAvgPool2D(1)
self.conv = nn.Sequential(
nn.Conv2D(
num_channels,
int(num_channels // reduction),
1,
bias_attr=False), nn.ReLU(),
nn.Conv2D(
int(num_channels // reduction),
num_channels,
1,
bias_attr=False), nn.Sigmoid())
def forward(self, x: paddle.Tensor):
w = self.pool(x)
w = self.conv(w)
return w * x
class GaussianBlurLayer(nn.Layer):
""" Add Gaussian Blur to a 4D tensors
This layer takes a 4D tensor of {N, C, H, W} as input.
The Gaussian blur is applied to each of the given channels (C) separately.
"""
def __init__(self, channels: int, kernel_size: int):
"""
Args:
channels (int): Channel for input tensor
kernel_size (int): Size of the kernel used in blurring
"""
super(GaussianBlurLayer, self).__init__()
self.channels = channels
self.kernel_size = kernel_size
assert self.kernel_size % 2 != 0
self.op = nn.Sequential(
nn.Pad2D(int(self.kernel_size / 2), mode='reflect'),
nn.Conv2D(
channels,
channels,
self.kernel_size,
stride=1,
padding=0,
bias_attr=False,
groups=channels))
self._init_kernel()
self.op[1].weight.stop_gradient = True
def forward(self, x: paddle.Tensor):
"""
Args:
x (paddle.Tensor): input 4D tensor
Returns:
paddle.Tensor: Blurred version of the input
"""
if not len(list(x.shape)) == 4:
print('\'GaussianBlurLayer\' requires a 4D tensor as input\n')
exit()
elif not x.shape[1] == self.channels:
print('In \'GaussianBlurLayer\', the required channel ({0}) is '
'not the same as input ({1})\n'.format(
self.channels, x.shape[1]))
exit()
return self.op(x)
def _init_kernel(self):
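        # Note: the sigma-from-kernel-size heuristic below matches the default
        # relation OpenCV uses (sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8).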
sigma = 0.3 * ((self.kernel_size - 1) * 0.5 - 1) + 0.8
n = np.zeros((self.kernel_size, self.kernel_size))
i = int(self.kernel_size / 2)
n[i, i] = 1
kernel = scipy.ndimage.gaussian_filter(n, sigma)
kernel = kernel.astype('float32')
kernel = kernel[np.newaxis, np.newaxis, :, :]
paddle.assign(kernel, self.op[1].weight)
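# Hedged usage sketch (comments only): once installed as a PaddleHub module,
# the matting model could be invoked roughly like this; the image path is a
# placeholder.
#
#   import paddlehub as hub
#   matting = hub.Module(name="modnet_mobilenetv2_matting")
#   alphas = matting.predict(image_list=["/PATH/TO/IMAGE.jpg"], visualization=True)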
|
python
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import hungrybotlib as l
from random import choice
import config as cfg
l.cacheMenus(cfg.menu_file, cfg.menu_url, cfg.days)
with open("counter", "r") as f:
count = int(f.read())
text = ["Une nouvelle semaine de bouffe !", "Et voila, je viens de mettre à jour les menus.", "Le RAK vient de me dire ce qu'on mange la semaine prochaine..."]
if count == 1:
l.sayFood("Bonjour ! Je suis fier de me présenter, je suis HungryBot, je vais vous guider tout au long de vos études en vous indiquant ce que vous pourrez manger de bon au RAK de TB !", cfg.channels, cfg.token)
else:
l.sayFood(choice(text)+"\nC'est la "+str(count)+"ème semaine que je suis à votre service.", cfg.channels, cfg.token)
with open("counter", "w") as f:
f.write(str(count+1))
|
python
|
"""Output an NML file cataloging a set of audio files.
NML is an XML-based file format used by Traktor. This code generates
NML version 11, which is used by Traktor Pro.
"""
import time
import xml.sax.saxutils
from chirp.common import timestamp
from chirp.common import unicode_util
from chirp.library import artists
from chirp.library import order
_UNKNOWN_ARTIST = "* Artist Not Known *"
_UNKNOWN_ALBUM = "* Album Not Known *"
_UNKNOWN_SONG = "* Title Not Known *"
# The following are templates used to produce NML files.
# Boilerplate that goes at the beginning of every NML file. The one
# format parameter is an integer giving the total number of entries to
# be found in the file.
_NML_PREFIX = u"""<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<NML VERSION="14"><HEAD COMPANY="www.native-instruments.com" PROGRAM="Traktor - Native Instruments"></HEAD>
<MUSICFOLDERS></MUSICFOLDERS>
<COLLECTION ENTRIES="%10d">"""
# A template for producing individual song entries.
_NML_ENTRY = u"""<ENTRY MODIFIED_DATE=%(modified_date)s MODIFIED_TIME=%(modified_time)s TITLE=%(song)s ARTIST=%(artist)s><LOCATION DIR=%(dir)s FILE=%(file)s VOLUME=%(volume)s VOLUME_ID=""></LOCATION>
<ALBUM OF_TRACKS=%(total_num)s TITLE=%(album)s TRACK=%(order_num)s></ALBUM>
<INFO BITRATE=%(bitrate)s GENRE=%(genre)s PLAYTIME=%(duration_s)s IMPORT_DATE=%(import_date)s FILESIZE=%(size_in_kb)s></INFO>
</ENTRY>
"""
# Boilerplate that goes at the end of every NML file.
_NML_SUFFIX = u"""</COLLECTION>
<PLAYLISTS><NODE TYPE="FOLDER" NAME="$ROOT"><SUBNODES COUNT="1">
<NODE TYPE="PLAYLIST" NAME="_RECORDINGS"><PLAYLIST ENTRIES="0" TYPE="LIST"></PLAYLIST>
</NODE>
</SUBNODES>
</NODE>
</PLAYLISTS>
</NML>
"""
def _traktor_path_quote(path):
return path.replace("/", "/:")
class NMLWriter(object):
"""Generates an NML file for a collection of AudioFile objects."""
def __init__(self, file_volume, root_dir, out_fh):
"""Constructor.
Args:
file_volume: The SMB-style file volume containing the files.
This volume will need to be visible to the PC running Traktor
that the NML file will ultimately be used from.
root_dir: The root directory of the library, as seen by the
machine that is running Traktor.
out_fh: The file handle to write to.
"""
self.num_entries = 0
self._file_volume = file_volume
self._file_volume_quoted = _traktor_path_quote(file_volume)
self._root_dir = root_dir
self._out_fh = out_fh
# Make sure we are at the beginning of the file.
self._out_fh.seek(0)
# Write out a prefix for 0 entries.
self._out_fh.write(_NML_PREFIX % 0)
self._all_entries = []
def write(self, au_file):
"""Adds a an audio file to the collection.
Args:
au_file: An AudioFile object to add to the collection.
"""
entry_data = {}
entry_data["order_num"], entry_data["total_num"] = order.decode(
str(au_file.mutagen_id3.get("TRCK")))
if entry_data["total_num"] is None:
entry_data["total_num"] = 100
entry_data["artist"] = unicode_util.simplify(
au_file.mutagen_id3.get("TPE1", _UNKNOWN_ARTIST))
entry_data["album"] = unicode_util.simplify(
au_file.mutagen_id3.get("TALB", _UNKNOWN_ALBUM))
entry_data["song"] = unicode_util.simplify(
au_file.mutagen_id3.get("TIT2", _UNKNOWN_SONG))
# TODO(trow): Set this somehow.
entry_data["genre"] = "Unknown"
entry_data["dir"] = _traktor_path_quote(
au_file.canonical_directory(prefix=self._root_dir))
entry_data["file"] = au_file.canonical_filename()
entry_data["volume"] = self._file_volume_quoted
entry_data["bitrate"] = int(
au_file.mp3_header.bit_rate_kbps * 1000)
entry_data["size_in_kb"] = int(au_file.frame_size / 1024)
entry_data["duration_s"] = int(au_file.duration_ms / 1000)
entry_data["import_date"] = time.strftime(
"%Y/%m/%d", time.gmtime(au_file.import_timestamp))
entry_data["modified_date"] = entry_data["import_date"]
entry_data["modified_time"] = "35364"
order_num = int(entry_data["order_num"])
# Clean up any XML-unsafe characters and wrap each value in
# quotes.
for k, v in entry_data.items():
new_v = xml.sax.saxutils.quoteattr(unicode(v))
if new_v != v:
entry_data[k] = new_v
# TODO(trow): For now, we build a list of all entries so that
# we can fix the ordering --- that is because Traktor
# idiotically chooses to order tracks based on the order they
# appear in the NML file, not based on the track numbering.
entry_key = (au_file.album_id, order_num)
self._all_entries.append((entry_key, entry_data))
# TODO(trow): This is how we should do it!
#self._out_fh.write(_NML_ENTRY % entry_data)
self.num_entries += 1
def close(self):
# TODO(trow): We shouldn't need to build up a big in-memory
# data structure here!
self._all_entries.sort()
for _, entry_data in self._all_entries:
self._out_fh.write(_NML_ENTRY % entry_data)
# Write out the suffix.
self._out_fh.write(_NML_SUFFIX)
# Write out the prefix with the correct number of entries.
self._out_fh.seek(0)
self._out_fh.write(_NML_PREFIX % self.num_entries)
# Note: does not close the underlying file object!
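# Hedged usage sketch (comments only): ``au_files`` is assumed to be an
# iterable of chirp AudioFile objects; the volume and paths are placeholders.
#
#   with open("collection.nml", "w") as out_fh:
#       writer = NMLWriter("//SERVER/music", "/Library", out_fh)
#       for au_file in au_files:
#           writer.write(au_file)
#       writer.close()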
|
python
|
input = open('input/input12.txt').readlines()
plants = []
ZERO = 5
BASE_GEN_COUNT = 0
FINAL_GEN_COUNT = 50000000000
will_grow = set()
wont_grow = set()
for line in input:
line = line.strip()
if line.startswith('initial'):
pots = list(line.split(' ')[2])
BASE_GEN_COUNT = len(pots)
plants = ['.' for p in range(len(pots) + BASE_GEN_COUNT + ZERO)]
for pot in range(len(pots)):
plants[ZERO + pot] = pots[pot]
elif line.endswith('#'):
will_grow.add(line.split(' => ')[0])
elif line.endswith('.'):
wont_grow.add(line.split(' => ')[0])
def get_plant_total():
total = 0
for i in range(len(plants)):
if plants[i] == '#': total += (i - ZERO)
return total
# I observed through experimentation that the change delta stayed the
# same after the 100th generation, so it is only necessary to calculate
# up to there. I'm guessing it is 100 because that is the length of the
# initial string. Surely there is an official name for this statistical
# pattern, but I don't know what it is...
plant_count = get_plant_total()
last_delta = 0
for g in range(BASE_GEN_COUNT):
if g == 20: print('Solution 12.1:', plant_count)
new_gen = ['.' for i in range(len(plants))]
for p in range(len(plants) - 5):
segment = ''.join(plants[p:p+5])
if segment in will_grow:
new_gen[p+2] = '#'
elif segment in wont_grow:
new_gen[p+2] = '.'
plants = new_gen
new_plant_count = get_plant_total()
new_delta = new_plant_count - plant_count
if last_delta != new_delta:
# print(g, 'to', g+1, 'delta from', last_delta, 'to', new_delta)
last_delta = new_delta
plant_count = new_plant_count
print('Solution 12.2:', plant_count + ((FINAL_GEN_COUNT - BASE_GEN_COUNT)*last_delta))
|
python
|
SOCIAL_AUTH_GITHUB_KEY = '0ec5adf60f9d0db84213'
SOCIAL_AUTH_GITHUB_SECRET = 'c4b6cd88aac6b4515c5b396be2727b21ee54725e'
SOCIAL_AUTH_LOGIN_URL = '/login'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login-error/'
SESSION_COOKIE_DOMAIN = '.localhost.be'
USE_X_FORWARDED_HOST = True
SOCIAL_AUTH_SANITIZE_REDIRECTS = False
DOCKER_API = '1.21'
|
python
|
from functools import lru_cache
import os
from time import time
import json
from io import StringIO
import markdown
import jwt
import requests
import github
import datetime
import logging
from time import sleep
from ruamel.yaml import YAML
yaml = YAML()
ZOOM_API = "https://api.zoom.us/v2/"
SPEAKERS_CORNER_USER_ID = "D0n5UNEHQiajWtgdWLlNSA"
VSF_USER_ID = "iJFotmmLRgOHJrTe9MKHRA"
TALKS_FILE = "talks.yml"
MAILGUN_BASE_URL = "https://api.eu.mailgun.net/v3/"
MAILGUN_DOMAIN = "mail.virtualscienceforum.org/"
class CollectExceptions:
def __init__(self):
self.exceptions = []
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
self.exceptions.append([exc_type, exc_value])
return True
def reraise(self):
if not self.exceptions:
return
elif len(self.exceptions) == 1:
raise RuntimeError() from self.exceptions[0][1]
raise RuntimeError([
exc_value
for _, exc_value in self.exceptions
])
def wait_until(minute):
"""Sleep until a specified minute of the hour starts."""
now = datetime.datetime.now(tz=datetime.timezone.utc)
desired = now.replace(minute=minute, second=0, microsecond=0)
if desired < now:
desired += datetime.timedelta(hours=1)
logging.info(f"Sleeping until {desired}")
sleep((desired - now).total_seconds())
def make_zoom_headers(duration: float=100) -> callable:
expiration = time() + duration
def zoom_headers() -> dict:
zoom_api_key = os.getenv("ZOOM_API_KEY")
zoom_api_secret = os.getenv("ZOOM_API_SECRET")
nonlocal expiration
if time() > expiration:
expiration = time() + duration
token = jwt.encode(
# Create a payload of the token containing API Key & expiration time
{"iss": zoom_api_key, "exp": expiration},
zoom_api_secret,
algorithm='HS256'
)
return {'authorization': f'Bearer {token}', 'content-type': 'application/json'}
return zoom_headers
zoom_headers = make_zoom_headers()
def vsf_repo():
gh = github.Github(os.getenv("VSF_BOT_TOKEN"))
return gh.get_repo("virtualscienceforum/virtualscienceforum")
def talks_data(ref="master", repo=None):
if repo is None:
repo = vsf_repo()
# Read the talks file
talks_data = repo.get_contents(TALKS_FILE, ref=ref)
talks = yaml.load(StringIO(talks_data.decoded_content.decode()))
for talk in talks:
# Workaround against issues
# https://sourceforge.net/p/ruamel-yaml/tickets/365/
# https://sourceforge.net/p/ruamel-yaml/tickets/366
# Note that we rely on the current behavior that returns UTC time
talk["time"] = datetime.datetime.fromtimestamp(
talk["time"]
.replace(tzinfo=datetime.timezone.utc)
.timestamp(),
tz=datetime.timezone.utc
)
return talks, talks_data.sha
def zoom_request(method: callable, *args, **kwargs):
"""A minimal wrapper around requests for querying zoom API with error handling"""
response = method(*args, **kwargs, headers=zoom_headers())
if response.status_code > 299:
raise RuntimeError(response.content.decode())
if response.content:
return response.json()
def speakers_corner_user_id() -> str:
users = zoom_request(requests.get, ZOOM_API + "users")["users"]
sc_user_id = next(
u["id"] for u in users
if u["first_name"] == "Speakers'" and u["last_name"] == "Corner"
)
return sc_user_id
def all_meetings(user_id) -> list:
"""Return all meetings by a user.
Handles pagination, and adds ``live: True`` to a meeting that is running (if any).
"""
meetings = []
next_page_token = ""
while True:
meetings_page = zoom_request(
requests.get,
f"{ZOOM_API}users/{user_id}/meetings",
params={"type": "scheduled", "page_size": 300, "next_page_token": next_page_token}
)
meetings += meetings_page["meetings"]
next_page_token = meetings_page["next_page_token"]
if not next_page_token:
break
live_meetings = zoom_request(
requests.get,
f"{ZOOM_API}users/{user_id}/meetings",
params={"type": "live", "page_size": 300}
)["meetings"]
if live_meetings:
for meeting in meetings:
if meeting["id"] == live_meetings[0]["id"]:
meeting["live"] = True
return meetings
def api_query(method, endpoint, **params):
"""A simple wrapper around mailgun API query"""
response = method(
MAILGUN_BASE_URL + endpoint,
auth=("api", os.getenv("MAILGUN_API_KEY")),
**params
)
try:
result = response.json()
except ValueError:
result = response.text
if response.status_code > 299: # Not OK
raise RuntimeError(result)
return result
def markdown_to_email(text: str) -> str:
html = markdown.markdown(text)
return (
'<table cellspacing="0" cellpadding="0" border="0"><tr>'
'<td style="word-break:normal;border-collapse:collapse!important;max-width:600px">'
f'{html}</td></tr></table>'
)
def markdown_to_plain(text: str) -> str:
return text.replace('[', '').replace(']', ' ').replace(' \n', '\n').replace('*', '')
def meeting_registrants(zoom_meeting_id: int) -> dict:
registrants = []
next_page_token = ""
while True:
response = requests.get(
f"https://api.zoom.us/v2/meetings/{zoom_meeting_id}/registrants",
headers=zoom_headers(),
params={"next_page_token": next_page_token}
)
# Registration was not enabled for this meeting
if response.status_code == 400:
return []
response = response.json()
registrants += response["registrants"]
next_page_token = response["next_page_token"]
if not next_page_token:
break
registrants = [
{**i, **{q["title"]: q["value"] for q in i.pop("custom_questions")}}
for i in registrants
]
return registrants
def send_to_participants(
template: str,
subject: str,
talk: dict,
from_email: str,
):
"""
Send an email to meeting participants.
template : jinja2.Template
Email body, variables are keys of ``talk`` (see talks yaml).
subject : str
Email subject, format string expecting as variables keys of ``talk`` (see talks yaml).
talk : dict
Dictionary corresponding to an entry in the talks yaml file.
from_email : str
Sender address to use for the mailgun "from" field.
"""
message = template.render(**talk)
registrants = meeting_registrants(talk['zoom_meeting_id'])
# Defensively filter out invalid registrants
# See https://github.com/virtualscienceforum/automation/issues/27
registrants = [r for r in registrants if "email" in r and "join_url" in r]
data = {
"from": from_email,
"to": list({
f"{r.get('first_name', '')} {r.get('last_name', '')} <{r['email']}>"
for r in registrants
}),
"subject": subject.format(**talk),
"text": markdown_to_plain(message),
"html": markdown_to_email(message),
"recipient-variables": json.dumps(
{r["email"]: {"join_url": r["join_url"]}
for r in registrants}
),
}
return api_query(
requests.post,
MAILGUN_DOMAIN + "messages",
data=data
)
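# Hedged usage sketch (comments only): emailing registrants of the next talk.
# Assumes ZOOM_API_*, MAILGUN_API_KEY and VSF_BOT_TOKEN are set, that talk
# entries carry a ``title`` key (an assumption) besides ``zoom_meeting_id``,
# and that the sender address below is a placeholder.
#
#   import jinja2
#   talks, _ = talks_data()
#   next_talk = min(talks, key=lambda t: t["time"])
#   send_to_participants(
#       template=jinja2.Template("Your personal link: %recipient.join_url%"),
#       subject="Reminder: {title}",
#       talk=next_talk,
#       from_email="Virtual Science Forum <[email protected]>",
#   )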
|
python
|
# Lesson 24. Píldoras Informáticas course.
# Flow Control. OOP 1.
# Theory lesson.
# Object-oriented languages: C++, Java, VisualNet...
# Attributes/Properties: the elements that define classes and objects.
# Advantages of OOP:
# Modules can be established.
# Highly reusable code (not possible with Fortran and other non-OO languages). Inheritance.
# Exception handling is available.
# It provides the benefit of encapsulation.
|
python
|
#coding=utf-8
import tensorflow as tf
import wmodule
import basic_tftools as btf
import wml_tfutils as wmlt
from object_detection2.datadef import EncodedData
import tfop
import functools
from object_detection2.datadef import *
import numpy as np
import wnn
import wsummary
from .build import HEAD_OUTPUTS
import object_detection2.wlayers as odl
from object_detection2.modeling.matcher import Matcher
slim = tf.contrib.slim
@HEAD_OUTPUTS.register()
class BoxFreeOutputs(wmodule.WChildModule):
"""
A class that stores information about outputs of a Fast R-CNN head.
"""
def __init__(
self, cfg,parent,box2box_transform, pred_class_logits, pred_proposal_deltas,proposals:EncodedData,
pred_iou_logits=None,
**kwargs
):
"""
Args:
box2box_transform (Box2BoxTransform/Box2BoxTransformRotated):
box2box transform instance for proposal-to-detection transformations.
pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class
logits for all R predicted object instances.
Each row corresponds to a predicted object instance.
pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for
class-specific or class-agnostic regression. It stores the predicted deltas that
transform proposals into final box detections.
B is the box dimension (4 or 5).
When B is 4, each row is [dx, dy, dw, dh (, ....)].
When B is 5, each row is [dx, dy, dw, dh, da (, ....)].
proposals: When training it's EncodedData, when inference, it's ProposalsData
"""
super().__init__(cfg,parent,**kwargs)
self.pred_class_logits = pred_class_logits
# keep a reference to the proposals; softmax_cross_entropy_loss reads ED_SCORES from it
self.proposals = proposals
if self.is_training:
gt_logits_i = proposals.gt_object_logits
'''
gt_logits_i's shape is [batch_size,box_nr]
'''
self.gt_classes = tf.reshape(gt_logits_i,[-1])
def _log_accuracy(self):
"""
Log the accuracy metrics to EventStorage.
"""
accuracy = wnn.accuracy_ratio(logits=self.pred_class_logits,labels=self.gt_classes)
tf.summary.scalar("fast_rcnn/accuracy",accuracy)
def softmax_cross_entropy_loss(self):
"""
Compute the softmax cross entropy loss for box classification.
Returns:
scalar Tensor
"""
self._log_accuracy()
wsummary.variable_summaries_v2(self.gt_classes,"gt_classes")
wsummary.variable_summaries_v2(self.pred_class_logits,"pred_class_logits")
if self.cfg.MODEL.ROI_HEADS.POS_LABELS_THRESHOLD>1e-3:
with tf.name_scope("modify_gtclasses"):
threshold = self.cfg.MODEL.ROI_HEADS.POS_LABELS_THRESHOLD
scores = tf.reshape(self.proposals[ED_SCORES],[-1])
gt_classes = self.gt_classes
gt_classes = tf.where(tf.greater(scores,threshold),gt_classes,tf.zeros_like(gt_classes))
classes_loss = tf.losses.sparse_softmax_cross_entropy(logits=self.pred_class_logits, labels=gt_classes,
loss_collection=None,
reduction=tf.losses.Reduction.MEAN)
else:
classes_loss = tf.losses.sparse_softmax_cross_entropy(logits=self.pred_class_logits, labels=self.gt_classes,
loss_collection=None,
reduction=tf.losses.Reduction.MEAN)
wsummary.histogram_or_scalar(classes_loss,"fast_rcnn/classes_loss")
return classes_loss*self.cfg.MODEL.ROI_HEADS.BOX_CLS_LOSS_SCALE
def losses(self):
"""
Compute the default losses for box head in Fast(er) R-CNN,
with softmax cross entropy loss and smooth L1 loss.
Returns:
A dict of losses (scalar tensors) containing keys "loss_cls" and "loss_box_reg".
"""
loss = {
"fastrcnn_loss_cls": self.softmax_cross_entropy_loss(),
}
return loss
def inference(self, score_thresh,
proposal_boxes=None,scores=None):
"""
Args:
score_thresh (float): same as fast_rcnn_inference.
nms_thresh (float): same as fast_rcnn_inference.
topk_per_image (int): same as fast_rcnn_inference.
scores:[batch_size,box_nr,num_classes+1]
Returns:
list[Instances]: same as fast_rcnn_inference.
list[Tensor]: same as fast_rcnn_inference.
"""
with tf.name_scope("fast_rcnn_outputs_inference"):
if scores is None:
probability = tf.nn.softmax(self.pred_class_logits)
else:
probability = scores
            probability = probability[...,1:]  # drop the background class
probability,labels = tf.nn.top_k(probability,k=1)
probability = tf.squeeze(probability,axis=-1)
            labels = tf.squeeze(labels,axis=-1)+1  # add back the removed background offset
size = btf.combined_static_and_dynamic_shape(probability)[0]
res_indices = tf.range(size)
mask = tf.greater(probability,score_thresh)
length = tf.reduce_sum(tf.cast(mask,tf.int32),axis=-1,keepdims=False)
probability = tf.boolean_mask(probability,mask)
boxes = tf.boolean_mask(proposal_boxes,mask)
labels = tf.boolean_mask(labels,mask)
res_indices = tf.boolean_mask(res_indices,mask)
probability, indices= tf.nn.top_k(probability, k=tf.shape(probability)[0])
labels = tf.expand_dims(tf.gather(labels, indices),axis=0)
boxes = tf.expand_dims(tf.gather(boxes, indices),axis=0)
res_indices = tf.expand_dims(tf.gather(res_indices, indices),axis=0)
probability = tf.expand_dims(probability,axis=0)
return {RD_PROBABILITY:probability,RD_BOXES:boxes,RD_LABELS:labels,RD_LENGTH:length,RD_INDICES:res_indices}
|
python
|
from sqlalchemy import Integer, Text, DateTime, func, Boolean, text
from models.database_models import Base, Column
class Comment(Base):
__tablename__ = "comment"
id = Column(Integer, primary_key=True, )
    user_id = Column(Integer, nullable=False, comment="ID of the user who commented")
    post_id = Column(Integer, nullable=False, comment="ID of the Post article")
    content = Column(Text, nullable=False, comment="The user's comment")
    create_time = Column(DateTime, server_default=func.now(), comment="Creation time")
    update_time = Column(DateTime, server_default=func.now(), onupdate=func.now(), comment="Update time")
    deleted = Column(Boolean, default=False, server_default=text('0'), nullable=False, comment="Whether this record has been deleted")
|
python
|
#!/usr/bin/env python
# Copyright (C) 2009 Chia-I Wu <[email protected]>
# All Rights Reserved.
#
# This is based on extension_helper.py by Ian Romanick.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse
import license
import gl_XML
def get_function_spec(func):
sig = ""
# derive parameter signature
for p in func.parameterIterator():
if p.is_padding:
continue
# FIXME: This is a *really* ugly hack. :(
tn = p.type_expr.get_base_type_node()
if p.is_pointer():
sig += 'p'
elif tn.integer:
sig += 'i'
elif tn.size == 4:
sig += 'f'
else:
sig += 'd'
spec = [sig]
for ent in func.entry_points:
spec.append("gl" + ent)
# spec is terminated by an empty string
spec.append('')
return spec
class PrintGlRemap(gl_XML.gl_print_base):
def __init__(self):
gl_XML.gl_print_base.__init__(self)
self.name = "remap_helper.py (from Mesa)"
self.license = license.bsd_license_template % ("Copyright (C) 2009 Chia-I Wu <[email protected]>", "Chia-I Wu")
return
def printRealHeader(self):
print '#include "main/dispatch.h"'
print '#include "main/remap.h"'
print ''
return
def printBody(self, api):
pool_indices = {}
print '/* this is internal to remap.c */'
print '#ifndef need_MESA_remap_table'
print '#error Only remap.c should include this file!'
print '#endif /* need_MESA_remap_table */'
print ''
print ''
print 'static const char _mesa_function_pool[] ='
# output string pool
index = 0;
for f in api.functionIterateAll():
pool_indices[f] = index
spec = get_function_spec(f)
# a function has either assigned offset, fixed offset,
# or no offset
if f.assign_offset:
comments = "will be remapped"
elif f.offset > 0:
comments = "offset %d" % f.offset
else:
comments = "dynamic"
print ' /* _mesa_function_pool[%d]: %s (%s) */' \
% (index, f.name, comments)
for line in spec:
print ' "%s\\0"' % line
index += len(line) + 1
print ' ;'
print ''
print '/* these functions need to be remapped */'
print 'static const struct gl_function_pool_remap MESA_remap_table_functions[] = {'
# output all functions that need to be remapped
# iterate by offsets so that they are sorted by remap indices
for f in api.functionIterateByOffset():
if not f.assign_offset:
continue
print ' { %5d, %s_remap_index },' \
% (pool_indices[f], f.name)
print ' { -1, -1 }'
print '};'
print ''
return
def _parser():
"""Parse input options and return a namsepace."""
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--filename',
default="gl_API.xml",
metavar="input_file_name",
dest='file_name',
help="An xml description file.")
return parser.parse_args()
def main():
"""Main function."""
args = _parser()
api = gl_XML.parse_GL_API(args.file_name)
printer = PrintGlRemap()
printer.Print(api)
if __name__ == '__main__':
main()
|
python
|
# -*- coding: utf-8 -*-
# see LICENSE.rst
# ----------------------------------------------------------------------------
#
# TITLE : Code for the Examples
# AUTHOR : Nathaniel Starkman
# PROJECT : TrackStream
#
# ----------------------------------------------------------------------------
"""Examples Code."""
__author__ = "Nathaniel Starkman"
__copyright__ = "Copyright 2012+"
__license__ = "BSD3"
__maintainer__ = "Nathaniel Starkman"
__all__ = [
"get_transform_matrix",
]
##############################################################################
# IMPORTS
# LOCAL
from .coordinates import get_transform_matrix
##############################################################################
# END
|
python
|
def keep_evens(nums):
new_seq = filter(lambda num: num % 2 == 0, nums)
return list(new_seq)
print(keep_evens([3, 4, 6, 7, 0, 1]))
# Output: [4, 6, 0]
'''
1. Write code to assign to the variable filter_testing all the elements in lst_check that have a w in them using filter.
'''
lst_check = ['plums', 'watermelon', 'kiwi', 'strawberries', 'blueberries', 'peaches', 'apples', 'mangos', 'papaya']
filter_testing = filter(lambda word: 'w' in word, lst_check)
print(list(filter_testing))
# Output: ['watermelon', 'kiwi', 'strawberries']
'''
2. Using filter, filter lst so that it only contains words containing the letter “o”. Assign to variable lst2. Do not hardcode this.
'''
lst = ["witch", "halloween", "pumpkin", "cat", "candy", "wagon", "moon"]
lst2 = filter(lambda word: 'o' in word, lst)
print(list(lst2))
'''
3. Below, we have provided a list of strings called countries. Use filter to produce a list called b_countries that only contains the strings from countries that begin with B.
'''
countries = ['Canada', 'Mexico', 'Brazil', 'Chile', 'Denmark', 'Botswana', 'Spain', 'Britain', 'Portugal', 'Russia', 'Thailand', 'Bangladesh', 'Nigeria', 'Argentina', 'Belarus', 'Laos', 'Australia', 'Panama', 'Egypt', 'Morocco', 'Switzerland', 'Belgium']
b_countries = filter(lambda c: c[0] == 'B', countries)
print(list(b_countries))
# ['Brazil', 'Botswana', 'Britain', 'Bangladesh', 'Belarus', 'Belgium']
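# For comparison (an illustrative alternative, not part of the exercises above):
# each filter(lambda ...) call can also be written as a list comprehension.
b_countries_lc = [c for c in countries if c.startswith('B')]
print(b_countries_lc)  # same result as the filter-based version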
|
python
|
from django.dispatch import Signal
signup_complete = Signal(providing_args=["user",])
activation_complete = Signal(providing_args=["user",])
confirmation_complete = Signal(providing_args=["user","old_email"])
password_complete = Signal(providing_args=["user",])
order_complete = Signal(providing_args=["user",])
email_change = Signal(providing_args=["user","prev_email","new_email"])
profile_change = Signal(providing_args=["user",])
account_signin = Signal(providing_args=["user",])
account_signout = Signal(providing_args=["user",])
|
python
|
#!/usr/bin/env python3
from setuptools import setup
setup(
name='lilist',
version='0.1.0',
description='A linear interpolation list class',
url='http://github.com/MatthewScholefield/lilist',
author='Matthew Scholefield',
author_email='[email protected]',
license='MIT',
py_modules=[
'lilist'
]
)
|
python
|
from unittest import TestCase
import json
import attr
from marshmallow_helpers import RegisteredEnum, attr_with_schema
def enum_to_schema(enum_cls):
@attr_with_schema(register_as_scheme=True, strict=True)
@attr.s(auto_attribs=True)
class MyEnum:
enum: enum_cls
return MyEnum.schema
def enum_to_field(enum_cls):
return enum_to_schema(enum_cls)._declared_fields['enum']
class MyIntEnum(int, RegisteredEnum):
a = 1
b = 2
c = 3
class MyStrEnum(RegisteredEnum):
a = "A"
b = "B"
c = "C"
class MyTupleEnum(tuple, RegisteredEnum):
a = (1, "a")
b = (2, "b")
c = (3, "c")
class MyByKeyIntEnum(int, RegisteredEnum):
__by_value__ = False
a = 1
b = 2
c = 3
class MyLoadByKeyIntEnum(int, RegisteredEnum):
__load_by_value__ = False
a = 1
b = 2
c = 3
class MyDumpByKeyIntEnum(int, RegisteredEnum):
__dump_by_value__ = False
a = 1
b = 2
c = 3
class EnumTest(TestCase):
def test_enum_metadata(self):
self.assertListEqual(
enum_to_field(MyIntEnum).metadata.get('enum', []),
[1, 2, 3])
self.assertListEqual(
enum_to_field(MyStrEnum).metadata.get('enum', []),
["A", "B", "C"])
self.assertListEqual(
enum_to_field(MyTupleEnum).metadata.get('enum', []),
[(1, "a"), (2, "b"), (3, "c")])
self.assertListEqual(
enum_to_field(MyByKeyIntEnum).metadata.get('enum', []),
["a", "b", "c"])
self.assertListEqual(
enum_to_field(MyLoadByKeyIntEnum).metadata.get('enum', []),
["a", "b", "c"])
self.assertListEqual(
enum_to_field(MyDumpByKeyIntEnum).metadata.get('enum', []),
[1, 2, 3])
class SchemaTest(TestCase):
def test_loads(self):
self.assertEqual(
enum_to_schema(MyIntEnum)().loads('{"enum": 1}').enum,
MyIntEnum.a)
self.assertEqual(
enum_to_schema(MyStrEnum)().loads('{"enum": "A"}').enum,
MyStrEnum.a)
self.assertEqual(
enum_to_schema(MyTupleEnum)().loads('{"enum": [1, "a"]}').enum,
MyTupleEnum.a)
self.assertEqual(
enum_to_schema(MyByKeyIntEnum)().loads('{"enum": "a"}').enum,
MyByKeyIntEnum.a)
self.assertEqual(
enum_to_schema(MyLoadByKeyIntEnum)().loads('{"enum": "a"}').enum,
MyLoadByKeyIntEnum.a)
self.assertEqual(
enum_to_schema(MyDumpByKeyIntEnum)().loads('{"enum": 1}').enum,
MyDumpByKeyIntEnum.a)
def test_dumps(self):
self.assertEqual(
enum_to_schema(MyIntEnum)().dumps({"enum": MyIntEnum.a}),
json.dumps({"enum": 1}))
self.assertEqual(
enum_to_schema(MyStrEnum)().dumps({"enum": MyStrEnum.a}),
json.dumps({"enum": "A"}))
self.assertEqual(
enum_to_schema(MyTupleEnum)().dumps({"enum": MyTupleEnum.a}),
json.dumps({"enum": (1, "a")}))
self.assertEqual(
enum_to_schema(MyByKeyIntEnum)().dumps({"enum": MyByKeyIntEnum.a}),
json.dumps({"enum": "a"}))
self.assertEqual(
enum_to_schema(MyLoadByKeyIntEnum)().dumps(
{"enum": MyLoadByKeyIntEnum.a}),
json.dumps({"enum": 1}))
self.assertEqual(
enum_to_schema(MyDumpByKeyIntEnum)().dumps(
{"enum": MyDumpByKeyIntEnum.a}),
json.dumps({"enum": "a"}))
|
python
|
from flask import Flask
from flask_cors import CORS
import json, sys, os, base64
app = Flask(__name__)
CORS(app)
import logging
logging.getLogger("werkzeug").setLevel(logging.ERROR)
@app.route("/set_contest/<contestname>/<userhash>")
def set_contest(contestname, userhash):
print(base64.b64decode(contestname.replace("-", "/")))
if userhash == serverhash:
with open("static/contest.txt", "w") as f:
f.write("".join(map(chr, base64.b64decode(contestname.replace("-", "/")))))
return ""
@app.route("/current-contest")
def getCurrentContest():
with open("static/contest.txt", "r") as f:
return f.read().strip()
def contestIsOngoing():
return getCurrentContest() != ""
@app.route("/problems")
def serveProblems():
return json.dumps(getProblems())
def getProblems():
try:
return sorted(os.listdir("static/contests/%s/files" % getCurrentContest()))
except:
return []
@app.route("/problem/<int:id>")
def problem(id):
with open("static/contests/%s/files/" % getCurrentContest() + getProblems()[id], "r") as f:
return f.read()
def fullname(filename):
with open("static/contests/%s/files/" % getCurrentContest() + filename, "r",encoding='utf8') as f:
return f.read().split("\n")[0][5:-4]
@app.route("/fullnames")
def getFullNames():
return json.dumps(list(map(fullname, getProblems())))
@app.route("/data/<name>")
def getData(name):
contest = getCurrentContest()
data = getattr(getattr(__import__("static.contests.%s.Data.%s" % (contest, name)).contests, contest).Data, name)
return json.dumps({
"inputs": data.inputs,
"outputs": data.outputs,
"timelimit": data.timelimit,
"points": data.points
})
if __name__ == "__main__":
if len(sys.argv) >= 2:
try:
port = int(sys.argv[1])
except:
port = 5005
else:
port = 5005
if len(sys.argv) >= 3 and not sys.argv[2].startswith("--"):
serverhash = sys.argv[2]
else:
serverhash = "7d509328bd69ef7406baf28bd9897c0bf724d8d716b014d0f95f2e8dd9c43a06"
app.run(host = "0.0.0.0", port = port, debug = "--debug" in sys.argv)
|
python
|
from functools import reduce
from operator import mul
numstr = '73167176531330624919225119674426574742355349194934\
96983520312774506326239578318016984801869478851843\
85861560789112949495459501737958331952853208805511\
12540698747158523863050715693290963295227443043557\
66896648950445244523161731856403098711121722383113\
62229893423380308135336276614282806444486645238749\
30358907296290491560440772390713810515859307960866\
70172427121883998797908792274921901699720888093776\
65727333001053367881220235421809751254540594752243\
52584907711670556013604839586446706324415722155397\
53697817977846174064955149290862569321978468622482\
83972241375657056057490261407972968652414535100474\
82166370484403199890008895243450658541227588666881\
16427171479924442928230863465674813919123162824586\
17866458359124566529476545682848912883142607690042\
24219022671055626321111109370544217506941658960408\
07198403850962455444362981230987879927244284909188\
84580156166097919133875499200524063689912560717606\
05886116467109405077541002256983155200055935729725\
71636269561882670428252483600823257530420752963450'
def adjacencyProduct(n):
maxprod = -1
    for i in range(len(numstr)-n+1):  # include the final window of length n
intstrs = list(numstr[i:i+n])
ints = list(map(int, intstrs))
thisprod = reduce(mul, ints, 1)
if thisprod > maxprod:
maxprod = thisprod
return maxprod
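# An equivalent compact variant (illustrative sketch using the same numstr,
# reduce and mul as above): slide a window of length n and take the max product.
def adjacency_product_alt(n):
    return max(
        reduce(mul, map(int, numstr[i:i+n]), 1)
        for i in range(len(numstr)-n+1)
    )

# adjacency_product_alt(13) gives the same result as adjacencyProduct(13)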
if __name__=="__main__":
n = 13
mp = adjacencyProduct(n)
print(mp) #23514624000
|
python
|
import asyncio
import unittest.mock
import pytest
START = object()
END = object()
RETVAL = object()
@pytest.fixture
def mock():
return unittest.mock.Mock(return_value=RETVAL)
@pytest.fixture
async def async_fixture(mock):
return await asyncio.sleep(0.1, result=mock(START))
@pytest.mark.asyncio
async def test_async_fixture(async_fixture, mock):
assert mock.call_count == 1
assert mock.call_args_list[-1] == unittest.mock.call(START)
assert async_fixture is RETVAL
|
python
|
r = float(input('How much money do you have in your wallet? R$'))
print('With R${:.2f} you can buy US${:.2f}'.format(r, (r/3.27)))
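# Illustrative variation (assuming the same fixed rate of 3.27 BRL per USD):
# naming the rate as a constant makes the conversion easier to adjust.
BRL_PER_USD = 3.27

def brl_to_usd(reais):
    return reais / BRL_PER_USD

print('US${:.2f}'.format(brl_to_usd(100.0)))  # US$30.58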
|
python
|
"""
XMLAction module.
"""
from pineboolib.core.utils import logging
import os.path
from pineboolib.core.utils.struct import ActionStruct
from .utils.path import _path, coalesce_path
from typing import Optional, Any, Union, TYPE_CHECKING
if TYPE_CHECKING:
from pineboolib.fllegacy.flaction import FLAction # noqa: F401
from pineboolib.fllegacy.flformdb import FLFormDB
from pineboolib.fllegacy.flformrecorddb import FLFormRecordDB
from .moduleactions import ModuleActions # noqa: F401
from .database.pnsqlcursor import PNSqlCursor # noqa: F401
class XMLAction(ActionStruct):
"""
Information related to actions specified in XML modules.
"""
logger = logging.getLogger("main.XMLAction")
mod: Optional["ModuleActions"]
alias: str
def __init__(self, *args, project, name=None, **kwargs) -> None:
"""
Constructor.
"""
super(XMLAction, self).__init__(*args, **kwargs)
self.mod = None
self.project = project
if not self.project:
raise ValueError("XMLActions must belong to a project")
self.form = self._v("form")
self.name = name or self._rv("name") # Mandatory
self.description = self._v("description")
self.scriptform = self._v("scriptform")
self.table = self._v("table")
self.mainform = self._v("mainform")
self.mainscript = self._v("mainscript")
self.formrecord = self._v("formrecord")
self.scriptformrecord = self._v("scriptformrecord")
self.mainform_widget: Optional[FLFormDB] = None
self.formrecord_widget: Optional[FLFormRecordDB] = None
self._loaded = False
def loadRecord(self, cursor: Optional["PNSqlCursor"]) -> "FLFormRecordDB":
"""
Load FLFormRecordDB by default.
        @param cursor. Assigns a cursor to the FLFormRecord
        @return widget with the initialized form
"""
self._loaded = getattr(self.formrecord_widget, "_loaded", False)
if not self._loaded:
if self.formrecord_widget and getattr(self.formrecord_widget, "widget", None):
self.formrecord_widget.widget.doCleanUp()
# self.formrecord_widget.widget = None
self.logger.debug("Loading record action %s . . . ", self.name)
if self.project.DGI.useDesktop():
# FIXME: looks like code duplication. Bet both sides of the IF do the same.
self.formrecord_widget = self.project.conn.managerModules().createFormRecord(
self, None, cursor, None
)
else:
# self.script = getattr(self, "script", None)
# if isinstance(self.script, str) or self.script is None:
script = self.load_script(self.scriptformrecord, None)
self.formrecord_widget = script.form
if self.formrecord_widget is None:
raise Exception("After loading script, no form was loaded")
self.formrecord_widget.widget = self.formrecord_widget
self.formrecord_widget.iface = self.formrecord_widget.widget.iface
self.formrecord_widget._loaded = True
# self.formrecord_widget.setWindowModality(Qt.ApplicationModal)
self.logger.debug(
"End of record action load %s (iface:%s ; widget:%s)",
self.name,
getattr(self.formrecord_widget, "iface", None),
getattr(self.formrecord_widget, "widget", None),
)
if self.formrecord_widget is None:
raise Exception("Unexpected: No formrecord loaded")
if cursor:
self.formrecord_widget.setCursor(cursor)
return self.formrecord_widget
def load(self) -> "FLFormDB":
"""
Load master form.
"""
self._loaded = getattr(self.mainform_widget, "_loaded", False)
if not self._loaded:
if self.mainform_widget is not None and getattr(self.mainform_widget, "widget", None):
self.mainform_widget.widget.doCleanUp()
if self.project.DGI.useDesktop() and hasattr(self.project.main_window, "w_"):
self.logger.info("Loading action %s (createForm). . . ", self.name)
self.mainform_widget = self.project.conn.managerModules().createForm(
action=self, parent=self.project.main_window.w_
)
else:
self.logger.info(
"Loading action %s (load_script %s). . . ", self.name, self.scriptform
)
script = self.load_script(self.scriptform, None)
self.mainform_widget = script.form # FormDBWidget FIXME: Add interface for types
if self.mainform_widget is None:
raise Exception("After loading script, no form was loaded")
self.mainform_widget.widget = self.mainform_widget
self.mainform_widget.iface = self.mainform_widget.widget.iface
self.mainform_widget._loaded = True
self.logger.debug(
"End of action load %s (iface:%s ; widget:%s)",
self.name,
getattr(self.mainform_widget, "iface", None),
getattr(self.mainform_widget, "widget", None),
)
if self.mainform_widget is None:
raise Exception("Unexpected: No form loaded")
return self.mainform_widget
def execMainScript(self, name) -> None:
"""
Execute function for main action.
"""
a = self.project.conn.manager().action(name)
if not a:
self.logger.warning("No existe la acción %s", name)
return
self.project.call("%s.main" % a.name(), [], None, False)
def formRecordWidget(self) -> "FLFormRecordDB":
"""
Return formrecord widget.
This is needed because sometimes there isn't a FLFormRecordDB initialized yet.
        @return widget of the formRecord.
"""
if not getattr(self.formrecord_widget, "_loaded", None):
self.loadRecord(None)
if self.formrecord_widget is None:
raise Exception("Unexpected: No form loaded")
return self.formrecord_widget
# FIXME: cursor is FLSqlCursor but should be something core, not "FL". Also, an interface
def openDefaultFormRecord(self, cursor: "PNSqlCursor", wait: bool = True) -> None:
"""
Open FLFormRecord specified on defaults.
        @param cursor. Cursor to be used by the FLFormRecordDB
"""
self.logger.info("Opening default formRecord for Action %s", self.name)
w = self.loadRecord(cursor)
# w.init()
if w:
if self.project.DGI.localDesktop():
if wait:
w.show_and_wait()
else:
w.show()
def openDefaultForm(self) -> None:
"""
Open Main FLForm specified on defaults.
"""
self.logger.info("Opening default form for Action %s", self.name)
w = self.load()
if w:
if self.project.DGI.localDesktop():
w.show()
def execDefaultScript(self) -> None:
"""
Execute script specified on default.
"""
self.logger.info("Executing default script for Action %s", self.name)
script = self.load_script(self.scriptform, None)
self.mainform_widget = script.form
if self.mainform_widget is None:
raise Exception("Unexpected: No form loaded")
if hasattr(self.mainform_widget, "iface"):
if self.mainform_widget.iface is not None:
self.mainform_widget.iface.main()
else:
self.mainform_widget.main()
def load_script(
self, scriptname: Optional[str], parent: Optional["FLFormDB"] = None
) -> Any: # returns loaded script
"""
Transform QS script into Python and starts it up.
        @param scriptname. Name of the script to convert
        @param parent. Object into which the script is loaded; if not specified, it is self.script
"""
# FIXME: Parent logic is broken. We're loading scripts to two completely different objects.
from importlib import machinery
if scriptname:
scriptname = scriptname.replace(".qs", "")
self.logger.debug(
"Loading script %s of %s for action %s", scriptname, parent, self.name
)
else:
self.logger.info("No script to load on %s for action %s", parent, self.name)
parent_object = parent
action_: Union[XMLAction, "FLAction"] # XMLAction / FLAction
if parent is None:
action_ = self
else:
possible_flaction_ = getattr(parent, "_action", None)
if not isinstance(possible_flaction_, XMLAction):
from .utils.convert_flaction import convertFLAction # type: ignore
action_ = convertFLAction(possible_flaction_)
elif possible_flaction_ is not None:
action_ = possible_flaction_
python_script_path = None
        # first the defaults, then we override them
from pineboolib.qsa import emptyscript # type: ignore
script_loaded: Any = emptyscript
if scriptname is None:
script_loaded.form = script_loaded.FormInternalObj(
action=action_, project=self.project, parent=parent_object
)
if parent:
parent.widget = script_loaded.form
parent.iface = parent.widget.iface
return script_loaded
script_path_py = self.project.DGI.alternative_script_path("%s.py" % scriptname)
if script_path_py is None:
script_path_qs = _path("%s.qs" % scriptname, False)
script_path_py = coalesce_path("%s.py" % scriptname, "%s.qs.py" % scriptname, None)
mng_modules = self.project.conn.managerModules()
if mng_modules.staticBdInfo_ and mng_modules.staticBdInfo_.enabled_:
from pineboolib.fllegacy.flmodulesstaticloader import FLStaticLoader # FIXME
ret_py = FLStaticLoader.content(
"%s.qs.py" % scriptname, mng_modules.staticBdInfo_, True
            ) # With True it only returns the path
if ret_py:
script_path_py = ret_py
else:
ret_qs = FLStaticLoader.content(
"%s.qs" % scriptname, mng_modules.staticBdInfo_, True
                ) # With True it only returns the path
if ret_qs:
script_path_qs = ret_qs
if script_path_py is not None:
script_path = script_path_py
self.logger.info("Loading script PY %s . . . ", scriptname)
if not os.path.isfile(script_path):
raise IOError
try:
self.logger.debug(
"Cargando %s : %s ",
scriptname,
script_path.replace(self.project.tmpdir, "tempdata"),
)
loader = machinery.SourceFileLoader(scriptname, script_path)
script_loaded = loader.load_module() # type: ignore
except Exception:
self.logger.exception("ERROR al cargar script PY para la accion %s:", action_.name)
elif script_path_qs:
script_path = script_path_qs
self.project.parseScript(script_path)
self.logger.info("Loading script QS %s . . . ", scriptname)
python_script_path = (script_path + ".xml.py").replace(".qs.xml.py", ".qs.py")
try:
self.logger.debug(
"Cargando %s : %s ",
scriptname,
python_script_path.replace(self.project.tmpdir, "tempdata"),
)
loader = machinery.SourceFileLoader(scriptname, python_script_path)
script_loaded = loader.load_module() # type: ignore
except Exception:
self.logger.exception("ERROR al cargar script QS para la accion %s:", action_.name)
script_loaded.form = script_loaded.FormInternalObj(action_, self.project, parent_object)
if parent_object and parent:
parent_object.widget = script_loaded.form
if getattr(parent_object.widget, "iface", None):
parent_object.iface = parent.widget.iface
return script_loaded
def unknownSlot(self) -> None:
"""Log error for actions with unknown slots or scripts."""
self.logger.error("Executing unknown script for Action %s", self.name)
|
python
|
from datetime import datetime
from pathlib import Path
import unittest
import pandas as pd
import numpy as np
import vak
import article.syntax
HERE = Path(__file__).parent
DATA_DIR = HERE.joinpath('test_data')
class TestSyntax(unittest.TestCase):
def test_date_from_cbin_filename(self):
CBIN_FILENAME = 'bf_song_repo/gy6or6/032212/gy6or6_baseline_220312_0836.3.cbin'
dt_from_cbin = article.syntax.date_from_cbin_filename(CBIN_FILENAME)
self.assertTrue(
isinstance(dt_from_cbin, datetime)
)
self.assertTrue(
dt_from_cbin.date() == datetime(2012, 3, 22, 8, 36).date()
)
self.assertTrue(
dt_from_cbin.time() == datetime(2012, 3, 22, 8, 36).time()
)
def test_make_df_trans_probs(self):
vds_list = [str(path) for path in DATA_DIR.joinpath('vds').glob('*.vds.json')]
vds_list = [vak.Dataset.load(path) for path in vds_list]
df = article.syntax.make_df_trans_probs(vds_list)
self.assertTrue(
type(df) == pd.DataFrame
)
for field in article.syntax.FIELDS_SYNTAX:
self.assertTrue(field in df.columns)
def test_get_trans_prob(self):
vds_list = sorted(
[str(path) for path in DATA_DIR.joinpath('vds').glob('*.vds.json')]
)
vds_list = [vak.Dataset.load(path) for path in vds_list]
vds_list = vds_list[:1] # just keep one
df = article.syntax.make_df_trans_probs(vds_list)
date = datetime(2012, 3, 22, 0, 0).date()
label = 'S'
label_plus_one = 'i'
p = article.syntax.get_trans_prob(df, date, label, label_plus_one)
self.assertTrue(
type(p) == float
)
self.assertTrue(
p > 0.99
)
def test_find_branch_point(self):
trans_mat = np.asarray(
[
[0., 1.0, 0., 0.],
[0., 0., 0.1, 0.9],
[0., 0., 0., 1.0],
])
labels = list('abcd')
bp_inds, bp_lbl = article.syntax.find_branch_points(trans_mat, labels)
self.assertTrue(
len(bp_inds) == 1
)
self.assertTrue(
bp_lbl == ['b']
)
if __name__ == '__main__':
unittest.main()
|
python
|
"""
The ``fine_tune.py`` file is used to continue training (or `fine-tune`) a model on a `different
dataset` than the one it was originally trained on. It requires a saved model archive file, a path
to the data you will continue training with, and a directory in which to write the results.
.. code-block:: bash
$ python fine_tune.py --help
usage: fine_tune.py [-h] -s SERIALIZATION_DIR -c CONFIG_FILE_PATH -p
PRETRAINED_DIR -m PRETRAINED_MODEL_NAME
optional arguments:
-h, --help show this help message and exit
-s SERIALIZATION_DIR, --serialization_dir SERIALIZATION_DIR
Directory in which to save the model and its logs.
-c CONFIG_FILE_PATH, --config_file_path CONFIG_FILE_PATH
Path to parameter file describing the new multi-tasked
model to be fine-tuned.
-p PRETRAINED_DIR, --pretrained_dir PRETRAINED_DIR
Directory in which was saved the pre-trained model.
-m PRETRAINED_MODEL_NAME, --pretrained_model_name PRETRAINED_MODEL_NAME
Name of the weight file for the pretrained model to
fine-tune in the ``pretrained_dir``.
"""
import argparse
import itertools
import os
import json
import re
from copy import deepcopy
import torch
from typing import List, Dict, Any, Tuple
import logging
logging.basicConfig(format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO)
from hmtl.tasks import Task
from hmtl.training.multi_task_trainer import MultiTaskTrainer
from hmtl.common import create_and_set_iterators
from evaluate import evaluate
from train import train_model
from allennlp.models.model import Model
from allennlp.data import Vocabulary
from allennlp.data.iterators import DataIterator
from allennlp.commands.train import create_serialization_dir
from allennlp.common.params import Params
from allennlp.common.checks import ConfigurationError
from allennlp.nn import RegularizerApplicator
logger = logging.getLogger(__name__)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--serialization_dir", required=True, help="Directory in which to save the model and its logs.", type=str)
parser.add_argument("-c", "--config_file_path", required=True, help="Path to parameter file describing the new multi-tasked model to be fine-tuned.", type=str)
parser.add_argument("-p", "--pretrained_dir", required=True, help="Directory in which was saved the pre-trained model.", type=str)
parser.add_argument("-m", "--pretrained_model_name", required=True, help="Name of the weight file for the pretrained model to fine-tune in the ``pretrained_dir``.", type=str)
args = parser.parse_args()
params = Params.from_file(params_file=args.config_file_path)
serialization_dir = args.serialization_dir
create_serialization_dir(params, serialization_dir, False)
serialization_params = deepcopy(params).as_dict(quiet=True)
with open(os.path.join(serialization_dir, "config.json"), "w") as param_file:
json.dump(serialization_params, param_file, indent=4)
task_list = []
task_keys = [key for key in params.keys() if re.search("^task_", key)]
for key in task_keys:
logger.info("Creating %s", key)
task_params = params.pop(key)
task_description = task_params.pop("task_description")
task_data_params = task_params.pop("data_params")
task = Task.from_params(params=task_description)
task_list.append(task)
_, _ = task.load_data_from_params(params=task_data_params)
vocab = Vocabulary.from_files(os.path.join(args.pretrained_dir, "vocabulary"))
logger.info("Vocabulary loaded from %s", os.path.join(args.pretrained_dir, "vocabulary"))
vocab.save_to_files(os.path.join(serialization_dir, "vocabulary"))
logger.info("Save vocabulary to file %s", os.path.join(serialization_dir, "vocabulary"))
task_list = create_and_set_iterators(params=params, task_list=task_list, vocab=vocab)
regularizer = RegularizerApplicator.from_params(params.pop("regularizer", []))
model_params = params.pop("model")
model = Model.from_params(vocab=vocab, params=model_params, regularizer=regularizer)
logger.info("Loading the pretrained model from %s", os.path.join(args.pretrained_dir, args.pretrained_model_name))
try:
pretrained_model_state_path = os.path.join(args.pretrained_dir, args.pretrained_model_name)
pretrained_model_state = torch.load(pretrained_model_state_path)
model.load_state_dict(state_dict=pretrained_model_state)
except:
raise ConfigurationError("It appears that the configuration of the pretrained model and " "the model to fine-tune are not compatible. " "Please check the compatibility of the encoders and taggers in the " "config files.")
multi_task_trainer_params = params.pop("multi_task_trainer")
trainer = MultiTaskTrainer.from_params(model=model, task_list=task_list, serialization_dir=serialization_dir, params=multi_task_trainer_params)
metrics = train_model(multi_task_trainer=trainer, recover=False)
if metrics is not None:
logging.info("Fine-tuning is finished ! Let's have a drink. It's on the house !")
|
python
|
#!/usr/bin/python3
from brownie import HuskyTokenDeployer, accounts, HuskyToken, HuskyTokenMinter, Wei
def main():
provost = accounts.load('husky')
admin = provost
is_live = False
if provost.balance() == 0:
accounts[0].transfer(provost, Wei('1 ether'))
husky_token = HuskyToken.deploy("Husky", "HUSKY", 0, {'from': provost}, publish_source=is_live)
husky_token_minter = HuskyTokenMinter.deploy(husky_token, admin, {'from': provost}, publish_source=is_live)
husky_token.addMinter(husky_token_minter, {'from': provost});
husky_token.renounceMinter({'from': provost});
|
python
|
from pprint import pprint
import gym
env = gym.make('ChessVsRandomBot-v0')
def available_moves():
state = env.state
moves_p1 = env.get_possible_moves(state, 1)
moves_p2 = env.get_possible_moves(state, -1)
pprint(moves_p1)
pprint(moves_p2)
# no actions left -> resign
if len(moves_p1) == 0:
print('resigning is the only move...')
resign_action = env.resign()
# chess coordinates Player 1
for m in moves_p1:
print(env.convert_coords(m))
# chess coordinates Player 2
for m in moves_p2:
print(env.convert_coords(m))
# Player 1 moves
for piece in set([m['piece_id'] for m in moves_p1]):
env.render_moves(state, piece, moves_p1, mode='human')
# Player 2 moves
for piece in set([m['piece_id'] for m in moves_p2]):
env.render_moves(state, piece, moves_p2, mode='human')
if __name__ == "__main__":
available_moves()
|
python
|
def is_valid(phrase):
words = phrase.split()
word_letters = list(map("".join, map(sorted, words)))
return len(set(word_letters)) == len(words)
passphrases = open("day4.txt").readlines()
valid_phrases = list(filter(is_valid, passphrases))
print('Valid Passphrases:', len(valid_phrases)) # = 231
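# Illustrative variant (assuming the simpler rule where only exact duplicate
# words are forbidden, without treating anagrams as duplicates):
def is_valid_no_duplicates(phrase):
    words = phrase.split()
    return len(set(words)) == len(words)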
|
python
|
import os
import datetime
import argparse
import numpy
import networks
import torch
modelnames = networks.__all__
# import datasets
datasetNames = ('Vimeo_90K_interp',) #datasets.__all__
parser = argparse.ArgumentParser(description='DAIN')
parser.add_argument('--debug',action = 'store_true', help='Enable debug mode')
parser.add_argument('--netName', type=str, default='DAIN',
choices = modelnames,help = 'model architecture: ' +
' | '.join(modelnames) +
' (default: DAIN)')
parser.add_argument('--datasetName', default='Vimeo_90K_interp',
choices= datasetNames,nargs='+',
help='dataset type : ' +
' | '.join(datasetNames) +
' (default: Vimeo_90K_interp)')
parser.add_argument('--datasetPath',default='',help = 'the path of selected datasets')
parser.add_argument('--dataset_split', type = int, default=97, help = 'Split a dataset into training and validation by percentage (default: 97)')
parser.add_argument('--seed', type=int, default=1, help='random seed (default: 1)')
parser.add_argument('--numEpoch', '-e', type = int, default=100, help= 'Number of epochs to train (default: 100)')
parser.add_argument('--batch_size', '-b',type = int ,default=1, help = 'batch size (default:1)' )
parser.add_argument('--workers', '-w', type =int,default=8, help = 'parallel workers for loading training samples (default: 8)')
parser.add_argument('--channels', '-c', type=int,default=3,choices = [1,3], help ='channels of images (default:3)')
parser.add_argument('--filter_size', '-f', type=int, default=4, help = 'the size of filters used (default: 4)',
choices=[2,4,6, 5,51]
)
parser.add_argument('--lr', type =float, default= 0.002, help= 'the basic learning rate for three subnetworks (default: 0.002)')
parser.add_argument('--rectify_lr', type=float, default=0.001, help = 'the learning rate for rectify/refine subnetworks (default: 0.001)')
parser.add_argument('--save_which', '-s', type=int, default=1, choices=[0,1], help='choose which result to save: 0 ==> interpolated, 1==> rectified')
parser.add_argument('--time_step', type=float, default=0.5, help='choose the time steps')
parser.add_argument('--flow_lr_coe', type = float, default=0.01, help = 'relative learning rate w.r.t basic learning rate (default: 0.01)')
parser.add_argument('--occ_lr_coe', type = float, default=1.0, help = 'relative learning rate w.r.t basic learning rate (default: 1.0)')
parser.add_argument('--filter_lr_coe', type = float, default=1.0, help = 'relative learning rate w.r.t basic learning rate (default: 1.0)')
parser.add_argument('--ctx_lr_coe', type = float, default=1.0, help = 'relative learning rate w.r.t basic learning rate (default: 1.0)')
parser.add_argument('--depth_lr_coe', type = float, default=0.01, help = 'relative learning rate w.r.t basic learning rate (default: 0.01)')
parser.add_argument('--alpha', type=float,nargs='+', default=[0.0, 1.0], help= 'the ration of loss for interpolated and rectified result (default: [0.0, 1.0])')
parser.add_argument('--epsilon', type = float, default=1e-6, help = 'the epsilon for Charbonnier loss, etc. (default: 1e-6)')
parser.add_argument('--weight_decay', type = float, default=0, help = 'the weight decay for whole network ' )
parser.add_argument('--patience', type=int, default=5, help = 'the patience of reduce on plateou')
parser.add_argument('--factor', type = float, default=0.2, help = 'the factor of reduce on plateou')
parser.add_argument('--pretrained', dest='SAVED_MODEL', default=None, help ='path to the pretrained model weights')
parser.add_argument('--no-date', action='store_true', help='don\'t append date timestamp to folder' )
parser.add_argument('--use_cuda', default= True, type = bool, help='use cuda or not')
parser.add_argument('--use_cudnn',default=1,type=int, help = 'use cudnn or not')
parser.add_argument('--dtype', default=torch.cuda.FloatTensor, choices = [torch.cuda.FloatTensor,torch.FloatTensor],help = 'tensor data type ')
# parser.add_argument('--resume', default='', type=str, help='path to latest checkpoint (default: none)')
parser.add_argument('--uid', type=str, default= None, help='unique id for the training')
parser.add_argument('--force', action='store_true', help='force to override the given uid')
parser.add_argument('--video', type = str, default= None, help='')
parser.add_argument('--outStr', type = str, default= None, help='')
parser.add_argument('--outFolder', type = str, default= None, help='')
parser.add_argument('--fps', type = float, default= None, help='')
parser.add_argument('--palette', type = int, default= 0, help='')
parser.add_argument('--resc', type = int, default= 0, help='')
parser.add_argument('--maxResc', type = int, default= 0, help='')
parser.add_argument('--loop', type = int, default= 0, help='')
parser.add_argument('--framerateConf', type = int, default= 0, help='')
parser.add_argument('--use60RealFps', type = float, default= 60, help='')
parser.add_argument('--use60', type = int, default= 0, help='')
parser.add_argument('--use60C1', type = int, default= 0, help='')
parser.add_argument('--use60C2', type = int, default= 0, help='')
parser.add_argument('--interpolationMethod', type = int, default= 0, help='')
parser.add_argument('--exportPng', type = int, default= 0, help='')
parser.add_argument('--useAnimationMethod', type = int, default= 1, help='')
parser.add_argument('--splitFrames', type = int, default= 0, help='')
parser.add_argument('--splitSize', type = int, default= 0, help='')
parser.add_argument('--splitPad', type = int, default= 0, help='')
parser.add_argument('--alphaMethod', type = int, default= 0, help='')
parser.add_argument('--inputMethod', type = int, default= 0, help='')
parser.add_argument('--cleanOriginal', type = int, default= 1, help='')
parser.add_argument('--cleanInterpol', type = int, default= 1, help='')
parser.add_argument('--doOriginal', type = int, default= 1, help='')
parser.add_argument('--doIntepolation', type = int, default= 1, help='')
parser.add_argument('--doVideo', type = int, default= 1, help='')
parser.add_argument('--checkSceneChanges', type = int, default= 1, help='')
parser.add_argument('--sceneChangeSensibility', type = int, default= 10, help='')
parser.add_argument('--uploadBar', type = None, default= None, help='')
parser.add_argument('--useWatermark', type = int, default= 0, help='')
args = parser.parse_args()
import shutil
save_path = ""
parser.add_argument('--save_path',default=save_path,help = 'the output dir of weights')
parser.add_argument('--log', default = save_path+'/log.txt', help = 'the log file in training')
parser.add_argument('--arg', default = save_path+'/args.txt', help = 'the args used')
args = parser.parse_args()
|
python
|