| content (string, up to 894k characters) | type (2 classes) |
|---|---|
"""
3->7->5->12->None
"""
class SinglyListNode(object):
def __init__(self, value):
self.value = value
self.next = None
a = SinglyListNode(3)
b = SinglyListNode(7)
c = SinglyListNode(5)
d = SinglyListNode(12)
a.next = b
b.next = c
c.next = d
print(a.next)
print(b)
print(b.next)
print(c)
def iterate(head):
    # Traverse the linked list from the head, printing each value, and stop once the next pointer is None.
    current = head
    while current is not None:
print(current.value)
current = current.next
iterate(a)
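# For reference, iterate(a) walks the list built above and prints one value per line:
#   3
#   7
#   5
#   12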
|
python
|
from __future__ import absolute_import, unicode_literals
import logging
from django.core.management.base import BaseCommand
from housing_counselor.geocoder import BulkZipCodeGeocoder, GeocodedZipCodeCsv
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Geocode all possible zipcodes'
def add_arguments(self, parser):
parser.add_argument('output_filename', help='output CSV filename')
parser.add_argument('-c', '--continue-file', action='store_true',
help='continue partially complete output file')
def handle(self, *args, **options):
output_filename = options['output_filename']
logger.info('geocoding zipcodes to %s', output_filename)
if options['continue_file']:
mode = 'a'
zipcodes = GeocodedZipCodeCsv.read(output_filename)
start = int(max(zipcodes.keys())) + 1
else:
mode = 'w'
start = 0
logger.info('starting geocoding at %s', start)
zipcodes = BulkZipCodeGeocoder().geocode_zipcodes(start=start)
with open(output_filename, mode) as f:
GeocodedZipCodeCsv.write(f, zipcodes)
|
python
|
"""
Ejercicio 6
Escriba un programa que pida la fecha segun el formato 04/12/1973 y lo retome segun
el formato 1973/12/04
"""
from datetime import date
from datetime import datetime
fecha = str(input("Ingrese una fecha(formato dd/mm/aaaa): "))
fecha1 = datetime.strptime(fecha, "%d/%m/%Y")
fecha3 = datetime.strftime(fecha1, "%Y/%m/%d")
#print(fecha1)
print(fecha3)
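# Worked example (illustrative): the same conversion without the interactive prompt.
# datetime.strptime("04/12/1973", "%d/%m/%Y").strftime("%Y/%m/%d") evaluates to "1973/12/04".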
|
python
|
from .registries import Registry, meta_registry, QuerySet, Manager, MultipleObjectsReturned, DoesNotExist
__version__ = "0.2.1"
|
python
|
# Copyright 2019 The MACE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
from utils import device
from utils.util import MaceLogger
from utils.util import mace_check
def get_apu_version(enable_apu, android_ver, target_soc):
if enable_apu:
        android_ver = int(android_ver)
if android_ver <= 10: # android Q
target_soc = target_soc.lower()
if target_soc.startswith("mt67"):
return 1
else:
return 2
elif android_ver == 11: # android R
target_soc = target_soc.lower()
if target_soc.startswith("mt689") or target_soc == "mt6877":
return 4
else:
return 3
else: # android S
return 4
return -1
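# Illustrative examples of the mapping implemented above (values derived from the
# branches in get_apu_version(), not from any external MACE documentation):
#   get_apu_version(True, "10", "MT6779")  -> 1   # Android Q on an mt67xx SoC
#   get_apu_version(True, "10", "MT6885")  -> 2   # Android Q on other MediaTek SoCs
#   get_apu_version(True, "11", "mt6853")  -> 3   # Android R, not mt689x/mt6877
#   get_apu_version(True, "11", "mt6893")  -> 4   # Android R on mt689x/mt6877
#   get_apu_version(True, "12", "mt6983")  -> 4   # Android S and later
#   get_apu_version(False, "12", "mt6983") -> -1  # APU support disabled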
def get_apu_so_paths_by_props(android_ver, target_soc):
so_path_array = []
apu_version = get_apu_version(True, android_ver, target_soc)
so_path = "third_party/apu/"
if apu_version == 1 or apu_version == 2:
if apu_version == 1:
so_path += "android_Q/mt67xx/"
else:
so_path += "android_Q/mt68xx/"
frontend_so_path = so_path + "%s/libapu-frontend.so" % target_soc
if not os.path.exists(frontend_so_path):
frontend_so_path = so_path + "libapu-frontend.so"
so_path_array.append(frontend_so_path)
so_path_array.append(so_path + "%s/libapu-platform.so" % target_soc)
elif apu_version == 3:
so_path += "android_R/"
# For android R except mt689x&mt6877
so_path_array.append(so_path + "libapu-apuwareapusys.mtk.so")
so_path_array.append(so_path + "libapu-apuwareutils.mtk.so")
so_path_array.append(so_path + "libapu-apuwarexrp.mtk.so")
so_path_array.append(so_path + "libapu-frontend.so")
so_path_array.append(so_path + "libapu-platform.so")
else: # For android S and mt689x&mt6877 on android R
        mace_check(apu_version == 4, "Invalid apu version")
return so_path_array
def get_apu_so_paths(android_device):
target_props = android_device.info()
target_soc = target_props["ro.board.platform"]
    android_ver = int(target_props["ro.build.version.release"])
return get_apu_so_paths_by_props(android_ver, target_soc)
def parse_args():
base_parser = argparse.ArgumentParser(add_help=False)
base_parser.add_argument(
"--target_abi",
type=str,
default="arm64-v8a",
help="Target ABI: only support arm64-v8a"
)
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
version_parser = subparsers.add_parser(
'get-version',
parents=[base_parser],
help='get apu version')
version_parser.set_defaults(func=get_version)
copy_so_parser = subparsers.add_parser(
'copy-so-files',
parents=[base_parser],
help='copy apu files to apu_path')
copy_so_parser.add_argument(
"--apu_path",
type=str,
default="",
help="path for storing apu so files on device"
)
copy_so_parser.set_defaults(func=copy_so_files)
return parser.parse_known_args()
def get_cur_device_id(flags):
run_devices = device.choose_devices(flags.target_abi, "all")
run_device = None
device_num = len(run_devices)
if device_num == 0: # for CI
MaceLogger.warning("No Android devices are plugged in, "
"you need to copy `apu` so files by yourself.")
elif device_num > 1: # for CI
MaceLogger.warning("More than one Android devices are plugged in, "
"you need to copy `apu` so files by yourself.")
else:
run_device = run_devices[0]
return run_device
def get_version(flags):
device_id = get_cur_device_id(flags)
if device_id is not None:
android_device = device.create_device(flags.target_abi, device_id)
target_props = android_device.info()
target_soc = target_props["ro.board.platform"]
        android_ver = int(target_props["ro.build.version.release"])
apu_version = get_apu_version(True, android_ver, target_soc)
else:
apu_version = 4
MaceLogger.warning("Can not get unique device ID, MACE select the"
" latest apu version: %s" % apu_version)
sys.exit(apu_version)
def copy_so_files(flags):
apu_so_paths = []
device_id = get_cur_device_id(flags)
if device_id is not None:
android_device = device.create_device(flags.target_abi, device_id)
apu_so_paths = get_apu_so_paths(android_device)
for apu_so_path in apu_so_paths:
device.execute("cp -f %s %s" % (apu_so_path, flags.apu_path), True)
if __name__ == "__main__":
flags, args = parse_args()
flags.func(flags)
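# Example invocations (the script path below is an assumption; adjust it to wherever
# this file lives in the MACE source tree):
#   python tools/python/apu_utils.py get-version --target_abi arm64-v8a
#   python tools/python/apu_utils.py copy-so-files --target_abi arm64-v8a --apu_path /data/local/tmp/apu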
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the tag_linux.txt tagging file."""
import unittest
from plaso.containers import events
from plaso.lib import definitions
from plaso.parsers import bash_history
from plaso.parsers import docker
from plaso.parsers import dpkg
from plaso.parsers import selinux
from plaso.parsers import syslog
from plaso.parsers import utmp
from plaso.parsers import zsh_extended_history
from plaso.parsers.syslog_plugins import cron
from tests.data import test_lib
class LinuxTaggingFileTest(test_lib.TaggingFileTestCase):
"""Tests the tag_linux.txt tagging file.
In the tests below the EventData classes are used to catch failing tagging
rules in case event data types are renamed.
"""
_TAG_FILE = 'tag_linux.txt'
def testRuleApplicationExecution(self):
"""Tests the application_execution tagging rule."""
# Test: data_type is 'bash:history:command'
attribute_values_per_name = {}
self._CheckTaggingRule(
bash_history.BashHistoryEventData, attribute_values_per_name,
['application_execution'])
# Test: data_type is 'docker:json:layer'
attribute_values_per_name = {}
self._CheckTaggingRule(
docker.DockerJSONLayerEventData, attribute_values_per_name,
['application_execution'])
# Test: data_type is 'selinux:line' AND (audit_type is 'EXECVE' OR
# audit_type is 'USER_CMD')
attribute_values_per_name = {
'audit_type': ['EXECVE', 'USER_CMD']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['application_execution'])
# Test: data_type is 'shell:zsh:history'
attribute_values_per_name = {}
self._CheckTaggingRule(
zsh_extended_history.ZshHistoryEventData, attribute_values_per_name,
['application_execution'])
# Test: data_type is 'syslog:cron:task_run'
attribute_values_per_name = {}
self._CheckTaggingRule(
cron.CronTaskRunEventData, attribute_values_per_name,
['application_execution'])
# Test: reporter is 'sudo' AND body contains 'COMMAND='
attribute_values_per_name = {
'body': ['test if my COMMAND=bogus'],
'reporter': ['sudo']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['application_execution'])
# Test: reporter is 'CROND' AND body contains 'CMD'
attribute_values_per_name = {
'body': ['test if my CMD bogus'],
'reporter': ['CROND']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['application_execution'])
def testRuleLogin(self):
"""Tests the login tagging rule."""
# Test: data_type is 'linux:utmp:event' AND type == 7
attribute_values_per_name = {
'type': [7]}
self._CheckTaggingRule(
utmp.UtmpEventData, attribute_values_per_name,
['login'])
# Test: data_type is 'selinux:line' AND audit_type is 'LOGIN'
attribute_values_per_name = {
'audit_type': ['LOGIN']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['login'])
# Test: reporter is 'login' AND (body contains 'logged in' OR
# body contains 'ROOT LOGIN' OR body contains 'session opened')
attribute_values_per_name = {
'body': ['logged in', 'ROOT LOGIN', 'session opened'],
'reporter': ['login']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login'])
# Test: reporter is 'sshd' AND (body contains 'session opened' OR
# body contains 'Starting session')
attribute_values_per_name = {
'body': ['session opened', 'Starting session'],
'reporter': ['sshd']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login'])
# Test: reporter is 'dovecot' AND body contains 'imap-login: Login:'
attribute_values_per_name = {
'body': ['imap-login: Login:'],
'reporter': ['dovecot']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login'])
# Test: reporter is 'postfix/submission/smtpd' AND body contains 'sasl_'
attribute_values_per_name = {
'body': ['sasl_method=PLAIN, sasl_username='],
'reporter': ['postfix/submission/smtpd']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login'])
def testRuleLoginFailed(self):
"""Tests the login_failed tagging rule."""
# Test: data_type is 'selinux:line' AND audit_type is 'ANOM_LOGIN_FAILURES'
attribute_values_per_name = {
'audit_type': ['ANOM_LOGIN_FAILURES']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['login_failed'])
# Test: data_type is 'selinux:line' AND audit_type is 'USER_LOGIN' AND
# body contains 'res=failed'
attribute_values_per_name = {
'audit_type': ['USER_LOGIN'],
'body': ['res=failed']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['login_failed'])
# Test: data_type is 'syslog:line' AND body contains 'pam_tally2'
attribute_values_per_name = {
'body': ['pam_tally2']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login_failed'])
# Test: (reporter is 'sshd' OR
# reporter is 'login' OR
# reporter is 'postfix/submission/smtpd' OR
# reporter is 'sudo') AND
# body contains 'uthentication fail'
attribute_values_per_name = {
'body': ['authentication failed', 'authentication failure',
'Authentication failure'],
'reporter': ['login', 'postfix/submission/smtpd', 'sshd', 'sudo']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login_failed'])
# Test: (reporter is 'xscreensaver' or
# reporter is 'login') AND
# body contains 'FAILED LOGIN'
attribute_values_per_name = {
'body': ['FAILED LOGIN'],
'reporter': ['login', 'xscreensaver']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login_failed'])
# Test: reporter is 'su' AND body contains 'DENIED'
attribute_values_per_name = {
'body': ['DENIED su from'],
'reporter': ['su']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login_failed'])
# Test: reporter is 'nologin'
attribute_values_per_name = {
'reporter': ['nologin']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login_failed'])
def testRuleUserAdd(self):
"""Tests the useradd tagging rule."""
# Test: reporter is 'useradd' AND body contains 'new user'
attribute_values_per_name = {
'reporter': ['useradd'],
'body': ['new user']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['useradd'])
# Test: data_type is 'selinux:line' AND audit_type is 'ADD_USER'
attribute_values_per_name = {
'audit_type': ['ADD_USER']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['useradd'])
def testRuleGroupAdd(self):
"""Tests the groupadd tagging rule."""
# Test: reporter is 'useradd' AND body contains 'new group'
attribute_values_per_name = {
'reporter': ['useradd'],
'body': ['new group']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['groupadd'])
# Test: data_type is 'selinux:line' AND audit_type is 'ADD_GROUP'
attribute_values_per_name = {
'audit_type': ['ADD_GROUP']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['groupadd'])
# Test: reporter is 'groupadd'
attribute_values_per_name = {
'reporter': ['groupadd']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['groupadd'])
def testRuleUserDel(self):
"""Tests the userdel tagging rule."""
# Test: reporter is 'userdel' AND body contains 'delete user'
attribute_values_per_name = {
'reporter': ['userdel'],
'body': ['delete user']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['userdel'])
# Test: data_type is 'selinux:line' AND audit_type is 'DEL_USER'
attribute_values_per_name = {
'audit_type': ['DEL_USER']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['userdel'])
def testRuleGroupDel(self):
"""Tests the groupdel tagging rule."""
# Test: reporter is 'userdel' AND body contains 'removed group'
attribute_values_per_name = {
'reporter': ['userdel'],
'body': ['removed group']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['groupdel'])
# Test: data_type is 'selinux:line' AND audit_type is 'DEL_GROUP'
attribute_values_per_name = {
'audit_type': ['DEL_GROUP']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['groupdel'])
# Test: reporter is 'groupdel'
attribute_values_per_name = {
'reporter': ['groupdel']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['groupdel'])
def testRuleFirewallChange(self):
"""Tests the firewall_change tagging rule."""
# Test: data_type is 'selinux:line' AND audit_type is 'NETFILTER_CFG'
attribute_values_per_name = {
'audit_type': ['NETFILTER_CFG']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['firewall_change'])
def testRuleLogout(self):
"""Tests the logout tagging rule."""
# Test: data_type is 'linux:utmp:event' AND type == 8 AND terminal != '' AND
# pid != 0
# Cannot use _CheckTaggingRule here because of terminal != ''
event = events.EventObject()
event.timestamp = self._TEST_TIMESTAMP
event.timestamp_desc = definitions.TIME_DESCRIPTION_UNKNOWN
event_data = utmp.UtmpEventData()
event_data.type = 0
event_data.terminal = 'tty1'
event_data.pid = 1
storage_writer = self._TagEvent(event, event_data, None)
self.assertEqual(storage_writer.number_of_event_tags, 0)
self._CheckLabels(storage_writer, [])
event_data.type = 8
event_data.terminal = ''
storage_writer = self._TagEvent(event, event_data, None)
self.assertEqual(storage_writer.number_of_event_tags, 0)
self._CheckLabels(storage_writer, [])
event_data.terminal = 'tty1'
event_data.pid = 0
storage_writer = self._TagEvent(event, event_data, None)
self.assertEqual(storage_writer.number_of_event_tags, 0)
self._CheckLabels(storage_writer, [])
event_data.pid = 1
storage_writer = self._TagEvent(event, event_data, None)
self.assertEqual(storage_writer.number_of_event_tags, 1)
self._CheckLabels(storage_writer, ['logout'])
# Test: reporter is 'login' AND body contains 'session closed'
attribute_values_per_name = {
'body': ['session closed'],
'reporter': ['login']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name, ['logout'])
# Test: reporter is 'sshd' AND (body contains 'session closed' OR
# body contains 'Close session')
attribute_values_per_name = {
'body': ['Close session', 'session closed'],
'reporter': ['sshd']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name, ['logout'])
# Test: reporter is 'systemd-logind' AND body contains 'logged out'
attribute_values_per_name = {
'body': ['logged out'],
'reporter': ['systemd-logind']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name, ['logout'])
# Test: reporter is 'dovecot' AND body contains 'Logged out'
attribute_values_per_name = {
'body': ['Logged out'],
'reporter': ['dovecot']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name, ['logout'])
# Test: data_type is 'selinux:line' AND audit_type is 'USER_LOGOUT'
attribute_values_per_name = {
'audit_type': ['USER_LOGOUT']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['logout'])
def testRuleSessionStart(self):
"""Tests the session_start tagging rule."""
# Test: reporter is 'systemd-logind' and body contains 'New session'
attribute_values_per_name = {
'body': ['New session'],
'reporter': ['systemd-logind']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['session_start'])
def testRuleSessionStop(self):
"""Tests the session_stop tagging rule."""
# Test: reporter is 'systemd-logind' and body contains 'Removed session'
attribute_values_per_name = {
'body': ['Removed session'],
'reporter': ['systemd-logind']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['session_stop'])
def testRuleBoot(self):
"""Tests the boot tagging rule."""
# Test: data_type is 'linux:utmp:event' AND type == 2 AND
# terminal is 'system boot' AND username is 'reboot'
attribute_values_per_name = {
'terminal': ['system boot'],
'type': [2],
'username': ['reboot']}
self._CheckTaggingRule(
utmp.UtmpEventData, attribute_values_per_name, ['boot'])
# Test: data_type is 'selinux:line' AND audit_type is 'SYSTEM_BOOT'
attribute_values_per_name = {
'audit_type': ['SYSTEM_BOOT']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['boot'])
def testRuleShutdown(self):
"""Tests the shutdonw tagging rule."""
# Test: data_type is 'linux:utmp:event' AND type == 1 AND
# (terminal is '~~' OR terminal is 'system boot') AND
# username is 'shutdown'
attribute_values_per_name = {
'terminal': ['~~', 'system boot'],
'type': [1],
'username': ['shutdown']}
self._CheckTaggingRule(
utmp.UtmpEventData, attribute_values_per_name, ['shutdown'])
# Test: data_type is 'selinux:line' AND audit_type is 'SYSTEM_SHUTDOWN'
attribute_values_per_name = {
'audit_type': ['SYSTEM_SHUTDOWN']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['shutdown'])
def testRuleRunlevel(self):
"""Tests the runlevel tagging rule."""
# Test: data_type is 'linux:utmp:event' AND type == 1 AND
# username is 'runlevel'
attribute_values_per_name = {
'type': [1],
'username': ['runlevel']}
self._CheckTaggingRule(
utmp.UtmpEventData, attribute_values_per_name, ['runlevel'])
# Test: data_type is 'selinux:line' AND audit_type is 'SYSTEM_RUNLEVEL'
attribute_values_per_name = {
'audit_type': ['SYSTEM_RUNLEVEL']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['runlevel'])
def testRuleDeviceConnection(self):
"""Tests the device_connection tagging rule."""
# Test: reporter is 'kernel' AND body contains 'New USB device found'
attribute_values_per_name = {
'body': ['New USB device found'],
'reporter': ['kernel']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['device_connection'])
def testRuleDeviceDisconnection(self):
"""Tests the device_disconnection tagging rule."""
# Test: reporter is 'kernel' AND body contains 'USB disconnect'
attribute_values_per_name = {
'body': ['USB disconnect'],
'reporter': ['kernel']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['device_disconnection'])
def testRuleApplicationInstall(self):
"""Tests the application_install tagging rule."""
# Test: data_type is 'dpkg:line' AND body contains 'status installed'
attribute_values_per_name = {
'body': ['status installed']}
self._CheckTaggingRule(
dpkg.DpkgEventData, attribute_values_per_name,
['application_install'])
def testRuleServiceStart(self):
"""Tests the service_start tagging rule."""
# Test: data_type is 'selinux:line' AND audit_type is 'SERVICE_START'
attribute_values_per_name = {
'audit_type': ['SERVICE_START']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['service_start'])
def testRuleServiceStop(self):
"""Tests the service_stop tagging rule."""
# Test: data_type is 'selinux:line' AND audit_type is 'SERVICE_STOP'
attribute_values_per_name = {
'audit_type': ['SERVICE_STOP']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['service_stop'])
def testRulePromiscuous(self):
"""Tests the promiscuous tagging rule."""
# Test: data_type is 'selinux:line' AND audit_type is 'ANOM_PROMISCUOUS'
attribute_values_per_name = {
'audit_type': ['ANOM_PROMISCUOUS']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['promiscuous'])
# Test: reporter is 'kernel' AND body contains 'promiscuous mode'
attribute_values_per_name = {
'body': ['promiscuous mode'],
'reporter': ['kernel']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['promiscuous'])
  def testRuleCrash(self):
"""Tests the crash tagging rule."""
# Test: data_type is 'selinux:line' AND audit_type is 'ANOM_ABEND'
attribute_values_per_name = {
'audit_type': ['ANOM_ABEND']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name, ['crash'])
# Test: reporter is 'kernel' AND body contains 'segfault'
attribute_values_per_name = {
'body': ['segfault'],
'reporter': ['kernel']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name, ['crash'])
if __name__ == '__main__':
unittest.main()
|
python
|
__author__ = 'surya'
# Plot the S (stimulating) integral intensity values for each plate and save a histogram image for that plate.
def plot(file,nslen,slen):
import matplotlib.pyplot as plt
start=5
    values = []
    with open(file+"_IntegralIntensity.txt") as files:
        next(files)
        for lines in files:
            splits = lines.split("\t")
            for i in range(start+nslen, start+nslen+slen):
                values.append(float(splits[i].strip()))
    plt.hist(values, 50)
    plt.xlabel('intensity')
    plt.ylabel('frequency')
    plt.title('Histogram distribution of S integral intensity')
plt.subplots_adjust(left=0.2)
plt.savefig(file+'.png')
plt.clf()
return file+'.png'
# Produce a final plot of the number of positives found before and after filtering out
# annotated entries (for example SD+, control+ or control-).
def plotFinalPlt(path):
import matplotlib.pyplot as pltt
x=[]
y=[]
ys=[]
with open(path+".txt") as file:
next (file)
for line in file:
splits=line.split("\t")
x.append(splits[1].strip())
y.append(splits[4].strip())
ys.append(splits[2].strip())
pltt.plot(y,"ro-",ys,"bs-")
pltt.title('Significant interaction found for each plate')
pltt.xlabel('interaction')
pltt.ylabel('interactions')
pltt.xlim(-1,len(x))
mi=int(min(y))-2
ma=int(max(ys))+10
# pltt.ylim(mi,ma)
for i in range(0,len(x)):
pltt.annotate(x[i]+", " +y[i], xy=(i,y[i]),
arrowprops=dict(facecolor='green'),
)
pltt.savefig(path+'.png')
pltt.clf()
    return path+'.png'
##################################################################
## create a plot from the list of the values
def create_plot(x_list,nx_list,var):
import random
## select random numbers of the same length of NS
s_list=random.sample(x_list,len(nx_list))
num_bins=50
import matplotlib.pyplot as plt
plt.figure("Histogram distribution for "+var)
plt.subplot(211)
plt.title("Stimulating Integral Intensity")
plt.ylabel('frequency')
plt.hist(s_list, num_bins,facecolor='green')
plt.subplot(212)
plt.title("Non-Stimulating Integral Intensity")
plt.hist(nx_list, num_bins,facecolor='red')
plt.xlabel('intensity')
plt.ylabel('frequency')
# # Tweak spacing to prevent clipping of ylabel
# plt.subplots_adjust(left=0.15)
plt.show()
################################################################
### create a box plot
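## A minimal sketch of the box plot helper announced above (assumption: it compares the
## stimulating and non-stimulating intensity lists side by side; names are hypothetical).
def create_box_plot(x_list, nx_list, var):
    import matplotlib.pyplot as plt
    plt.figure("Box plot for " + var)
    plt.boxplot([x_list, nx_list])
    plt.xticks([1, 2], ["Stimulating", "Non-Stimulating"])
    plt.ylabel('intensity')
    plt.title("Box plot of integral intensity for " + var)
    plt.show()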
|
python
|
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.postgres import fields as pgfields
from django.contrib.auth.models import User
# about user
# https://docs.djangoproject.com/en/1.11/topics/auth/customizing/#using-a-custom-user-model-when-starting-a-project
# https://simpleisbetterthancomplex.com/tutorial/2017/02/18/how-to-create-user-sign-up-view.html
class AppSettings:
"""Settings for whole applicaion"""
LANGS = {
'ru': 'Русский',
'en': 'English'
}
SET_TYPES = {
'by_stop': _('By finish'),
'by_start': _('By start')
}
@staticmethod
def get():
return dict(
min_weight=Set.MIN_WEIGHT,
max_weight=Set.MAX_WEIGHT,
min_reps=Set.MIN_REPS,
max_reps=Set.MAX_REPS,
langs=AppSettings.LANGS,
set_types=AppSettings.SET_TYPES
)
class UserSettings:
"""User settings are stored in profile"""
# https://docs.djangoproject.com/en/2.0/ref/contrib/postgres/fields/#django.contrib.postgres.fields.JSONField
# default must be callable
@staticmethod
def default():
return dict(
lang='ru',
set_type='by_stop',
set_weight=20,
set_reps=10
)
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
email_confirmed = models.BooleanField(default=False)
settings = pgfields.JSONField(default=UserSettings.default)
# todo: investigate
# http://www.django-rest-framework.org/api-guide/serializers/#handling-saving-related-instances-in-model-manager-classes
@receiver(post_save, sender=User)
def update_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
instance.profile.save()
class TrainingName(models.Model):
"""Название тренировки, общий, дополняемый список, для всех"""
text = models.CharField(_('name'), max_length=250, unique=True)
class Training(models.Model):
"""Тренировка"""
STARTED = 'st'
FINISHED = 'fn'
STATUSES = (
(STARTED, _('Started')),
(FINISHED, _('Finished'))
)
date = models.DateTimeField(_('date'), default=timezone.now)
status = models.CharField(
max_length=2,
choices=STATUSES,
default=STARTED
)
name = models.ForeignKey(
TrainingName,
on_delete=models.PROTECT,
verbose_name=_('name')
)
user = models.ForeignKey(
User,
on_delete=models.PROTECT,
related_name='trainings',
verbose_name=_('user')
)
def __str__(self):
        return self.name.text
class Set(models.Model):
"""Подходы (вес, повторения, время)"""
MIN_WEIGHT = 1
MAX_WEIGHT = 600
MIN_REPS = 1
MAX_REPS = 999
weight = models.PositiveIntegerField(_('weight'))
reps = models.PositiveIntegerField(_('repetitions'))
started_at = models.DateTimeField(_('started at'), null=True)
"""Start time of set, value - if set is started manually, null if set is filled by end fact"""
# todo: validate no less than started (? and training date)
stopped_at = models.DateTimeField(_('stopped at'), default=timezone.now)
"""Stop time of set"""
training = models.ForeignKey(
Training,
on_delete=models.CASCADE,
related_name='sets',
verbose_name=_('training')
)
def __str__(self):
return '{} x{}'.format(self.weight, self.reps)
|
python
|
import requests
from bs4 import BeautifulSoup
# Define a function that gets the text of an HTML page
def gettext(url, kv=None):
try:
r = requests.get(url,headers = kv)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
print("Failure")
# Define a function that downloads a single image
def scrapy_photo(url,file_name):
try:
r = requests.get(url)
r.encoding = r.apparent_encoding
print(r.status_code)
r.raise_for_status()
with open(file_name,'wb') as f:
f.write(r.content)
except:
print("error")
# Get all image links from an HTML page
def get_img_url(w_url):
html = gettext(w_url,kv = {'user-agent':'Mozilla/5.0'})
soup = BeautifulSoup(html, 'lxml')
a = soup.find_all('img')
link = []
#get all links
for i in a:
link.append(i.attrs['src'])
return link
def main():
    n = 1
    url = input("please input a web page url:")
    url_link = get_img_url(url)
    for i in url_link:
        file_name = "pic{}.jfif".format(n)
        scrapy_photo(i, file_name)
        n = n + 1
if __name__ == "__main__":
main()
|
python
|
"""\
wxDatePickerCtrl objects
@copyright: 2002-2007 Alberto Griggio
@copyright: 2014-2016 Carsten Grohmann
@copyright: 2016 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import wx
from edit_windows import ManagedBase, EditStylesMixin
from tree import Node
import common, compat, config
import decorators
if compat.IS_PHOENIX:
#import wx.adv
from wx.adv import DatePickerCtrl
else:
#import wx.calendar
from wx import DatePickerCtrl
class EditDatePickerCtrl(ManagedBase, EditStylesMixin):
"Class to handle wxDatePickerCtrl objects"
# XXX unify with EditCalendarCtrl?
_PROPERTIES = ["Widget", "style"]
PROPERTIES = ManagedBase.PROPERTIES + _PROPERTIES + ManagedBase.EXTRA_PROPERTIES
def __init__(self, name, parent, id, sizer, pos):
# Initialise parent classes
ManagedBase.__init__(self, name, 'wxDatePickerCtrl', parent, id, sizer, pos)
EditStylesMixin.__init__(self)
def create_widget(self):
# TODO add all the other parameters for the DatePickerCtrl initial date
self.widget = DatePickerCtrl(self.parent.widget, self.id, style=self.style)
# handle compatibility:
@decorators.memoize
def wxname2attr(self, name):
cn = self.codegen.get_class(self.codegen.cn(name))
module = wx if compat.IS_CLASSIC else wx.adv
return getattr(module, cn)
def properties_changed(self, modified=None):
EditStylesMixin.properties_changed(self, modified)
ManagedBase.properties_changed(self, modified)
def builder(parent, sizer, pos, number=[1]):
"factory function for EditDatePickerCtrl objects"
label = 'datepicker_ctrl_%d' % number[0]
while common.app_tree.has_name(label):
number[0] += 1
label = 'datepicker_ctrl_%d' % number[0]
with parent.frozen():
datepicker_ctrl = EditDatePickerCtrl(label, parent, wx.NewId(), sizer, pos)
datepicker_ctrl.properties["style"].set_to_default()
datepicker_ctrl.check_defaults()
node = Node(datepicker_ctrl)
datepicker_ctrl.node = node
if parent.widget: datepicker_ctrl.create()
common.app_tree.insert(node, sizer.node, pos-1)
def xml_builder(attrs, parent, sizer, sizeritem, pos=None):
"factory to build EditDatePickerCtrl objects from a XML file"
from xml_parse import XmlParsingError
try:
label = attrs['name']
except KeyError:
raise XmlParsingError(_("'name' attribute missing"))
if sizer is None or sizeritem is None:
raise XmlParsingError(_("sizer or sizeritem object cannot be None"))
datepicker_ctrl = EditDatePickerCtrl(label, parent, wx.NewId(), sizer, pos)
#sizer.set_item(datepicker_ctrl.pos, proportion=sizeritem.proportion, span=sizeritem.span, flag=sizeritem.flag, border=sizeritem.border)
node = Node(datepicker_ctrl)
datepicker_ctrl.node = node
if pos is None:
common.app_tree.add(node, sizer.node)
else:
common.app_tree.insert(node, sizer.node, pos-1)
return datepicker_ctrl
def initialize():
"initialization function for the module: returns a wxBitmapButton to be added to the main palette"
common.widgets['EditDatePickerCtrl'] = builder
common.widgets_from_xml['EditDatePickerCtrl'] = xml_builder
return common.make_object_button('EditDatePickerCtrl', 'datepicker_ctrl.xpm')
|
python
|
from util.html import HTML
import numpy as np
import os
import ntpath
import time
from . import util
import matplotlib.pyplot as plt
from util.util import load_validation_from_file, smooth_kernel, load_loss_from_file
class Visualizer():
"""This class includes several functions that can display/save images and print/save logging information.
    It uses matplotlib for plotting and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
"""
def __init__(self, opt):
"""Initialize the Visualizer class
Parameters:
opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
        Step 1: Cache the training/test options
        Step 2: create logging files to store the training losses and the validation/training scores
"""
self.opt = opt # cache the option
self.name = opt.name
if opt.isTrain:
# create a logging file to store training losses
self.loss_log = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
self.validation_log = os.path.join(opt.checkpoints_dir, opt.name, 'validation.txt')
self.training_log = os.path.join(opt.checkpoints_dir, opt.name, 'training.txt')
if opt.continue_train:
if os.path.isfile(self.loss_log):
self.plot_data = load_loss_from_file(self.loss_log)
if len(self.plot_data['legend']) == 0:
del self.plot_data
print('Loaded loss from', self.loss_log)
if os.path.isfile(self.validation_log):
self.validation_score = load_validation_from_file(self.validation_log)
print('Loaded validation scores from', self.validation_log)
if os.path.isfile(self.training_log):
                    self.training_score = load_validation_from_file(self.training_log)
print('Loaded training scores from', self.training_log)
elif os.path.isfile(self.loss_log):
# Erase old content
open(self.loss_log, 'w').close()
open(self.validation_log, 'w').close()
open(self.training_log, 'w').close()
with open(self.loss_log, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
def plot_current_losses(self):
"""display the current losses on visdom display: dictionary of error labels and values
Parameters:
epoch (int) -- current epoch
counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
"""
if not hasattr(self, 'figure'):
self.figure = plt.figure()
else:
plt.figure(self.figure.number)
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.title(self.name + ' loss over time')
# plt.yscale('symlog')
# plt.ylim((-50,80))
x = self.plot_data['X']
y = np.array(self.plot_data['Y']).transpose()
for i, loss in enumerate(y):
if i>=3:
break
plt.plot(x, loss, label=self.plot_data['legend'][i])
plt.legend()
path = os.path.join(self.opt.checkpoints_dir, self.opt.name, 'loss.png')
plt.tight_layout()
plt.savefig(path, format='png', bbox_inches='tight')
plt.cla()
def plot_current_validation_score(self, score, total_iters):
with open(self.validation_log, 'a') as f:
f.write(', '.join(map(str, score))+'\n')
if not hasattr(self, 'validation_score'):
self.validation_score = []
self.validation_score.append(score)
if not hasattr(self, 'figure2'):
self.figure2 = plt.figure()
else:
plt.figure(self.figure2.number)
plt.xlabel('Iteration')
plt.ylabel('Mean Relative Error')
plt.title(self.name + ' validation error over time')
plt.ylim([0,max(1,np.amax(self.validation_score))])
step_size = int(total_iters/len(self.validation_score))
x = list(range(step_size, total_iters+1, step_size))
plt.plot(x, [0.15]*len(x), 'r--')
for i in range(len(score)):
plt.plot(x, np.array(self.validation_score)[:,i])
plt.legend(('15% error mark', *self.opt.physics_model.get_label_names()))
path = os.path.join(self.opt.checkpoints_dir, self.opt.name, 'validation_score.png')
plt.savefig(path, format='png', bbox_inches='tight')
plt.cla()
def plot_current_training_score(self, score, total_iters):
with open(self.training_log, 'a') as f:
f.write(', '.join(map(str, score))+'\n')
if not hasattr(self, 'training_score'):
self.training_score = []
self.training_score.append(score)
if not hasattr(self, 'figure2'):
self.figure2 = plt.figure()
else:
plt.figure(self.figure2.number)
plt.xlabel('Iteration')
plt.ylabel('Mean Relative Error')
plt.title(self.name + ' training error over time')
plt.ylim([0,max(1,np.amax(self.training_score))])
step_size = int(total_iters/len(self.training_score))
x = list(range(step_size, total_iters+1, step_size))
plt.plot(x, [0.15]*len(x), 'r--')
for i in range(len(score)):
plt.plot(x, np.array(self.training_score)[:,i])
plt.legend(('15% error mark', *self.opt.physics_model.get_label_names()))
path = os.path.join(self.opt.checkpoints_dir, self.opt.name, 'training_score.png')
plt.savefig(path, format='png', bbox_inches='tight')
plt.cla()
# losses: same format as |losses| of plot_current_losses
def print_current_losses(self, epoch, iters, losses, t_comp, t_data, iter):
"""print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
t_comp (float) -- computational time per data point (normalized by batch_size)
t_data (float) -- data loading time per data point (normalized by batch_size)
"""
if not hasattr(self, 'plot_data'):
self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
self.plot_data['X'].append(iter)
self.plot_data['Y'].append([losses[k].detach().cpu().numpy() for k in self.plot_data['legend']])
message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
for k, v in losses.items():
message += '%s: %.3f ' % (k, v)
print(message) # print the message
with open(self.loss_log, "a") as log_file:
log_file.write('%s\n' % message) # save the message
def save_smooth_loss(self):
"""Stores the current loss as a png image.
"""
num_points = len(self.plot_data['Y'])
if not hasattr(self, 'figure'):
self.figure = plt.figure()
else:
plt.figure(self.figure.number)
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.title(self.name + ' loss over time')
# plt.yscale('symlog')
# plt.ylim((-50,80))
x = self.plot_data['X']
y_all = np.array(self.plot_data['Y']).transpose()
y = []
for y_i in y_all:
y.append(smooth_kernel(y_i))
x = np.linspace(x[0],x[-1],len(y[0]))
for i, loss in enumerate(y):
plt.plot(x, loss, label=self.plot_data['legend'][i])
plt.legend()
path = os.path.join(self.opt.checkpoints_dir, self.opt.name, 'loss_smooth.png')
plt.savefig(path, format='png', bbox_inches='tight')
plt.cla()
def save_images(webpage: HTML, visuals: dict, image_path: list, aspect_ratio=1.0, width=256):
"""Save images to the disk.
Parameters:
        webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
        visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy)) pairs
        image_path (list) -- list of image paths; the first entry is used to name the saved images
aspect_ratio (float) -- the aspect ratio of saved images
width (int) -- the images will be resized to width x width
This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
"""
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims, txts, links = [], [], []
for label, im in visuals.items():
if im is None:
continue
image_name = '%s_%s.png' % (name, label)
save_path = os.path.join(image_dir, image_name)
util.save_image(im, save_path, aspect_ratio=aspect_ratio)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=width)
|
python
|
# Copyright 2009-2010 by Ka-Ping Yee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NOTE: THIS MODULE IS CURRENTLY UNUSED.
The current permissions scheme for resource finder is:
- Anyone (logged-in and non-logged-in users) can view and print
- Any logged-in user can edit data
THE CODE BELOW IS UNNECESSARY WITH THIS PERMISSION SCHEME
Handler for allowing an Account with 'grant' permission to grant access using
the permission scheme provided in access.py
"""
import logging
import model
import utils
from utils import DateTime, ErrorMessage, Redirect
from utils import db, html_escape, users, _
from access import check_action_permitted
class GrantAccess(utils.Handler):
def get(self):
"""Shows all access requests that are waiting for approval."""
self.require_action_permitted('grant')
q = model.Account.all().filter('requested_actions !=', None)
requests = []
for account in q.fetch(100):
for action in account.requested_actions:
if check_action_permitted(self.account, 'grant'):
requests.append({'email': account.email,
'requested_action': action,
'key': account.key()})
self.render('templates/grant_access.html',
requests=requests,
params=self.params,
grant_url=self.get_url('/grant_access'),
logout_url=users.create_logout_url('/'),
subdomain=self.subdomain)
def post(self):
"""Grants or denies a single request."""
action = self.request.get('action')
if not action:
raise ErrorMessage(404, 'missing action (requested_action) params')
self.require_action_permitted('grant')
account = model.Account.get(self.request.get('key'))
if not account:
raise ErrorMessage(404, 'bad key given')
#TODO(eyalf): define account.display_name() or something
name = account.email
        if action not in account.requested_actions:
            #i18n: Error message
            raise ErrorMessage(404, _('No pending request for '
                                      '%(account_action)s by %(user)s')
                               % {'account_action': action, 'user': name})
account.requested_actions.remove(action)
grant = self.request.get('grant', 'deny')
if grant == 'approve':
account.actions.append(action)
account.put()
logging.info('%s request for %s was %s' % (account.email,
action,
grant))
if self.params.embed:
if grant == 'approve':
self.write(
#i18n: Application for the given permission action approved
                    _('Request for becoming %(action)s was approved.') % {'action': action})
else:
self.write(
#i18n: Application for the given permission action denied
                    _('Request for becoming %(action)s was denied.') % {'action': action})
else:
raise Redirect(self.get_url('/grant_access'))
if __name__ == '__main__':
utils.run([('/grant_access', GrantAccess)], debug=True)
|
python
|
import pytest
import NAME
|
python
|
import os
import numpy as np
import joblib
class proba_model_manager():
    def __init__(self, static_data, params={}):
        # Set the paths and shared attributes first so they exist before they are used below.
        self.static_data = static_data
        self.istrained = False
        self.method = 'mlp'
        self.model_dir = os.path.join(static_data['path_model'], 'Probabilistic')
        self.data_dir = self.static_data['path_data']
        if len(params) > 0:
            self.params = params
            self.test = params['test']
            self.test_dir = os.path.join(self.model_dir, 'test_' + str(self.test))
        if hasattr(self, 'test'):
            try:
                self.load(self.test_dir)
            except:
                pass
        else:
            try:
                self.load(self.model_dir)
            except:
                pass
        # Re-assert the fresh values in case load() overwrote them from a saved model.
        self.static_data = static_data
        self.cluster_name = static_data['_id']
        self.rated = static_data['rated']
        self.probabilistic = True
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
        if hasattr(self, 'test_dir') and not os.path.exists(self.test_dir):
            os.makedirs(self.test_dir)
def load(self, path):
if os.path.exists(os.path.join(path, self.method + '.pickle')):
try:
tmp_dict = joblib.load(os.path.join(path, self.method + '.pickle'))
self.__dict__.update(tmp_dict)
except:
                raise ImportError('Cannot open MLP model')
else:
            raise ImportError('Cannot find MLP model')
|
python
|
"""Test the cli_data_download tool outputs."""
# TODO review and edit this
import argparse
from pathlib import Path
from cmatools.cli_data_download import cli_data_download
from cmatools.definitions import SRC_DIR
DEBUG = True
"""bool: Debugging module-level constant (Default: True)."""
# Define cli filepath
CLI = Path(SRC_DIR, "cmatools", "cli_simple_analysis.py")
"""str: Filepath to command line tool module."""
# TODO mark as slow
# Keep this simple test, but mock so no actual download occurs
def test_cli_data_download():
"""Test for cli_data_download() function."""
parsed_args = argparse.Namespace(portal="CEDA", dataset="HADCRUT")
output = cli_data_download(parsed_args)
# Expect True, indicates download success
assert output is True
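# A hedged sketch of the mocked variant suggested by the comment above. The patch target
# "download" is hypothetical; substitute whatever helper cli_data_download() actually uses
# to fetch data so that no real network access happens during the test.
def test_cli_data_download_mocked(monkeypatch):
    """Test cli_data_download() with the download step stubbed out."""
    import cmatools.cli_data_download as cli_module
    monkeypatch.setattr(cli_module, "download", lambda *args, **kwargs: True, raising=False)
    parsed_args = argparse.Namespace(portal="CEDA", dataset="HADCRUT")
    assert cli_data_download(parsed_args) is True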
|
python
|
import sys, json
from PIL import Image
from parser.png_diff import PNG_DIFF
from format.util import *
def diff(file_before, file_after):
"""diff png file
args:
file_before (str)
file_after (str)
returns:
png_diff (PNG_DIFF)
"""
png_before = Image.open(file_before)
png_after = Image.open(file_after)
png_diff = PNG_DIFF()
png_diff.diff(png_before, png_after)
return png_diff
def make_diff(file_before, file_after, file_output_name):
"""diff png file and save as file
args:
file_before (str)
file_after (str)
file_output_name (str)
returns:
saved_files (list)
"""
png_diff = diff(file_before, file_after)
saved_diff_images = create_diff_image("RGBA", tuple(png_diff.size[0]), png_diff.pixel_diff, file_output_name)
saved_diff_json = create_diff_json(png_diff, file_output_name)
saved_files = saved_diff_images
saved_files.append(saved_diff_json)
return saved_files
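# Example usage (illustrative; the file names are placeholders):
#   saved = make_diff("before.png", "after.png", "diff_output")
#   print(saved)  # the generated diff image paths followed by the diff JSON path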
|
python
|
# -*- coding: utf-8 -*-
# Check if the move of v can satisfied, makebetter, or notsatisfied
from .FMConstrMgr import FMConstrMgr
class FMBiConstrMgr(FMConstrMgr):
def select_togo(self):
"""[summary]
Returns:
dtype: description
"""
return 0 if self.diff[0] < self.diff[1] else 1
|
python
|
from django.shortcuts import get_object_or_404, render
from .models import Card, Group, Product
def searching(request, keyword):
products = Product.objects.filter(title__contains=keyword)
return render(request, 'main/catalog.html', {'products': products, 'keyword': keyword})
def index(request):
offers = Product.objects.filter(old_price__gte=1)[:3]
products = Product.objects.all()[:3]
keyword = request.GET.get("q", None)
if keyword:
products = Product.objects.filter(title__contains=keyword)
return render(request, 'main/catalog.html', {'products': products, 'keyword': keyword})
context = {
'offers': offers,
'products': products,
'keyword': keyword
}
return render(request, 'main/index.html', context)
def catalog(request):
offers = Product.objects.filter(old_price__gte=1)
products = Product.objects.all().order_by('price')
keyword = request.GET.get("q", None)
if keyword:
products = Product.objects.filter(title__contains=keyword)
return render(request, 'main/catalog.html', {'products': products, 'keyword': keyword})
context = {
'offers': offers,
'products': products,
'keyword': keyword
}
return render(request, 'main/catalog.html', context)
def group_list(request, slug):
group = get_object_or_404(Group, slug=slug)
products = group.products.all()
keyword = request.GET.get("q", None)
if keyword:
products = Product.objects.filter(title__contains=keyword)
return render(request, 'main/catalog.html', {'products': products, 'keyword': keyword})
context = {
'group': group,
'products': products,
'keyword': keyword
}
return render(request, 'main/group.html', context)
def cart(request):
products = Product.objects.all()[:10]
cards = Card.objects.all()
keyword = request.GET.get("q", None)
if keyword:
products = Product.objects.filter(title__contains=keyword)
return render(request, 'main/catalog.html', {'products': products, 'keyword': keyword})
context = {
'products': products,
'cards': cards,
'keyword': keyword
}
return render(request, 'main/cart.html', context)
|
python
|
#
# project-k Forth kernel in python
# Use the same kernel code for all applications.
# FigTaiwan H.C. Chen [email protected] 21:14 2017-07-31
#
import re, sys
name = "peforth"
vm = __import__(__name__)
major_version = 1; # major version, peforth.py kernel version, integer.
ip = 0;
stack = [] ;
rstack = [];
vocs = [];
words = {};
current = "forth";
context = "forth";
order = [context];
wordhash = {};
dictionary = [];
dictionary.append(0);
here = 1; # dictionary[0] is 0
tib = "";
ntib = 0;
RET = None; # The 'ret' instruction code. It marks the end of a colon word.
EXIT = ""; # The 'exit' instruction code.
compiling = False;
stop = False; # Stop the outer loop
newname = ""; # new word's name
newxt = None
newhelp = "";
# Reset the forth VM
def reset():
# defined in project-k kernel peforth.py
    global rstack, compiling, ip, stop, ntib
# rstack = []; this creates extra error when return from the inner loop
compiling = False;
ip = 0; # forth VM instruction pointer
stop = True;
ntib = len(tib); # don't clear tib, a clue for debug.
# All peforth words are instances of this Word() constructor.
class Word:
def __init__(self, name, xt):
self.name = name
self.xt = xt
self.immediate = False
self.help = ""
self.comment = ""
def __str__(self): # return help message
return self.name + " " + self.help + ' __str__'
def __repr__(self): # execute xt and return help message
return "<Word '{}'>".format(self.name)
# returns the last defined word.
def last():
return words[current][-1]
# Get the word-list where new defined words are going to
def current_word_list():
return words[current]
# Get the word-list that is searched first.
def context_word_list():
return words[context]
# Get string from recent ntib down to, but not including, the next delimiter.
# Return result={str:"string", flag:boolean}
# If the delimiter is not found then return the entire remaining TIB (possibly
# multiple lines) through result.str, so that no input is lost.
# result.flag indicates whether the delimiter was found.
# o If you want to read the entire line in TIB, use nexttoken('\n|\r').
# nexttoken() skips the next character, which is usually a white space in Forth source
# code, e.g. after s"; this is reasonable because it's Forth. The leading white space(s)
# will be included if using the lower level nextstring('\\s') instead of nexttoken().
# o If you need to know whether the delimiter was found, use nextstring().
# o result.str is "" if TIB has nothing left.
# o The ending delimiter is left in place (not consumed).
# (A short illustrative walk-through of both readers follows nexttoken() below.)
# o The delimiter is a regular expression.
def nextstring(deli):
# search for delimiter in tib from ntib
# index = tib[ntib:].find(deli) does not support regular expression, no good
global ntib
result = {}
try:
index = re.search(deli, tib[ntib:]).start() # start() triggers exception when not found
# see https://stackoverflow.com/questions/2674391/python-locating-the-position-of-a-regex-match-in-a-string
result['str'] = tib[ntib:ntib+index]; # found, index is the length
result['flag'] = True;
ntib += index; # Now ntib points at the delimiter.
except Exception:
result['str'] = tib[ntib:] # get the tib from ntib to EOL
result['flag'] = False;
ntib = len(tib) # skip to EOL
return result;
# Get next token which is found after the recent ntib of TIB.
# If delimiter is RegEx white-space ('\\s') or absent then skip all leading white spaces first.
# In the usual case it skips the next character, which should be a white space in Forth source.
# But if the delimiter is CRLF, which is used to read the entire line, the ending CRLF of a blank line won't be skipped.
# o Return "" if TIB has nothing left.
# o Return the remaining TIB if the delimiter is not found.
# o The ending delimiter is left in place (not consumed).
# o The delimiter is a regular expression.
def nexttoken(deli='\\s'):
global tib, ntib
if ntib >= len(tib): return ""
if deli == '\\s':
# skip all leading white spaces
while tib[ntib] in [" ","\t","\n","\r"]:
if (ntib+1) < len(tib):
ntib += 1
else:
break
elif deli in ['\\n','\n','\\r','\r','\\n|\\r','\n|\r','\\r|\\n', '\r|\n']:
# skip the next character that must be whitespace
if tib[ntib] not in ['\n','\r']:
# But don't skip the EOL itself!
ntib += 1
else:
# skip next character that must be whitespace
ntib += 1
token = nextstring(deli)['str'];
return token;
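# Illustrative walk-through of the two readers above (comments only; assumes the
# hypothetical input "dup * ;" has been loaded into tib with ntib reset to 0):
#   nexttoken()      -> "dup"                         ntib is left pointing at the space after it
#   nexttoken()      -> "*"                           the leading space is skipped first
#   nextstring(';')  -> {'str': ' ', 'flag': True}    the ';' delimiter itself is not consumed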
# tick() is the same thing as the Forth word ' (tick).
# Letting words[voc][0] = 0 also means that tick() returning 0 indicates "not found".
# Return the word obj of the given name or 0 if the word is not found.
# May be redefined for selftest to detect private words referenced by name.
# vm.tick keeps the original version.
def tick(name):
# defined in project-k peforth.py
if name in wordhash.keys():
return wordhash[name]
else:
return 0 # name not found
# Return a boolean.
# Whether a new word is a redefinition depends only on the words[current] word-list,
# not on all word-lists nor on the word-hash table. Can't use tick() because tick()
# searches the word-hash, which includes more than just the words[current] word-list.
def isReDef(name):
result = False;
wordlist = current_word_list();
for i in range(1,len(wordlist)): # skip [0] which is 0
if wordlist[i].name == name :
result = True;
break;
return result;
# comma(x) compiles anything into dictionary[here]. x can be number, string,
# function, object, array, etc.
# To compile a word, comma(tick('word-name'))
def comma(x):
global dictionary, here
try:
dictionary[here], here = x , here + 1
except:
dictionary.append(x)
here += 1
# dummy RET
try:
dictionary[here] = RET
except:
dictionary.append(RET)
# [here] will be overwritten, we do this dummy because
# RET is the ending mark for 'see' to know where to stop.
'''
Discussions:
'address' or 'ip' are index of dictionary[] array. dictionary[] is the memory of the
Forth virtual machine.
execute() executes a function, a word "name", and a word Object.
inner(entry) jumps into the entry address. The TOS of return stack can be 0, in that
case the control will return back to python host, or the return address.
inner() used in outer(), and colon word's xt() while execute() is used everywhere.
We have 3 ways to call forth words from Python: 1. execute('word'),
2. dictate('word word word'), and 3. inner(cfa).
dictate() cycles are stand alone tasks. We can suspend an in-completed dictate() and we
can also run another dictate() within a dictate().
The ultimate inner loop is like this: while(w){ip++; w.xt(); w=dictionary[ip]};
Boolean(w) == False is the break condition. So I choose None to be the RET instruction
and the empty string "" to be the EXIT instruction. Choices are None, "", [], {}, False,
and 0, while 0 means 'suspend' the inner loop.
To suspend the Forth virtual machine means to stop the inner loop without popping the
return stack; resuming is possible because the return stack remains intact. We need an
instruction to do this and it's 0. dictionary[0] and words[<vid>][0] are always 0, thus
ip=w=0 indicates that case. Calling the inner loop from the outer loop needs push(0) first
so as to balance the return stack, also letting the 0 instruction stop popping the return
stack because there's no more return address (it's the outer interpreter, remember?).
'''
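# Illustrative sketch of the three call styles discussed above (comments only; assumes
# the kernel plus its standard word set are fully loaded; dictate() is defined later
# in the full source):
#   execute('words')      # run a single word by name
#   dictate('1 2 + .')    # hand a whole command line to the outer interpreter
#   inner(entry)          # run a colon thread, where entry is a dictionary[] index (a cfa)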
# -------------------- ###### The inner loop ###### -------------------------------------
# Translate all possible entry or input to the suitable word type.
def phaseA (entry):
global ip
w = 0;
if type(entry)==str:
# "string" is word name
w = tick(entry.strip()); # remove leading and tailing white spaces
elif (type(entry)==Word or callable(entry)) : # function, Word
w = entry;
elif type(entry)==int:
# number could be dictionary entry or 0.
# could be does> branch entry or popped from return stack by RET or EXIT instruction.
ip = entry;
w = dictionary[ip];
else:
panic("Error! execute() doesn't know how to handle this thing : "+entry+" ("+type(entry)+")\n","err");
return w;
# Execute the given w by the correct method
def phaseB(w):
global ip, rstack
if type(w)==Word: # Word object
try:
w.xt(w)
except Exception as err:
panic("Word in phaseB {}: {}\nBody:\n{}".format(repr(w),err,w.xt.__doc__))
elif callable(w) : # a function
try:
w();
except Exception as err:
panic("Callable in phaseB {}: {}\nBody:\n{}".format(repr(w),err,w.__doc__))
elif str(type(w))=="<class 'code'>": # code object
exec(w)
elif type(w)==int:
# Usually a number is the entry of does>. Can't use inner() to call it
# The below push-jump mimics the call instruction of a CPU.
rstack.append(ip); # Forth ip is the "next" instruction to be executed. Push return address.
ip = w; # jump
else:
panic("Error! don't know how to execute : "+w+" ("+type(w)+")\n","error");
# execute("unknown") == do nothing, this is beneficial when executing a future word
# May be redefined for selftest to detect private words called by name.
# vm.execute keeps the original version.
def execute(entry):
    # defined in project-k peforth.py
w = phaseA(entry)
if w:
if type(w) in [int, float]:
panic("Error! please use inner("+w+") instead of execute("+w+").\n","severe");
else:
phaseB(w);
return(vm) # support function cascade
else:
panic(entry + " unknown!")
# FORTH inner loop of project-k VM
def inner(entry, resuming=None):
# defined in project-k kernel peforth.py
global ip
w = phaseA(entry);
while not stop:
while w: # this is the very inner loop
ip += 1 # Forth general rule. IP points to the *next* word.
phaseB(w) # execute it
w = dictionary[ip] # get next word
if (w==0):
break; # w==0 is suspend, break inner loop but reserve rstack.
else:
ip = rstack.pop(); # w is either ret(None) or exit(""), return to caller, or 0 when resuming through outer(entry)
if(resuming):
            w = dictionary[ip]; # Higher levels of inner() have been terminated by suspend; do their job.
if not (ip and resuming):
            break # Resuming the inner loop. ip==0 means resuming is done.
### End of the inner loop ###
# FORTH outer loop of project-k VM
# If entry is given then resume from the entry point by executing
# the remaining colon thread down until ip reaches 0, that's resume.
# Then proceed with the tib/ntib string.
def outer(entry=None):
# Handle one token.
def outerExecute(token):
w = tick(token); # not found is 0. w is an Word object.
if (w) :
if(not compiling): # interpret state or immediate words
if getattr(w,'compileonly',False):
panic(
"Error! "+token+" is compile-only.",
len(tib)-ntib>100 # error or warning? depends
);
return;
execute(w);
else: # compile state
if (w.immediate) :
execute(w); # Not inner(w);
else:
if getattr(w,'interpretonly',False):
panic(
"Error! "+token+" is interpret-only.",
len(tib)-ntib>100 # error or warning? depends
);
return;
comma(w); # compile w into dictionary. w is a Word() object
else:
# token is unknown or number
            # Note: the line f = float(token) caused problems that try-except could not catch
def is_number(s):
# https://stackoverflow.com/questions/354038/how-do-i-check-if-a-string-is-a-number-float
try:
complex(s) # for int, float and complex
except ValueError:
return False
return True
n = None #
if is_number(token):
# token is (int, float, complex) we ignore complex so far
f = complex(token).real
i = int(f)
if i==f:
n = i
else:
n = f
else:
# token is unknown or (hex, oct, binary)
def panic_unknown():
panic(
"Error! "+token+" unknown.\n",
len(tib)-ntib>100 # error or warning? depends
);
try:
# token is a number
if token[:2] in ["0x","0X"]:
n = int(token,base=16)
elif token[:2] in ["0o","0O"]:
n = int(token,base=8)
elif token[:2] in ["0b","0B"]:
n = int(token,base=2)
else:
if not push(token).execute("unknown").pop():
panic_unknown()
except Exception as err:
if not push(token).execute("unknown").pop():
panic_unknown()
if n != None :
push(n)
if (compiling):
execute("literal");
if (entry):
inner(entry, True); # resume from the breakpoint
while(not stop):
token = nexttoken();
if (token==""):
break; # TIB done, loop exit.
outerExecute(token);
### End of the outer loop ###
# Generates the .xt() function of all code words.
# Python does not support anonymous functions so we use genxt() instead.
# _me argument refers to the word object itself, if you need to access
# any attribute of the word.
# xt.__doc__ keeps the source code.
# py: help(genxt) to read me.
def genxt(name, body):
ll = {}
# _me will be the code word object itself.
source = "def xt(_me=None): ### {} ###"
if tick('-indent') and tick('indent'):
# Beautify source code if -indent and indent are defined
push(body);execute('-indent');execute('indent')
body = pop()
if body.strip()=="":
source = (source+"\n pass\n").format(name)
else:
source = (source+'\n{}').format(name,body)
try:
exec(source,globals(),ll)
except Exception as err:
panic("Failed in genxt({},Body) : {}\nBody:\n{}".format(name, err, body))
ll['xt'].__doc__ = source
ll['xt'].name = name
return ll['xt']
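# Usage sketch (hypothetical word name 'greet'; the body must arrive already indented
# as Python source):
#   xt = genxt('greet', '    print("hello")')
#   xt()            # prints "hello"; xt.__doc__ holds the generated source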
# Python does not support anonymous functions; this can be worked around by
# using a closure. genfunc("body","args","name") returns a function which
# is composed from the given function name, source code, and arguments.
# <name>.__doc__ keeps the source code.
# py: help(genfunc) to read me.
def genfunc(body,args,name):
local = {}
source = "def {}({}):".format(name,args)
# args can be "", or 'x, y=123,z=None'
if body.strip()=="":
source = source+"\n pass\n";
else:
source = (source+'\n{}').format(body)
exec(source,globals(),local)
local[name].__doc__ = source
return local[name]
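# Usage sketch (hypothetical function name 'add'):
#   add = genfunc("    return x + y", "x, y", "add")
#   add(1, 2)       # --> 3; add.__doc__ holds the generated source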
# The basic FORTH word 'code's run time.
def docode(_me=None):
# All future code words can see local variables in here, for jeforth.3we.
# [x] check if this is true for python, <== Not True for Python.
global compiling, newname, newxt, newhelp, ntib
newname = nexttoken();
if isReDef(newname): # don't use tick(newname), it's wrong.
print("reDef " + newname);
# get code body
push(nextstring("end-code"));
if tos()['flag']:
compiling = "code"; # it's true and a clue of compiling a code word.
newxt = genxt(newname, pop()['str'])
else:
panic("Error! expecting 'end-code'.");
reset();
code = Word('code', docode)
code.vid = 'forth'
code.wid = 1
code.type = 'code'
code.help = '( <name> -- ) Start composing a code word.'
# The basic FORTH word 'end-code's run time.
def doendcode(_me=None):
global compiling
if compiling!="code":
panic("Error! 'end-code' a none code word.")
current_word_list().append(Word(newname,newxt))
last().vid = current;
last().wid = len(current_word_list())-1;
last().type = 'code';
# ---------
mm = re.match(r"^.*?#\s*(.*)$", last().xt.__doc__.split('\n')[1])
last().help = mm.groups()[0] if mm and mm.groups()[0] else ""
# ---------
wordhash[last().name] = last();
compiling = False;
endcode = Word('end-code', doendcode)
endcode.vid = 'forth'
endcode.wid = 2
endcode.type = 'code'
endcode.immediate = True
endcode.compileonly = True
endcode.help = '( -- ) Wrap up the new code word.'
# forth master word-list
# Letting current_word_list()[0] == 0 has many advantages. When tick('name')
# returns a 0, current_word_list()[0] is 0 too, which indicates a not-found.
words[current] = [0,code,endcode]
# Find a word as soon as possible.
wordhash = {"code":current_word_list()[1], "end-code":current_word_list()[2]};
# Command interface to the project-k VM.
# The input can be multiple lines or an entire ~.f file.
# Yet it usually is the TIB (Terminal input buffer).
def dictate(input):
global tib, ntib, ip, stop
tibwas = tib
ntibwas = ntib
ipwas = ip
tib = input;
ntib = 0;
stop = False; # stop outer loop
outer();
tib = tibwas;
ntib = ntibwas;
ip = ipwas;
return(vm) # support function cascade
# -------------------- end of main() -----------------------------------------
# Top of Stack access easier. ( tos(2) tos(1) tos(void|0) -- ditto )
# tos(i,new) returns tos(i) and also changes tos(i) to the new value; this is handy
# for counting up or down in a loop.
def tos(index=None,value=None):
global stack
if index==None:
return stack[-1]
elif value==None:
return stack[len(stack)-1-index];
else:
data = stack[len(stack)-1-index];
stack[len(stack)-1-index] = value;
return(data);
# Top of return Stack access easier. ( rtos(2) rtos(1) rtos(void|0) -- ditto )
# rtos(i,new) returns rtos(i) and also changes rtos(i) to the new value; this is handy
# for counting up or down in a loop.
def rtos(index=None,value=None):
global rstack
if index==None:
return rstack[-1]
elif value==None:
return rstack[len(rstack)-1-index];
else:
data = rstack[len(rstack)-1-index];
rstack[len(rstack)-1-index] = value;
return(data);
# rstack access easier. e.g. rpop(1) gets rtos(1)
# ( rtos(2) rtos(1) rtos(0) -- rtos(2) rtos(0) )
# push(formula(rpop(i)),i-1) manipulates the rtos(i) directly, usually when i is the index
# of a loop.
def rpop(index=None):
if index==None:
return rstack.pop();
else:
return rstack.pop(len(rstack)-1-index);
# Stack access easier. e.g. pop(1) gets tos(1) ( tos(2) tos(1) tos(0) -- tos(2) tos(0) )
# push(formula(pop(i)),i-1) manipulates tos(i) directly, when i is the index of a loop.
def pop(index=None):
if index==None:
return stack.pop();
else:
return stack.pop(len(stack)-1-index);
# Stack access easier. e.g. push(data,1) inserts data to tos(1),
# ( tos2 tos1 tos -- tos2 tos1 data tos )
# push(formula(pop(i)),i-1) manipulates tos(i) directly, usually when i
# is the index of a loop.
def push(data=None, index=None):
global stack
if index==None:
stack.append(data);
else:
stack.insert(len(stack)-1-index,data);
return(vm) # support function cascade
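# A small illustration of the stack helpers above, starting from an empty stack:
#   push(1); push(2)    # stack is now [1, 2]
#   tos()               # --> 2, peek at the top
#   tos(1)              # --> 1, peek one level below the top
#   pop(1)              # --> 1, removes it; stack is now [2]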
# ---- end of projectk.py ----
|
python
|
from wallet import Wallet
wallet = Wallet()
address = wallet.getnewaddress()
print(address)
|
python
|
from config.settings_base import *
##### EDIT BELOW
API_KEY = "Paste your key in between these quotation marks"
|
python
|
from rest_framework import serializers
from .models import Homework
class HomeworkStudentSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
course = serializers.ReadOnlyField(source='course.id')
lecture = serializers.ReadOnlyField(source='lecture.id')
grade = serializers.ReadOnlyField(read_only=True)
comments = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = Homework
fields = ['id', 'created', 'owner', 'course', 'lecture', 'hometask', 'url', 'grade', 'comments']
class HomeworkTeacherSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
course = serializers.ReadOnlyField(source='course.id')
lecture = serializers.ReadOnlyField(source='lecture.id')
hometask = serializers.PrimaryKeyRelatedField(read_only=True)
url = serializers.ReadOnlyField(read_only=True)
comments = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = Homework
fields = ['id', 'created', 'owner', 'course', 'lecture', 'hometask', 'url', 'grade', 'comments']
|
python
|
#!/usr/bin/env python
'''
Copyright (C) 2019, WAFW00F Developers.
See the LICENSE file for copying permission.
'''
NAME = 'Open-Resty Lua Nginx (FLOSS)'
def is_waf(self):
schema1 = [
self.matchHeader(('Server', r'^openresty/[0-9\.]+?')),
self.matchStatus(403)
]
schema2 = [
self.matchContent(r'openresty/[0-9\.]+?'),
self.matchStatus(406)
]
if all(i for i in schema1):
return True
if all(i for i in schema2):
return True
return False
|
python
|
from pypy.rlib import _rffi_stacklet as _c
from pypy.rlib import objectmodel, debug
from pypy.rpython.annlowlevel import llhelper
from pypy.tool.staticmethods import StaticMethods
class StackletGcRootFinder:
__metaclass__ = StaticMethods
def new(thrd, callback, arg):
h = _c.new(thrd._thrd, llhelper(_c.run_fn, callback), arg)
if not h:
raise MemoryError
return h
new._annspecialcase_ = 'specialize:arg(1)'
def switch(thrd, h):
h = _c.switch(thrd._thrd, h)
if not h:
raise MemoryError
return h
def destroy(thrd, h):
_c.destroy(thrd._thrd, h)
if objectmodel.we_are_translated():
debug.debug_print("not using a framework GC: "
"stacklet_destroy() may leak")
is_empty_handle = _c.is_empty_handle
def get_null_handle():
return _c.null_handle
gcrootfinder = StackletGcRootFinder # class object
|
python
|
# coding: utf-8
from __future__ import unicode_literals
import re
from django import forms
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.utils.encoding import iri_to_uri, smart_text
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
__all__ = ['CategoryChoiceField', 'build_absolute_uri']
absolute_http_url_re = re.compile(r'^https?://', re.I)
class CategoryChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
# pylint: disable=W0212
level = getattr(obj, obj._mptt_meta.level_attr)
indent = max(0, level - 1) * '│'
if obj.parent:
last = ((obj.parent.rght - obj.rght == 1) and
(obj.rght - obj.lft == 1))
if last:
indent += '└ '
else:
indent += '├ '
return '%s%s' % (indent, smart_text(obj))
def build_absolute_uri(location, is_secure=False):
from django.contrib.sites.models import Site
site = Site.objects.get_current()
host = site.domain
if not absolute_http_url_re.match(location):
current_uri = '%s://%s' % ('https' if is_secure else 'http', host)
location = urljoin(current_uri, location)
return iri_to_uri(location)
def get_paginator_items(items, paginate_by, page):
paginator = Paginator(items, paginate_by)
try:
items = paginator.page(page)
except PageNotAnInteger:
items = paginator.page(1)
except EmptyPage:
items = paginator.page(paginator.num_pages)
return items
|
python
|
from typing import List
import torch
import numpy as np
# details about math operations in torch can be found in: http://pytorch.org/docs/torch.html#math-operations
# convert numpy to tensor or vice versa
np_data = np.arange(6).reshape((2, 3)) # reshape the 1x6 array into a 2x3 matrix
# numpy.arange([start=0, ]stop, [step=1, ]dtype=None) np.arange(6) ->[0 1 2 3 4 5]
torch_data = torch.from_numpy(np_data)
tensor2array = torch_data.numpy()
print(
'\nnumpy array:', np_data, # [[0 1 2], [3 4 5]]
'\ntorch tensor:', torch_data, # 0 1 2 \n 3 4 5 [torch.LongTensor of size 2x3]
'\ntensor to array:', tensor2array, # [[0 1 2], [3 4 5]]
)
# abs
data: List[int] = [-1, -2, 1, 2]
tensor = torch.FloatTensor(data) # 32-bit floating point
print(
'\nabs',
'\nnumpy: ', np.abs(data), # [1 2 1 2]
'\ntorch: ', torch.abs(tensor) # [1 2 1 2]
)
# sin
print(
'\nsin',
'\nnumpy: ', np.sin(data), # [-0.84147098 -0.90929743 0.84147098 0.90929743]
'\ntorch: ', torch.sin(tensor) # [-0.8415 -0.9093 0.8415 0.9093]
)
# mean
print(
'\nmean',
'\nnumpy: ', np.mean(data), # 0.0
'\ntorch: ', torch.mean(tensor) # 0.0
)
# matrix multiplication
data2 = [[1, 2], [3, 4]]
tensor = torch.FloatTensor(data2) # 32-bit floating point
# correct method
print(
'\nmatrix multiplication (matmul)',
'\nnumpy: ', np.matmul(data2, data2), # [[7, 10], [15, 22]]
'\ntorch: ', torch.mm(tensor, tensor) # [[7, 10], [15, 22]]
)
'''
Element-wise multiplication multiplies entries at corresponding positions and requires both
matrices to have the same shape.
Matrix multiplication multiplies row i of matrix a by column j of matrix b, summing the
element-wise products to get entry (i, j); it requires the number of columns of a to equal
the number of rows of b, and the product has as many rows as the left matrix and as many
columns as the right matrix.
So, to be precise:
numpy.matmul() and torch.mm() perform matrix multiplication,
numpy.multiply() and torch.mul() perform element-wise (Hadamard) multiplication.
'''
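# element-wise multiplication for comparison (added illustration)
print(
    '\nelement-wise multiplication (mul)',
    '\nnumpy: ', np.multiply(data2, data2), # [[1, 4], [9, 16]]
    '\ntorch: ', torch.mul(tensor, tensor) # [[1., 4.], [9., 16.]]
)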
# incorrect method
# data2 = np.array(data2)
# print(
# '\nmatrix multiplication (dot)',
# '\nnumpy: ', data.dot(data2), # [[7, 10], [15, 22]]
# '\ntorch: ', tensor.dot(tensor) # this will convert tensor to [1,2,3,4], you'll get 30.0
# )
|
python
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:core_acc_env] *
# language: python
# name: conda-env-core_acc_env-py
# ---
# # Differential expression analysis
#
# This notebook performs Differential Expression analysis using experiment, PRJNA283002, associated with [this publication](https://pubmed.ncbi.nlm.nih.gov/26078448/). Here they characterized the Anr regulon by comparing WT vs anr mutants.
# +
# %load_ext autoreload
# %load_ext rpy2.ipython
# %autoreload 2
import os
import pandas as pd
from rpy2.robjects import pandas2ri
from core_acc_modules import paths_corr, utils, DE_helper
pandas2ri.activate()
# -
# Load gene expression data
expression_df = pd.read_csv(paths_corr.PAO1_GE, sep="\t", index_col=0, header=0)
# ## Select expression data for experiment and replace gene ids
# +
# Select expression data associated with PRJNA283002 experiment
sample_metadata = pd.read_csv(paths_corr.DE_METADATA, sep="\t", index_col=0, header=0)
select_sample_ids = list(sample_metadata.index)
select_expression_df = expression_df.loc[select_sample_ids]
# +
# Replace gene sequencing ids with PAO1 ids to help us interpret our findings
pao1_fasta_file = paths_corr.PAO1_REF
seq_id_to_gene_id_pao1 = utils.dict_gene_num_to_ids(pao1_fasta_file)
select_expression_df.rename(mapper=seq_id_to_gene_id_pao1, axis="columns", inplace=True)
select_expression_df.head()
# -
# Save selected expression data
select_expression_df.to_csv(paths_corr.SELECT_GE, sep="\t")
# ## DE analysis
# Process data for DESeq
DE_helper.process_samples_for_DESeq(
paths_corr.SELECT_GE,
paths_corr.DE_METADATA,
paths_corr.SELECT_GE_PROCESSED,
)
# Create subdirectory: "<local_dir>/DE_stats/"
os.makedirs(paths_corr.DE_STATS_DIR, exist_ok=True)
# Convert python path objects for use by R in the next cell
metadata_filename = str(paths_corr.DE_METADATA)
processed_expression_filename = str(paths_corr.SELECT_GE_PROCESSED)
repo_dir = str(paths_corr.PROJECT_DIR)
out_filename = str(paths_corr.DE_STATS_OUTPUT)
# + magic_args="-i metadata_filename -i processed_expression_filename -i out_filename -i repo_dir" language="R"
#
# source(paste0(repo_dir, '/core_acc_modules/DE_analysis.R'))
#
# # File created: "<local_dir>/DE_stats/DE_stats_template_data_<project_id>_real.txt"
# get_DE_stats_DESeq(
# metadata_filename,
# processed_expression_filename,
# out_filename
# )
# -
# ## Compare results with publication
# +
# Get top DEGs
# Compare against publication
DE_stats = pd.read_csv(paths_corr.DE_STATS_OUTPUT, sep="\t", header=0, index_col=0)
selected_DE_stats = DE_stats[(abs(DE_stats["log2FoldChange"]) > 1)]
print(selected_DE_stats.shape)
selected_DE_stats
# -
published_DEGs = [
"PA1557",
"PA3928",
"PA2119",
"PA3847",
"PA0515",
"PA0513",
"PA0512",
"PA0510",
"PA0521",
"PA0522",
"PA0525",
"PA0526",
"PA2126",
"PA2127",
"PA2133",
]
selected_DE_stats.loc[published_DEGs]
# +
input_DESeq_data = pd.read_csv(
processed_expression_filename, sep="\t", index_col=0, header=0
)
input_DESeq_data[published_DEGs]
# -
# The differential expression results can be found in [Figure 1](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4524035/) of the paper. Spot checking it looks like the genes have consistent direction of logFC.
#
# Note:
# * NaN's occur if the samples are all 0. Need to check why PA3847 is NaN. Setting filtering to False doesn't get rid of NaNs
#
# https://bioconductor.org/packages/release/bioc/vignettes/DESeq2/inst/doc/DESeq2.html#why-are-some-p-values-set-to-na
|
python
|
from rest_framework import viewsets, filters, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.views import APIView
from profiles_api import permissions
from profiles_api import serializers, models
# noinspection PyMethodMayBeStatic
class HelloAPIViewSets(viewsets.ViewSet):
"""Test API Viewset"""
serializer_class = serializers.HelloSerializer
def list(self, request):
"""Return a hello message"""
a_viewset = [
'Uses actions(list, create, retrieve, update, partial_update)'
'Automatically maps to URLs',
'More features , less code'
]
return Response({'message': 'Hello', 'an_apiview': a_viewset})
def create(self, request):
"""Create a new hello message"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
            message = f'Hello {name}'
return Response({'message': message})
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def retrieve(self, request, pk=None):
"""Retrieve an particular obj"""
return Response({'method': 'GET'})
def update(self, request, pk=None):
"""Update an object"""
return Response({'method': 'PUT'})
def partial_update(self, request, pk=None):
"""Update a field in object"""
return Response({'method': 'PATCH'})
    def destroy(self, request, pk=None):
"""delete an object"""
return Response({'method': 'DELETE'})
# noinspection PyMethodMayBeStatic
class HelloAPIView(APIView):
"""Test API View"""
serializer_class = serializers.HelloSerializer
def get(self, request):
"""Return a list of API view features"""
an_apiview = [
'Uses HTTP methods as function (get, post, patch, put, delete)'
'Is similar to a traditional view',
'Gives you the most control over application',
'Is mapped manually to URLs'
]
return Response({'message': 'Hello', 'an_apiview': an_apiview})
def post(self, request):
"""Create hello message post request"""
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
name = serializer.validated_data.get('name')
            message = f'Hello {name}'
return Response({'message': message})
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def put(self, request):
"""Update an object"""
return Response({'method': 'PUT'})
def patch(self, request):
"""Partial Update of the object"""
return Response({'method': 'PATCH'})
    def delete(self, request):
"""delete an object"""
return Response({'method': 'DELETE'})
# noinspection PyMethodMayBeStatic
class UserViewSet(viewsets.ModelViewSet):
"""Handles creating and updating viewset"""
serializer_class = serializers.UserSerializer
queryset = models.User.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (permissions.UpdateOwnProfile,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name', 'email',)
class UserLoginAPIView(ObtainAuthToken):
"""Handling creating user auth"""
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
"""Handles creating, reading and updating profile feed items"""
authentication_classes = (TokenAuthentication,)
serializer_class = serializers.ProfileFeedItemSerializer
queryset = models.ProfileFeedItem.objects.all()
permission_classes = (
permissions.UpdateOwnStatus,
IsAuthenticated
)
def perform_create(self, serializer):
"""Sets the user profile to the logged-in user"""
serializer.save(user_profile=self.request.user)
|
python
|
import json, yaml
import logging
DEBUG = 0
logger = logging.getLogger()
if DEBUG:
#coloredlogs.install(level='DEBUG')
logger.setLevel(logging.DEBUG)
else:
#coloredlogs.install(level='INFO')
logger.setLevel(logging.INFO)
strhdlr = logging.StreamHandler()
logger.addHandler(strhdlr)
formatter = logging.Formatter('%(asctime)s [%(filename)s:%(lineno)d] %(levelname)s %(message)s')
strhdlr.setFormatter(formatter)
|
python
|
#!/usr/bin/env python3
""" Makes Maven multi module project. """
from argparse import ArgumentParser
from os import makedirs
from os.path import realpath, relpath, dirname, normpath
from sys import argv
import vang.maven.pom as pom
POM_TEMPLATE = """<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>###group_id###</groupId>
<artifactId>###artifact_id###</artifactId>
<version>###version###</version>
<packaging>pom</packaging>
<modules>
###modules###
</modules>
</project>"""
def get_pom(pom_infos, output_dir, group_id, artifact_id, version):
""" Returns multi module pom content for pom_infos
with paths relative to output_dir. """
modules = '\n'.join(' <module>{}</module>'.format(
relpath(realpath(dirname(info['pom_path'])), realpath(output_dir)))
for info in pom_infos)
return POM_TEMPLATE \
.replace('###group_id###', group_id) \
.replace('###artifact_id###', artifact_id) \
.replace('###version###', version) \
.replace('###modules###', modules)
def make_project(pom_infos, output_dir, group_id, artifact_id, version,
**kwargs):
""" Makes a Maven multi module project. """
pom = get_pom(pom_infos, output_dir, group_id, artifact_id, version)
makedirs(output_dir)
with open(
normpath(f'{output_dir}/pom.xml'),
'wt',
encoding='utf-8',
) as pom_file:
pom_file.write(pom)
def get_pom_infos(source_dir):
pom_infos = []
for pom_path in pom.get_pom_paths(source_dir):
try:
pom_info = pom.get_pom_info(pom_path)
pom_infos.append(pom_info)
except Exception as e: # pragma: no cover
print(f'Can not add {pom_path}')
print(e)
return pom_infos
def parse_args(args):
parser = ArgumentParser(description='Create Maven multi module project')
parser.add_argument(
'-d',
'--use_defaults',
action='store_true',
help='Create with default values.')
return parser.parse_args(args)
def main(use_defaults):
defaults = {
'group_id': 'my.group',
'artifact_id': 'ws',
'version': '1.0.0-SNAPSHOT',
'source_dir': '.',
'output_dir': 'ws'
}
if use_defaults:
pom_infos = get_pom_infos(defaults['source_dir'])
make_project(pom_infos, **defaults)
else:
group_id = str(
            input('groupId (default my.group): ') or defaults['group_id'])
artifact_id = str(
input('artifactId (default ws): ') or defaults['artifact_id'])
version = str(
input('version (default 1.0.0-SNAPSHOT): ') or defaults['version'])
source_dir = normpath(
str(input('sourceDir: (default .)') or defaults['source_dir']))
output_dir = normpath(
str(input('outputDir: (default ./ws)') or defaults['output_dir']))
pom_infos = get_pom_infos(source_dir)
make_project(pom_infos, output_dir, group_id, artifact_id, version)
if __name__ == '__main__': # pragma: no cover
main(**parse_args(argv[1:]).__dict__)
|
python
|
from sparkpost import SparkPost
sp = SparkPost()
response = sp.templates.update(
'TEST_ID',
name='Test Template',
from_email='[email protected]',
subject='Updated Test email template!',
html='<b>This is a test email template! Updated!</b>'
)
print(response)
|
python
|
# Copyright 2013 - Noorul Islam K M
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from solumclient.common.apiclient import client
from solumclient.common.apiclient import fake_client
from solumclient.tests import base
from solumclient.v1 import plan
plan_list = [
{
'name': 'Example plan 1',
'artifacts': (
[{'name': 'My python app',
'artifact_type': 'git_pull',
'content': {'href': 'git://example.com/project.git'},
'requirements': [{
'requirement_type': 'git_pull',
'language_pack': '1dae5a09ef2b4d8cbf3594b0eb4f6b94',
'fulfillment': '1dae5a09ef2b4d8cbf3594b0eb4f6b94'}]}]),
'services': [{'name': 'Build Service',
'id': 'build',
'characteristics': ['python_build_service']}],
'description': 'A plan with no services or artifacts shown'
},
{
'name': 'Example plan 2',
'artifacts': (
[{'name': 'My java app',
'artifact_type': 'git_pull',
'content': {'href': 'git://example.com/project.git'},
'requirements': [{
'requirement_type': 'git_pull',
'language_pack': '1dae5a09ef2b4d8cbf3594b0eb4f6b94',
'fulfillment': '1dae5a09ef2b4d8cbf3594b0eb4f6b94'}]}]),
'services': [{'name': 'Build Service',
'id': 'build',
'characteristics': ['python_build_service']}],
'description': 'A plan with no services or artifacts shown'
},
]
artifacts = [{'name': 'My python app',
'artifact_type': 'git_pull',
'content': {'href': 'git://example.com/project.git'},
'requirements': [{
'requirement_type': 'git_pull',
'language_pack': '1dae5a09ef2b4d8cbf3594b0eb4f6b94',
'fulfillment': '1dae5a09ef2b4d8cbf3594b0eb4f6b94'}]}]
services = [{'name': 'Build Service',
'id': 'build',
'characteristics': ['python_build_service']}]
plan_fixture = {
'uri': 'http://example.com/v1/plans/p1',
'name': 'Example plan',
'type': 'plan',
'tags': ['small'],
'artifacts': artifacts,
'services': services,
'project_id': '1dae5a09ef2b4d8cbf3594b0eb4f6b94',
'user_id': '55f41cf46df74320b9486a35f5d28a11',
'description': 'A plan with no services or artifacts shown'
}
fixtures_list = {
'/v1/plans': {
'GET': (
{},
plan_list
),
}
}
fixtures_list_empty = {
'/v1/plans': {
'GET': (
{},
[]
),
}
}
fixtures_get = {
'/v1/plans/p1': {
'GET': (
{},
plan_fixture
),
}
}
fixtures_create = {
'/v1/plans': {
'POST': (
{},
plan_fixture
),
}
}
fixtures_put = {
'/v1/plans/p1': {
'PUT': (
{},
plan_fixture
),
}
}
class PlanManagerTest(base.TestCase):
def assert_plan_obj(self, plan_obj):
self.assertIn('Plan', repr(plan_obj))
self.assertIn('Artifact', repr(plan_obj.artifacts[0]))
self.assertIn('ServiceReference', repr(plan_obj.services[0]))
self.assertEqual(plan_fixture['uri'], plan_obj.uri)
self.assertEqual(plan_fixture['type'], plan_obj.type)
self.assertEqual(plan_fixture['project_id'], plan_obj.project_id)
self.assertEqual(plan_fixture['user_id'], plan_obj.user_id)
def test_list_all(self):
fake_http_client = fake_client.FakeHTTPClient(fixtures=fixtures_list)
api_client = client.BaseClient(fake_http_client)
plan.PlanManager(api_client)
# NOTE(stannie): will re-enable this test once
# https://bugs.launchpad.net/solum/+bug/1331093 is committed.
# FakeHTTPClient doesn't manage YAML properly but since this method
# will use the json content-type once implemented in the API, this can
# stay temporary disabled.
def test_list_empty(self):
fake_http_client = fake_client.FakeHTTPClient(
fixtures=fixtures_list_empty)
api_client = client.BaseClient(fake_http_client)
mgr = plan.PlanManager(api_client)
self.assertEqual([], mgr.list())
def test_create(self):
fake_http_client = fake_client.FakeHTTPClient(fixtures=fixtures_create)
api_client = client.BaseClient(fake_http_client)
mgr = plan.PlanManager(api_client)
plan_obj = mgr.create('version: 1\nname: ex_plan1\ndescription: dsc1.')
self.assert_plan_obj(plan_obj)
def test_plan_create_post_failure(self):
api_client = mock.MagicMock()
api_client.post.side_effect = Exception("Bad data")
try:
mgr = plan.PlanManager(api_client)
mgr.create('version: 1\nname: ex_plan1\ndescription: dsc1.')
except Exception:
self.assertTrue(True)
def test_plan_create_post_success(self):
api_client = mock.MagicMock()
dummy_data = 'version: 1\nname: ex_plan1\ndescription: dsc1.'
response = mock.MagicMock()
setattr(response, 'content', dummy_data)
api_client.post.return_value = response
try:
mgr = plan.PlanManager(api_client)
plan_obj = mgr.create(dummy_data)
assert plan_obj is not None
assert plan_obj.name == 'ex_plan1'
assert plan_obj.description == 'dsc1.'
assert plan_obj.version == 1
except Exception:
self.assertFalse(True)
def test_get(self):
fake_http_client = fake_client.FakeHTTPClient(fixtures=fixtures_get)
api_client = client.BaseClient(fake_http_client)
mgr = plan.PlanManager(api_client)
plan_obj = mgr.get(plan_id='p1')
self.assert_plan_obj(plan_obj)
def test_update(self):
fake_http_client = fake_client.FakeHTTPClient(fixtures=fixtures_put)
api_client = client.BaseClient(fake_http_client)
mgr = plan.PlanManager(api_client)
plan_obj = mgr.update('version: 1\nname: ex_plan1\ndescription: dsc1.',
plan_id='p1')
self.assert_plan_obj(plan_obj)
|
python
|
import json
import logging
import os
import boto3
from get_and_parse_hiscores.lib.hiscores import rs_api
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ddb = boto3.resource("dynamodb")
table = ddb.Table(os.environ["HISCORES_TABLE_NAME"])
def handler(event, context):
"""Call HiScores API, parse response, and save to Dynamo table."""
# retrieve player username
logger.debug(f"Received event: {event}")
records = event["Records"]
try:
players = [json.loads(record["body"])["player"] for record in records]
player = players[0]
except (KeyError, IndexError):
raise ValueError(f"Event did not contain player names: {event}")
if len(players) > 1:
        logger.warning(
f"Received records for multiple players: {players}. "
f"Only the first player, '{player}', will be processed."
)
player = player.replace("-", " ")
# retrieve HiScores for `player`
logger.info(f"Getting HiScores for {player}")
payload = rs_api.process_hiscores_response(
rs_api.request_hiscores(player=player, timeout=45.0)
)
# write result to `table`
logger.info(
f"Putting payload for player '{payload['player']}', "
f"timestamp '{payload['timestamp']}'"
)
logger.debug(f"Putting payload {payload}")
table.put_item(Item=payload)
return payload
|
python
|
import sys
import copy
import math
def extra():
fp = open("23.input")
nums = list(map(int, fp.readline()))
next_nums = {}
for i in range(len(nums) - 1):
next_nums[nums[i]] = nums[i + 1]
MAX_VAL = 1_000_000
next_nums[nums[-1]] = 10
for i in range(10, MAX_VAL):
next_nums[i] = i + 1
next_nums[MAX_VAL] = nums[0]
N_PICKUP = 3
N_ROUND = 10_000_000
current = nums[0]
for n_round in range(N_ROUND):
if n_round != 0:
current = next_nums[current]
one = next_nums[current]
two = next_nums[one]
three = next_nums[two]
pickup = (one, two, three)
destination = current - 1 if current > 1 else MAX_VAL
while destination in pickup:
destination = destination - 1 if destination > 1 else MAX_VAL
# Fix
next_nums[current] = next_nums[three]
next_nums[three] = next_nums[destination]
next_nums[destination] = one
ans = next_nums[1] * next_nums[next_nums[1]]
print(ans)
def main():
fp = open("23.input")
nums = list(map(int, fp.readline()))
curr_cup_idx = 0
N_PICKUP = 3
N_ROUND = 100
for n_round in range(N_ROUND):
# curr step
current_val = nums[curr_cup_idx]
pickups = [
nums[(curr_cup_idx + x) % len(nums)]
for x in range(1, N_PICKUP + 1)
]
destination = nums[curr_cup_idx] - 1 if nums[curr_cup_idx] > 1 else 9
while True:
if destination in pickups:
destination = destination - 1 if destination > 1 else 9
else:
break
# print("Round ===> {}".format(n_round + 1))
# print("Current: [{}] {}".format(nums[curr_cup_idx], nums))
# print("Pickup: {}".format(pickups))
# print("Destination: {}".format(destination))
# move to next step
for pickup in pickups:
nums.remove(pickup)
for i, num in enumerate(nums):
if num == destination:
for k, pickup in enumerate(pickups):
nums.insert(i + k + 1, pickup)
break
for i, num in enumerate(nums):
if num == current_val:
curr_cup_idx = i
break
curr_cup_idx = (curr_cup_idx + 1) % len(nums)
pos1 = None
for i, num in enumerate(nums):
if num == 1:
pos1 = i
break
ans = []
for i in range(8):
ans.append(str(nums[(pos1 + i + 1) % len(nums)]))
ans = ''.join(ans)
print(ans)
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1] == 'extra':
extra()
else:
main()
|
python
|
# coding=utf-8
"""
Common methods for UI code.
"""
from __future__ import absolute_import
from datacube.utils import is_supported_document_type
def get_metadata_path(dataset_path):
"""
Find a metadata path for a given input/dataset path.
:type dataset_path: pathlib.Path
:rtype: Path
"""
# They may have given us a metadata file directly.
if dataset_path.is_file() and is_supported_document_type(dataset_path):
return dataset_path
# Otherwise there may be a sibling file with appended suffix '.agdc-md.yaml'.
expected_name = dataset_path.parent.joinpath('{}.agdc-md'.format(dataset_path.name))
found = _find_any_metadata_suffix(expected_name)
if found:
return found
# Otherwise if it's a directory, there may be an 'agdc-metadata.yaml' file describing all contained datasets.
if dataset_path.is_dir():
expected_name = dataset_path.joinpath('agdc-metadata')
found = _find_any_metadata_suffix(expected_name)
if found:
return found
raise ValueError('No metadata found for input %r' % dataset_path)
def _find_any_metadata_suffix(path):
"""
Find any metadata files that exist with the given file name/path.
(supported suffixes are tried on the name)
:type path: pathlib.Path
"""
existing_paths = list(filter(is_supported_document_type, path.parent.glob(path.name + '*')))
if not existing_paths:
return None
if len(existing_paths) > 1:
raise ValueError('Multiple matched metadata files: {!r}'.format(existing_paths))
return existing_paths[0]
|
python
|
import sys
import json
import collections
MEM_TOKEN_SIZE = 4
def build_vocab(tokens, vocab_count):
token_list = tokens.split()
for tok in token_list:
if tok in vocab_count.keys():
vocab_count[tok] += 1
else:
vocab_count[tok] = 1
def process_one_conversation(text, vocab_count):
    conversation = json.loads(text.strip(), object_pairs_hook=collections.OrderedDict)
goal = conversation["goal"]
knowledge = conversation["knowledge"]
history = conversation["history"] if len(conversation["history"]) > 0 else ["null"]
response = conversation["response"] if "response" in conversation else "null"
video_entities, person_entities = [], []
context_arr, conv_arr, kb_arr = [], [], []
all_entities = {'topic_a': [],
'topic_b': []
}
topic_a = goal[0][1]
topic_b = goal[0][2]
nid = 0
for i, triple in enumerate(knowledge):
[s, p, o] = triple
triple_str = " ".join(triple)
build_vocab(triple_str, vocab_count)
assert s in [topic_a, topic_b]
o_tokens = o.split()
if s == topic_a:
all_entities['topic_a'].append(s)
for tok in o_tokens:
all_entities['topic_a'].append(tok)
else:
all_entities['topic_b'].append(s)
for tok in o_tokens:
all_entities['topic_b'].append(tok)
if u"领域" == p:
if topic_a == s:
domain_a = o
if domain_a == u"电影":
video_entities.append(topic_a)
else:
person_entities.append(topic_a)
elif topic_b == s:
domain_b = o
if domain_b == u"电影":
video_entities.append(topic_b)
else:
person_entities.append(topic_b)
kb_info = generate_memory(triple, "", str(nid))
kb_arr += kb_info
context_arr = kb_info + context_arr
for i, utterance in enumerate(history):
if utterance == 'null':
gen_m = generate_memory(utterance, "$u", 0)
elif i % 2 == 0:
build_vocab(utterance, vocab_count)
nid += 1
gen_m = generate_memory(utterance, "$s", str(nid))
else:
build_vocab(utterance, vocab_count)
gen_m = generate_memory(utterance, "$u", str(nid))
context_arr += gen_m
conv_arr += gen_m
build_vocab(response, vocab_count)
# get gold entity for each response
gold_ent = []
for w in response.split():
if w in all_entities['topic_a'] or w in all_entities['topic_b']:
gold_ent.append(w)
# get local pointer position for each word in system response
ptr_index = []
for key in response.split():
index = [loc for loc, val in enumerate(context_arr) if (val[0] == key and key in gold_ent)]
if len(index) > 0:
index = max(index)
else:
index = len(context_arr)
ptr_index.append(index)
# get global pointer labels for words in system response, the 1 in the end is for the NULL token
selector_index = [1 if (word_arr[0] in gold_ent or word_arr[0] in response.split())
else 0 for word_arr in context_arr] + [1]
# get sketch response
topic_entity = [topic_a, topic_b]
sketch_response = generate_template(topic_entity, response)
data_detail = {
'context_arr': list(context_arr + [['$$$$'] * MEM_TOKEN_SIZE]), # $$$$ is NULL token
'response': response,
'sketch_response': sketch_response,
'ptr_index': ptr_index + [len(context_arr)],
'selector_index': selector_index,
'ent_index': gold_ent,
'conv_arr': list(conv_arr),
'kb_arr': list(kb_arr)
}
return data_detail
def generate_memory(sent, speaker, time):
sent_new = []
if speaker == "$u" or speaker == "$s": # dialogue memory
sent_token = sent.split(' ')
for idx, word in enumerate(sent_token):
temp = [word, speaker, 'turn'+str(time), 'word'+str(idx)] + ["PAD"]*(MEM_TOKEN_SIZE-4)
sent_new.append(temp)
else: # knowledge memory
sent_token = sent[::-1] + ["PAD"]*(MEM_TOKEN_SIZE-len(sent))
sent_new.append(sent_token)
return sent_new
def generate_template(topic_entity, sentence):
"""
Based on the system response and the provided entity table, the output is the sketch response.
"""
sketch_response = []
for word in sentence.split():
if word not in topic_entity:
sketch_response.append(word)
else:
if word == topic_entity[0]:
ent_type = 'topic_a'
else:
ent_type = 'topic_b'
sketch_response.append('@' + ent_type)
sketch_response = " ".join(sketch_response)
return sketch_response
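# Illustrative sketch of generate_template (hypothetical values):
#   generate_template(["topicA", "topicB"], "I like topicA") --> "I like @topic_a"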
def convert_sample_to_json(sample_file, json_file, vocab_file=None):
print("Reading lines from %s" % sample_file)
vocab_count = {}
with open(sample_file, 'r') as fr, open(json_file, 'w') as fw:
for i, line in enumerate(fr):
text_dict = process_one_conversation(line, vocab_count)
text_json = json.dumps(text_dict, ensure_ascii=False)
fw.write(text_json + "\n")
if i > 0 and i % 10000 == 0:
print("line %d done" % i)
if vocab_file is not None:
print("Building vocabs...")
vocab_sorted = sorted(vocab_count.items(), key=lambda tup: tup[1], reverse=True)
with open(vocab_file, 'w') as fw:
for word, freq in vocab_sorted:
fw.write(word + '\t' + str(freq) + '\n')
if __name__ == '__main__':
try:
convert_sample_to_json(sys.argv[1], sys.argv[2], sys.argv[3])
except KeyboardInterrupt:
print("\nExited from the program ealier!")
|
python
|
import os, wifisec, platform
from speedtest import SpeedTest
from hku import fetch_heroku
from rich import print
from rich.table import Table
def display():
plat = platform.system()
global clear
if plat == "Linux":
clear = lambda: os.system('clear')
elif plat == "Windows":
clear = lambda: os.system('cls')
while True:
main_tab = Table(header_style="bold magenta", show_edge=False, show_lines=False, show_header=False)
main_tab.add_column("Network")
main_tab.add_column("Projects")
projects_tab = Table(header_style="bold magenta", title="Projects")
projects_tab.add_column("Name")
projects_tab.add_column("Running")
for p in fetch_heroku():
if p[1]:
projects_tab.add_row(str(p[0]), "[green]True[/green]")
else:
projects_tab.add_row(str(p[0]), "[red]False[/red]")
try:
st = SpeedTest()
ping = "%.2f" % st.ping()
#download = "%.2f" % st.download()
#upload = "%.2f" % st.upload()
except OSError:
ping = "# Connection Error"
status_tab = Table(title="Network Status", header_style="bold magenta")
status_tab.add_column("Stat")
status_tab.add_column("Data")
status_tab.add_row("Ping", str(ping))
#status_tab.add_row("Download", str(download))
#status_tab.add_row("Upload", str(upload))
devices = wifisec.who()
connected_tab = Table(title="Connected Devices", header_style="bold magenta")
connected_tab.add_column("NAME")
connected_tab.add_column("MAC")
connected_tab.add_column("IP")
clear()
for d in devices:
ip = d[1].split(".")
for i, sec in enumerate(ip[1:-1]):
ip[i+1] = "#"*len(sec)
ip = ".".join(ip)
mac = d[3].split(":")
for i, sec in enumerate(mac[1:-1]):
mac[i+1] = "#"*len(sec)
mac = ":".join(mac)
name = d[-1]
if "(" in name:
name = name[:name.find("(")]
connected_tab.add_row(name, mac, ip)
main_tab.add_row(status_tab, connected_tab, projects_tab)
print(main_tab)
#print("[bold red]No device secured, breach possible.[/bold red]")
|
python
|
from scripttease.library.commands.base import Command, ItemizedCommand, Sudo
from scripttease.library.overlays.common import python_pip
class TestCommand(object):
def test_getattr(self):
c = Command("ls -ls", extra=True)
assert c.extra is True
def test_get_statement(self):
c = Command(
"ls -ls",
comment="kitchen sink",
condition="$last_command -eq 0",
cd="/path/to/project",
prefix="source python/bin/active",
register="list_success",
stop=True,
sudo="deploy"
)
statement = c.get_statement(cd=True)
assert "( cd" in statement
assert "sudo" in statement
assert ")" in statement
assert "# kitchen sink" in statement
assert "if [[ $last_command" in statement
assert "list_success=$?" in statement
assert "if [[ $list_success" in statement
c = Command(
"ls -ls",
stop=True
)
statement = c.get_statement()
assert "if [[ $?" in statement
def test_has_attribute(self):
c = Command("ls -ls")
assert c.has_attribute("testing") is False
def test_init(self):
c = Command("ls -ls", sudo=Sudo(user="deploy"))
assert isinstance(c.sudo, Sudo)
assert c.sudo.user == "deploy"
c = Command("ls -ls", sudo="deploy")
assert isinstance(c.sudo, Sudo)
assert c.sudo.user == "deploy"
c = Command("ls -ls", sudo=True)
assert isinstance(c.sudo, Sudo)
assert c.sudo.user == "root"
c = Command("ls -ls")
assert isinstance(c.sudo, Sudo)
assert c.sudo.user == "root"
assert c.sudo.enabled is False
def test_is_itemized(self):
c = Command("ls -ls")
assert c.is_itemized is False
def test_repr(self):
c = Command("ls -ls", comment="listing")
assert repr(c) == "<Command listing>"
c = Command("ls -ls")
assert repr(c) == "<Command>"
def test_set_attribute(self):
c = Command("ls -ls")
assert c.testing is None
c.set_attribute("testing", True)
assert c.testing is True
class TestItemizedCommand(object):
def test_getattr(self):
c = ItemizedCommand(python_pip, ["Pillow", "psycopg2-binary", "django"], "$item", extra=True)
assert c.extra is True
def test_get_commands(self):
c = ItemizedCommand(python_pip, ["Pillow", "psycopg2-binary", "django"], "$item")
commands = c.get_commands()
for i in commands:
assert isinstance(i, Command)
def test_get_statement(self):
c = ItemizedCommand(python_pip, ["Pillow", "psycopg2-binary", "django"], "$item")
statement = c.get_statement()
assert "Pillow" in statement
assert "psycopg2-binary" in statement
assert "django" in statement
def test_has_attribute(self):
c = ItemizedCommand(python_pip, ["Pillow", "psycopg2-binary", "django"], "$item")
assert c.has_attribute("testing") is False
def test_is_itemized(self):
c = ItemizedCommand(python_pip, ["Pillow", "psycopg2-binary", "django"], "$item")
assert c.is_itemized is True
def test_repr(self):
c = ItemizedCommand(python_pip, ["Pillow", "psycopg2-binary", "django"], "$item")
assert repr(c) == "<ItemizedCommand python_pip>"
def test_set_attribute(self):
c = ItemizedCommand(python_pip, ["Pillow", "psycopg2-binary", "django"], "$item")
assert c.testing is None
c.set_attribute("testing", True)
assert c.testing is True
class TestSudo(object):
def test_bool(self):
s = Sudo()
assert bool(s) is False
s = Sudo(True)
assert bool(s) is True
def test_str(self):
s = Sudo()
assert str(s) == ""
s = Sudo(True)
assert str(s) == "sudo -u root"
|
python
|
from AtomicContributions.ContributionsOfAtomsToModes import AtomicContributionsCalculator
import unittest
import numpy as np
import os
path_here = os.path.dirname(__file__)
class AtomicContributionToModesTest(unittest.TestCase):
def setUp(self):
self.Contributions = AtomicContributionsCalculator(PoscarName=os.path.join(path_here, 'POSCAR'),
ForceConstants=False,
ForceFileName=os.path.join(path_here, 'FORCE_SETS'),
supercell=[[3, 0, 0], [0, 3, 0], [0, 0, 4]],
primitive=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
self.Contributions_masses = AtomicContributionsCalculator(PoscarName=os.path.join(path_here, 'POSCAR'),
ForceConstants=False,
ForceFileName=os.path.join(path_here, 'FORCE_SETS'),
supercell=[[3, 0, 0], [0, 3, 0], [0, 0, 4]],
primitive=[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
masses=[12.010700, 12.010700, 15.999400, 15.999400,
14.006700, 14.006700, 14.006700, 14.006700, 2,
2,
2, 2, 2, 2, 2, 2])
self.Contributions2 = AtomicContributionsCalculator(PoscarName=os.path.join(path_here, 'POSCAR.NaCl'),
ForceConstants=False,
ForceFileName=os.path.join(path_here, 'FORCE_SETS.NaCl'),
supercell=[[2, 0, 0], [0, 2, 0], [0, 0, 2]], nac=True,
BornFileName=os.path.join(path_here, 'BORN.NaCl'),
primitive=[[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]])
self.ContributionsFC = AtomicContributionsCalculator(PoscarName=os.path.join(path_here, 'POSCAR_Methanol'),
ForceConstants=True,
ForceFileName=os.path.join(path_here,
'FORCE_CONSTANTS_Methanol'),
supercell=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], nac=False)
def test_attributes(self):
# test calculation of frequencies
self.assertAlmostEqual(self.Contributions._frequencies[47], 3490.6434922723, places=1)
# test calculation of eigenvectors
self.assertAlmostEqual(abs(self.Contributions._EigFormat[15, 47, 0]), 0.00084433323436)
self.assertAlmostEqual(abs(self.Contributions._EigFormat[15, 47, 1]), 0.00084433323436)
self.assertAlmostEqual(abs(self.Contributions._EigFormat[15, 47, 2]), 0.37170414232138)
# check if sign of eigenvectors is consistent!!
self.assertEqual(np.sign(self.Contributions._EigFormat[14, 47, 2]),
np.sign(self.Contributions._EigFormat[15, 47, 0]))
self.assertEqual(np.sign(self.Contributions._EigFormat[14, 47, 2]),
np.sign(self.Contributions._EigFormat[15, 47, 1]))
self.assertEqual(np.sign(self.Contributions._EigFormat[14, 47, 2]),
np.sign(self.Contributions._EigFormat[15, 47, 2]))
# test irreps
self.assertEqual(self.Contributions._IRLabels[-1], 'B2')
# test contributions
sum_contribution = 0.0
for atom in range(0, 16):
sum_contribution += self.Contributions._PercentageAtom[47, atom]
self.assertAlmostEqual(sum_contribution, 1.0)
# TODO: test NAC
self.assertAlmostEqual(self.Contributions2._frequencies[-1], 153.7212069157, places=2)
# TODO: set masses externally [e.g., use D mass]
self.assertAlmostEqual(self.Contributions_masses._frequencies[47], 2598.2875793589, places=1)
# test calculation of eigenvectors
self.assertAlmostEqual(abs(self.Contributions_masses._EigFormat[15, 47, 0]), 0.00378948635566)
self.assertAlmostEqual(abs(self.Contributions_masses._EigFormat[15, 47, 1]), 0.00378948635566)
self.assertAlmostEqual(abs(self.Contributions_masses._EigFormat[15, 47, 2]), 0.33223420830758)
# check if sign of eigenvectors is consistent
self.assertEqual(np.sign(self.Contributions_masses._EigFormat[14, 47, 2]),
np.sign(self.Contributions_masses._EigFormat[15, 47, 0]))
self.assertEqual(np.sign(self.Contributions_masses._EigFormat[14, 47, 2]),
np.sign(self.Contributions_masses._EigFormat[15, 47, 1]))
self.assertEqual(np.sign(self.Contributions_masses._EigFormat[14, 47, 2]),
np.sign(self.Contributions_masses._EigFormat[15, 47, 2]))
# test irreps
self.assertEqual(self.Contributions._IRLabels[-1], 'B2')
# start from FORCE constants instead
self.assertAlmostEqual(self.ContributionsFC._frequencies[-1], 3741.4132865293, places=1)
if __name__ == '__main__':
unittest.main()
|
python
|
from typing import Type
from serflag import SerFlag
from handlers.graphql.utils.query import resolve_from_root
def resolve_myactions(actions_type: Type[SerFlag]):
def resolver(root, info, **args):
actions = resolve_from_root(root, info)
if not actions:
return []
return actions_type.deserialize_distinct(actions)
return resolver
def resolve_owner(actions_type: Type[SerFlag]):
def resolver(root, info, **args):
data = resolve_from_root(root, "my_actions")
if not data:
return False
return actions_type.deserialize(data) == actions_type.ALL
return resolver
|
python
|
#!/usr/bin/python3
import math
import pygame
import random
import sys
from pygame import K_d, K_a, K_w, K_s, K_SPACE
SIZE = WIDTH, HEIGHT = 500, 500
BLACK = 0, 0, 0
WHITE = 255, 255, 255
SHIP_W = 12
SHIP_H = 25
MAX_SPEED = 3
ASTEROID_LIMIT = 2
class Game_Space:
"""Initiates and holds all variables needed for the game to run. Also
includes all methods for changing the state of the game: move, shoot, etc.
"""
def __init__(self):
# Sets screen, font, and generates player's ship
self.screen = pygame.display.set_mode(SIZE)
self.font = pygame.font.SysFont('monospace', 25)
self.ship = Ship([WIDTH // 2, HEIGHT // 2], SHIP_W, SHIP_H)
self.asteroids = []
self.explosions = []
self.score = 0
self.big_asteroids = 0
self.satelite = None
self.target_score = 1000
def collision_check(self):
# Collision check for all objects in the GameSpace
if self.satelite is not None:
for i in range(len(self.ship.shots)):
if self.satelite.collision(self.ship.shots[i]):
self.score += 850
del self.ship.shots[i]
self.satelite.explode()
self.satelite = None
return
for i in range(len(self.asteroids)):
for j in range(len(self.ship.shots)):
if self.asteroids[i].collision(self.ship.shots[j]):
self.asteroids[i].break_apart()
if isinstance(self.asteroids[i], Big_Asteroid):
self.score += 100
self.big_asteroids -= 1
else:
self.score += 50
del self.asteroids[i]
del self.ship.shots[j]
return
for asteroid in self.asteroids:
if self.ship.collision(asteroid):
self.ship.explode()
self.game_over()
def handle_explosions(self):
# Cleans up explosion debris
for explosion in self.explosions:
for i in range(len(explosion)):
if explosion[i].timer <= 0:
del explosion[i]
return
else:
explosion[i].timer -= 1
def update_score(self):
# Updates the score displayed on the screen
display_score = self.font.render(str(self.score), False, WHITE)
width, height = self.font.size(str(self.score))
self.screen.blit(display_score, (WIDTH - width - 10,
HEIGHT - height - 10))
def game_over(self):
# Game over operation
# TODO: End game, display high scores
self.ship.x = WIDTH // 2
self.ship.y = HEIGHT // 2
def draw_all(self):
# Draw all objects in the GameSpace
self.ship.draw()
for asteroid in self.asteroids:
asteroid.draw()
for shot in self.ship.shots:
shot.draw()
for explosion in self.explosions:
for debris in explosion:
debris.draw()
if self.satelite is not None:
self.satelite.draw()
def move_all(self):
# Move all objects in the GameSpace
self.ship.move()
for asteroid in self.asteroids:
asteroid.move()
for shot in self.ship.shots:
shot.move()
for explosion in self.explosions:
for debris in explosion:
debris.move()
if self.satelite is not None:
self.satelite.move()
def spawn_asteroids(self):
# Spawns BigAsteroids if currently under the limit
if self.big_asteroids < ASTEROID_LIMIT:
if random.choice([True, False]):
self.asteroids.append(Big_Asteroid(None))
self.big_asteroids += 1
def spawn_satelite(self):
# Spawns Satelite object if target score is met, increases target each
# spawn
if self.score > self.target_score:
if self.satelite is None:
self.satelite = Satelite()
self.target_score *= 3
elif self.satelite.x < 0:
self.satelite = None
def run_game(self):
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
self.screen.fill(BLACK)
self.update_score()
self.draw_all()
self.ship.control(pygame.key.get_pressed())
self.collision_check()
self.move_all()
self.spawn_asteroids()
self.spawn_satelite()
self.handle_explosions()
self.ship.remove_shots()
pygame.display.flip()
pygame.time.wait(25)
class Menu:
"""Menu object to be displayed before and after every game. Work in
    progress, not yet implemented.
"""
def __init__(self):
# Set font and grab current pygame surface
self.font_inactive = pygame.font.SysFont('monospace', 45)
self.font_active = pygame.font.SysFont('monospace', 60)
self.screen = pygame.display.get_surface()
self.options = [['New Game', True], ['Exit', False]]
self.spacing = 10
self.padding_top = 100
self.padding_left = 80
def make_menu(self):
# Draw the menu on the screen
x = self.padding_left
y = self.padding_top
for menu_item in self.options:
option = menu_item[0]
active = menu_item[1]
if active:
button = self.font_active.render(option, False, WHITE)
width, height = self.font_active.size(option)
else:
button = self.font_inactive.render(option, False, WHITE)
width, height = self.font_inactive.size(option)
self.screen.blit(button, (x, y))
y += height + self.spacing
def action(self, keys):
# Get user input and change active menu item or do menu action.
for i in range(len(self.options)):
if self.options[i][1]:
pos = i
if keys[K_w]:
if pos > 0:
self.options[pos][1] = False
self.options[pos - 1][1] = True
elif keys[K_s]:
if pos < len(self.options) - 1:
self.options[pos][1] = False
self.options[pos + 1][1] = True
elif keys[K_SPACE]:
if self.options[pos][0] == 'New Game':
game.run_game()
elif self.options[pos][0] == 'Exit':
sys.exit()
class Space_Object:
"""Base object for all other objects. Includes draw and move methods."""
def __init__(self, position, width, height):
# Requires position, width, and height as inputs. Gets the current
# pygame surface
self.position = position
self.x = position[0]
self.y = position[1]
self.width = width
self.height = height
self.screen = pygame.display.get_surface()
self.speed = [0, 0]
self.direction = 0
self.delta_speed = 0
self.speed_limit = MAX_SPEED
self.rotation = 0
self.color = WHITE
self.screen_wrap = True
def move(self):
        # Adjust the object's position variables depending on its speed
        # and direction
rad = -math.radians(self.direction + self.rotation)
sx = self.delta_speed * math.sin(rad)
sy = self.delta_speed * math.cos(rad)
self.delta_speed = 0
self.speed[0] -= sx
self.speed[1] += sy
if self.speed[0] > self.speed_limit:
self.speed[0] = self.speed_limit
elif self.speed[0] < -self.speed_limit:
self.speed[0] = -self.speed_limit
if self.speed[1] > self.speed_limit:
self.speed[1] = self.speed_limit
elif self.speed[1] < -self.speed_limit:
self.speed[1] = -self.speed_limit
self.x += self.speed[0]
self.y += self.speed[1]
if self.screen_wrap:
if self.x < 0 - 10:
self.x += WIDTH
elif self.x > WIDTH + 10:
self.x -= WIDTH
if self.y < 0 - 10:
self.y += HEIGHT
elif self.y > HEIGHT + 10:
self.y -= HEIGHT
self.position = [self.x, self.y]
def points(self):
        # Returns the object's relative shape adjusted for orientation and
# position
point_list = []
rad = -math.radians(self.direction)
for point in self.relative_coord:
dx = self.x + point[0] * math.cos(rad) - point[1] * math.sin(rad)
dy = self.y + point[1] * math.cos(rad) + point[0] * math.sin(rad)
point_list.append([dx, dy])
return point_list
def draw(self):
# Draws object on the screen
pygame.draw.polygon(self.screen, self.color, self.points(), 2)
def collision(self, item):
# Determines if a collision has taken place between two objects using
# their positions, widths, and heights
min_safe_x = self.width / 2 + item.width / 4
min_safe_y = self.height / 2 + item.height / 4
min_safe_dist = math.sqrt(min_safe_x ** 2 + min_safe_y ** 2)
abs_x = abs(self.x - item.x)
abs_y = abs(self.y - item.y)
abs_dist = math.sqrt(abs_x ** 2 + abs_y ** 2)
if abs_dist < min_safe_dist:
return True
def explode(self):
        # Create an explosion effect by generating debris
explosion = []
direction = random.randint(0, 365)
debris_amount = 5
for i in range(debris_amount):
explosion.append(Debris(self.position, direction))
direction += 73
game.explosions.append(explosion)
class Ship(Space_Object):
"""The user controlled space ship. Has special methods shoot, control, and
remove_shots. Stores the number of ship shots currently active and applies
a shot limit. Holds the ships limiting factors: acceleration, turn speed.
"""
def __init__(self, position, width, height):
# Initialize SpaceObject and set object shape
Space_Object.__init__(self, position, width, height)
self.relative_coord = [[-self.width // 2, self.height * 2 // 5],
[0, self.height // 5],
[self.width // 2, self.height * 2 // 5],
[0, -self.height * 3 // 5]]
self.shots = []
self.shot_limit = 10
self.shot_delay = 0
self.acceleration = 2
self.turn_speed = 5
def shoot(self):
# Generate a shot from the front of the ship
origin = self.points()[3]
if self.shot_delay == 0:
            if len(self.shots) < self.shot_limit:
self.shots.append(Shot(origin, self.direction))
self.shot_delay = 8
else:
self.shot_delay -= 1
def remove_shots(self):
        # Cleans up shots that have moved off screen
for i in range(len(self.shots)):
if self.shots[i].x < 0 or self.shots[i].y < 0:
del self.shots[i]
break
elif self.shots[i].x > WIDTH or self.shots[i].y > HEIGHT:
del self.shots[i]
break
def control(self, keys):
# Defines the result from user input and applies it
if keys[K_w]:
self.delta_speed -= self.acceleration
elif keys[K_s]:
self.delta_speed += self.acceleration
if keys[K_a]:
self.direction += self.turn_speed
elif keys[K_d]:
self.direction -= self.turn_speed
if keys[K_SPACE]:
self.shoot()
class Shot(Space_Object):
"""Shot object, fired from ship and can collide with other space objects.
"""
def __init__(self, position, direction):
# Calculates speed on initiation
self.width = 2
self.height = 6
self.speed_limit = MAX_SPEED + 4
self.screen_wrap = False
Space_Object.__init__(self, position, self.width, self.height)
self.direction = direction
rad = -math.radians(self.direction)
self.speed = [self.speed_limit * math.sin(rad),
-self.speed_limit * math.cos(rad)]
self.relative_coord = [[0, 0], [0, self.height]]
def draw(self):
# Drawn as a line instead of the default polygon
points = self.points()
pygame.draw.line(self.screen,
self.color,
points[0],
points[1],
self.width)
class Asteroid(Space_Object):
"""Base object for asteroids. Includes different shapes and break apart
methods for asteroid destruction.
"""
def __init__(self, position):
# Randomly chooses asteroid from collection of shapes.
ASTEROID_SHAPES = [
[[-self.width / 2, -self.height / 3],
[-self.width / 3, -self.height / 2],
[self.width / 6, -self.height / 2],
[self.width / 2, -self.height / 6],
[self.width / 2, self.height / 3],
[self.width / 3, self.height / 2],
[self.width / 6, self.height / 2],
[-self.width / 6, self.height / 6],
[-self.width / 3, self.height / 6],
[-self.width / 2, 0]],
[[0, self.height / 2],
[self.width / 6, self.height / 2],
[self.width / 3, self.height / 3],
[self.width / 3, self.height / 6],
[self.width / 2, 0],
[self.width / 2, -self.height / 6],
[self.width / 3, -self.height / 3],
[self.width / 6, -self.height / 3],
[0, -self.height / 2],
[-self.width / 6, -self.height / 2],
[-self.width / 6, -self.height / 3],
[-self.width / 2, 0],
[-self.width / 2, self.height / 6],
[-self.width / 3, self.height / 3],
[-self.width / 6, self.height / 3]]
]
# Randomly choose start position if position is not inherited
if position is None:
start = random.choice([1, 2, 3, 4])
if start == 1:
position = [0, random.randint(0, HEIGHT)]
elif start == 2:
position = [WIDTH, random.randint(0, HEIGHT)]
elif start == 3:
position = [random.randint(0, WIDTH), 0]
else:
position = [random.randint(0, WIDTH), HEIGHT]
Space_Object.__init__(self, position, self.width, self.height)
        speed = random.randint(1, self.speed_limit)
        self.direction = random.randint(0, 365)
        self.relative_coord = ASTEROID_SHAPES[random.randint(0, len(ASTEROID_SHAPES) - 1)]  # noqa
        rad = -math.radians(self.direction)
        self.speed = [speed * math.sin(rad),
                      -speed * math.cos(rad)]
self.rotation = random.randint(-20, 20)
def break_apart(self):
# Default break_apart calls base explode method
self.explode()
class Big_Asteroid(Asteroid):
"""Big asteroids are slow and break apart into small asteroids."""
def __init__(self, position):
self.height = 75
self.width = 75
self.speed_limit = MAX_SPEED - 2
Asteroid.__init__(self, position)
def break_apart(self):
for i in range(random.randint(1, 4)):
game.asteroids.append(Small_Asteroid(self.position))
self.explode()
class Small_Asteroid(Asteroid):
"""Small asteroids are fast and are destroyed on collision with a shot."""
height = 20
width = 20
speed_limit = MAX_SPEED - 1
def __init__(self, position):
Asteroid.__init__(self, position)
class Debris(Shot):
"""Debris uses the shot class to show destruction. Incldues a timer
variable to be deleted when timer hits zero.
"""
def __init__(self, position, direction):
self.width = 1
self.height = random.randint(1, 20)
Shot.__init__(self, position, direction)
self.timer = random.randint(5, 15)
class Satelite(Space_Object):
"""Special, high value target. Moves from right to left across the middle
    of the screen. A more complex shape than the other space objects.
"""
def __init__(self):
Space_Object.__init__(self, [WIDTH, HEIGHT // 2], 12, 10)
self.speed = [-MAX_SPEED, 0]
self.screen_wrap = False
def draw(self):
# Draw method includes a circle and three lines.
line_1 = [[self.x, self.y - self.height // 4],
[self.x + self.width * 3 // 4, self.y - self.height // 2]]
line_2 = [[self.x + self.width // 4, self.y],
[self.x + self.width * 3 // 4, self.y]]
line_3 = [[self.x, self.y + self.height // 4],
[self.x + self.width * 3 // 4, self.y + self.height // 2]]
pygame.draw.circle(self.screen,
self.color,
(int(self.x), int(self.y)),
self.width // 4)
pygame.draw.line(self.screen, self.color, line_1[0], line_1[1], 1)
pygame.draw.line(self.screen, self.color, line_2[0], line_2[1], 1)
pygame.draw.line(self.screen, self.color, line_3[0], line_3[1], 1)
def main(game):
menu = Menu()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
game.screen.fill(BLACK)
menu.make_menu()
menu.action(pygame.key.get_pressed())
pygame.display.flip()
pygame.time.wait(25)
if __name__ == '__main__':
pygame.init()
game = Game_Space()
main(game)
|
python
|
import logging
import os.path
from os import getenv
import telegram.ext
from dotenv import load_dotenv
from telegram.ext import Updater, CommandHandler, MessageHandler
from telegram.ext.filters import Filters
from bot.commands import hello_cmd, echo_cmd, pin_message_cmd, slap_cmd, me_cmd, \
unknown_command_cmd, shrug_cmd, google_cmd, get_cmd, list_cmd, set_cmd, \
del_cmd, credits_cmd, pidor_cmd, pidoreg_cmd, meme_cmd, pidorules_cmd
# Setup logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s \
- %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Load configs and create bot instance
load_dotenv() # load telegram bot token from .env file
API_TOKEN = getenv("TELEGRAM_BOT_API_SECRET", "")
logger.debug("Beginning of token: %s", API_TOKEN[:5])
if not os.path.exists('storage'):
os.mkdir('storage')
updater = Updater(API_TOKEN, use_context=True,
persistence=telegram.ext.PicklePersistence(
filename='storage/data.bin'))
dispatch = updater.dispatcher
not_edited = ~Filters.update.edited_message
# Setup dispatcher with callbacks
dispatch.add_handler(CommandHandler('hello', hello_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('slap', slap_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('me', me_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('shrug', shrug_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('google', google_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('pin', pin_message_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('credits', credits_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('meme', meme_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('get', get_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('list', list_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('set', set_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('del', del_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('pidor', pidor_cmd, filters=not_edited))
dispatch.add_handler(
CommandHandler('pidorules', pidorules_cmd, filters=not_edited))
dispatch.add_handler(CommandHandler('pidoreg', pidoreg_cmd, filters=not_edited))
updater.dispatcher.add_handler(
MessageHandler(Filters.regex(r'^/\w+') & not_edited, unknown_command_cmd))
updater.dispatcher.add_handler(
MessageHandler(Filters.text & ~Filters.update.edited_message, echo_cmd))
# Run the bot
updater.start_polling()
updater.idle()
|
python
|
from pyHS100 import Discover
for dev in Discover.discover().values():
print(dev)
print("host:" + dev.host)
|
python
|
from collections import namedtuple
Meta = namedtuple('Meta', ('long_name', 'units', 'comment'))
Meta.__new__.__defaults__ = (None,) * len(Meta._fields)
METADATA = {
'FileCode': Meta(
long_name='file_code'),
'HeaderLen': Meta(
long_name='header_length',
units='bytes'),
'StartTime': Meta(
long_name='start_time',
comment='time of first sample in file'),
'StopTime': Meta(
long_name='stop_time',
comment='time of last sample in file'),
'CGProg': Meta(
long_name='program_number',
comment='chirp generator program number'),
'ModelNo': Meta(
long_name='model_number',
comment='0=94GHz single polarisation radar, 1=94GHz dual polarisation radar'),
'ProgName': Meta(
long_name='program_name'),
'CustName': Meta(
long_name='customer_name'),
'Freq': Meta(
long_name='radar_frequency',
units='GHz'),
'AntSep': Meta(
long_name='antenna_separation',
units='m',
comment='separation of both antenna axis (bistatic configuration)'),
'AntDia': Meta(
long_name='antenna_diameter',
units='m'),
'AntG': Meta(
long_name='antenna_gain',
comment='linear antenna gain'),
'HPBW': Meta(
long_name='half_power_beam_width',
units='degrees'),
'Cr': Meta(
long_name='radar_constant'),
'DualPol': Meta(
long_name='dual_polarisation',
comment='0=single polarisation radar, 1=dual polarisation radar in LDR mode, '
'2=dual polarisation radar in STSR mode'),
'CompEna': Meta(
long_name='compression',
comment='0=not compressed, 1=compressed, 2=compressed and polarimetric variables saved'),
'AntiAlias': Meta(
long_name='anti_alias',
comment='0=spectra not anti-aliased, 1=spectra have been anti-aliased'),
'SampDur': Meta(
long_name='sample_duration',
units='s'),
'GPSLat': Meta(
long_name='gps_latitude',
units='degrees_north'),
'GPSLong': Meta(
long_name='gps_longitude',
units='degrees_east'),
'CalInt': Meta(
long_name='calibration_interval',
comment='period for automatic zero calibrations in number of samples'),
'RAltN': Meta(
long_name='n_range_layers',
comment='number of radar ranging layers'),
'TAltN': Meta(
long_name='n_temperature_layers',),
'HAltN': Meta(
long_name='n_humidity_layers'),
'SequN': Meta(
long_name='n_chirp_sequences'),
'RAlts': Meta(
long_name='range_layers'),
'TAlts': Meta(
long_name='temperature_layers'),
'HAlts': Meta(
long_name='humidity_layers'),
'Fr': Meta(
long_name='range_factors'),
'SpecN': Meta(
long_name='n_samples_in_chirp'),
'RngOffs': Meta(
long_name='chirp_start_indices'),
'ChirpReps': Meta(
long_name='n_chirps_in_sequence'),
'SeqIntTime': Meta(
long_name='sequence_integration_time'),
'dR': Meta(
long_name='range_resolution',
units='m',
comment='chirp sequence range resolution'),
'MaxVel': Meta(
long_name='max_doppler_velocity',
units='m/s',
comment='max. Doppler velocity for each chirp sequence (unambiguous)'),
'ChanBW': Meta(
long_name='bandwidth',
units='Hz',
comment='bandwidth of individual radar channel in the sequence'),
'ChirpLowIF': Meta(
long_name='lowest_IF_frequency',
units='Hz'),
'ChirpHighIF': Meta(
long_name='highest_IF_frequency',
units='Hz'),
'RangeMin': Meta(
long_name='minimum_altitude',
units='m',
comment='minimum altitude (range) of the sequence'),
'RangeMax': Meta(
long_name='maximum_altitude',
units='m',
comment='maximum altitude (range) of the sequence)'),
'ChirpFFTSize': Meta(
long_name='fft_size',
comment='Must be power of 2'),
'ChirpInvSamples': Meta(
long_name='n_invalid_samples',
comment='number of invalid samples at beginning of chirp'),
'ChirpCenterFr': Meta(
long_name='chirp_center_frequency',
units='MHz'),
'ChirpBWFr': Meta(
long_name='chirp_bandwidth',
units='MHz'),
'FFTStartInd': Meta(
long_name='fft_start_index'),
'FFTStopInd': Meta(
long_name='fft_stop_index'),
'ChirpFFTNo': Meta(
long_name='n_chirp_fft',
comment='number of FFT range layers in one chirp (usually = 1)'),
'SampRate': Meta(
long_name='adc_sampling_rate',
units='Hz'),
'MaxRange': Meta(
long_name='maximum_range',
units='m',
comment='maximum unambiguous range'),
'SupPowLev': Meta(
long_name='power_leveling_flag',
comment='flag indicating the use of power levelling (0=yes, 1=no)'),
'SpkFilEna': Meta(
long_name='spike_filter_flag',
comment='flag indicating the use of spike/plankton filter (1=yes, 0=no)'),
'PhaseCorr': Meta(
long_name='phase_correction_flag',
comment='flag indicating the use of phase correction (1=yes, 0=no)'),
'RelPowCorr': Meta(
long_name='relative_power_correction_flag',
comment='flag indicating the use of relative power correction (1=yes, 0=no)'),
'FFTWindow': Meta(
long_name='fft_window',
comment='FFT window in use: 0=square, 1=parzen, 2=blackman, 3=welch, 4=slepian2, 5=slepian3'),
'FFTInputRng': Meta(
long_name='adc_voltage_range',
comment='ADC input voltage range (+/-)',
units='mV'),
'NoiseFilt': Meta(
long_name='noise_filter_threshold',
comment='noise filter threshold factor (multiple of STD in Doppler spectra)'),
'Time': Meta(
long_name='time',
units='s'),
'MSec': Meta(
long_name='time_ms',
units='ms'),
'QF': Meta(
long_name='quality_flag',
comment='Bit 1=ADC saturation, Bit 2=spectral width too high, Bit 3=no transm. power leveling'),
'RR': Meta(
long_name='rain_rate',
units='mm/h'),
'RelHum': Meta(
long_name='relative_humidity',
units='%'),
'EnvTemp': Meta(
long_name='temperature',
units='K',
comment='environment temperature'),
'BaroP': Meta(
long_name='pressure',
units='hPa',
comment='barometric pressure'),
'WS': Meta(
long_name='wind_speed',
units='km/h',),
'WD': Meta(
long_name='wind_direction',
units='degrees'),
'DDVolt': Meta(
long_name='voltage',
units='V',
comment='direct detection channel voltage'),
'DDTb': Meta(
long_name='brightness_temperature',
units='K'),
'TransPow': Meta(
long_name='transmitter_power',
units='W'),
'TransT': Meta(
long_name='transmitter_temperature',
units='K'),
'RecT': Meta(
long_name='receiver_temperature',
units='K'),
'PCT': Meta(
long_name='pc_temperature',
units='K'),
'LWP': Meta(
long_name='liquid_water_path',
units='g/m2'),
'Elev': Meta(
long_name='elevation',
units='degrees'),
'Azi': Meta(
long_name='azimuth',
units='degrees'),
'Status': Meta(
long_name='status_flag',
comment='mitigation status flags: 0/1=heater switch (ON/OFF) 0/10=blower switch (ON/OFF)'),
'TotSpec': Meta(
long_name='doppler_spectrum',
comment='linear Ze'),
'HSpec': Meta(
long_name='doppler_spectrum_h',
comment='horizontal polarisation, linear Ze'),
'ReVHSpec': Meta(
long_name='covariance_spectrum_re',
comment='real part linear Ze'),
'ImVHSpec': Meta(
long_name='covariance_spectrum_im',
comment='imaginary part linear Ze'),
'RefRat': Meta(
long_name='linear_depolarization_ratio',
units='dB'),
'DiffPh': Meta(
long_name='differential_phase',
units='rad'),
'SLDR': Meta(
long_name='ldr_slanted',
units='dB'),
'CorrCoeff': Meta(
long_name='correlation_coefficient',),
'SCorrCoeff': Meta(
long_name='correlation_coefficient_slanted',),
'KDP': Meta(
long_name='differential_phase_shift',
units='rad/km'),
'DiffAtt': Meta(
long_name='differential_attenuation',
units='db/km'),
'TotNoisePow': Meta(
long_name='integrated_noise',
comment='integrated Doppler spectrum noise power'),
'HNoisePow': Meta(
long_name='integrated_noise_h',
comment='integrated Doppler spectrum noise power in h-pol'),
'AliasMsk': Meta(
long_name='anti_alias_correction',
comment='mask indicating if anti-aliasing has been applied (=1) or not (=0)'),
'MinVel': Meta(
long_name='minimum_velocity',
units='m/s'),
'PowIF': Meta(
long_name='IF_power',
comment='IF power at ADC',
units='uW'),
'Ze': Meta(
long_name='reflectivity',
comment='linear reflectivity in Ze units for vertical polarisation'),
'MeanVel': Meta(
long_name='velocity',
units='m/s',
comment='mean velocity for vertical polarisation'),
'SpecWidth': Meta(
long_name='width',
units='m/s',
comment='spectral width for vertical polarisation'),
'Skewn': Meta(
long_name='skewness',
comment='spectral skewness for vertical polarisation'),
'Kurt': Meta(
long_name='kurtosis',),
}
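if __name__ == "__main__":
    # Hedged usage sketch: METADATA maps raw radar file header/variable keys to a
    # Meta namedtuple; fields that were not set fall back to None via the
    # Meta.__new__.__defaults__ assignment above.
    freq = METADATA['Freq']
    print(freq.long_name, freq.units, freq.comment)  # -> radar_frequency GHz None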
|
python
|
# Generated by Django 2.2.6 on 2019-10-12 18:48
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('fashion_catalogue', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='color',
),
migrations.RemoveField(
model_name='category',
name='size',
),
]
|
python
|
# eqcmd.py: Basic routines for interfacing with EQ.
import asyncio
from asyncio.subprocess import create_subprocess_shell, PIPE
import eqlog
import os
import re
import shlex
import random
class CommandError(Exception):
"""A problem running a command"""
pass
class NotReadyError(Exception):
"""EverQuest isn't ready to receive a command"""
pass
async def _xdotool(display, text) -> [str]:
"""Interface with X via calls to xdotool."""
cmd = "/usr/bin/xdotool "+text
env = dict([(k, os.environ[k]) for k in os.environ])
env["DISPLAY"] = display
proc = await asyncio.create_subprocess_shell(cmd, stdout=PIPE, stderr=PIPE, env=env)
stdout, stderr = await proc.communicate()
rc = await proc.wait()
if rc != 0:
raise CommandError("Command send failed")
return str(stdout, "utf8").splitlines()
_EQDISPLAY = None
async def _eqdisplay() -> str:
"""Figure out which X display EQ is running on."""
return os.environ["DISPLAY"]
# global _EQDISPLAY
# if _EQDISPLAY is None:
# for i in range(100):
# try:
# display = ":%d" % i
# await _xdotool(display, "search --name EverQuest")
# _EQDISPLAY = display
# break
# except CommandError:
# pass
# else:
# raise CommandError("Couldn't find EverQuest display")
# return _EQDISPLAY
async def _eqxdo(text):
"""Run xdotool against the display holding EverQuest"""
return await _xdotool(await _eqdisplay(), text)
_WINDLOC_RE = re.compile(r"\s*Position: ([0-9]+),([0-9]+).*")
_GEOMETRY_RE = re.compile(r"\s*Geometry: ([0-9]+)x([0-9]+).*")
async def _geometry():
"""Get the EQ window location"""
lines = await _eqxdo("search --name EverQuest getwindowgeometry")
loc = None
size = None
for line in lines:
m = _WINDLOC_RE.match(line)
if m:
loc = int(m.group(1)), int(m.group(2))
m = _GEOMETRY_RE.match(line)
if m:
size = int(m.group(1)), int(m.group(2))
if loc is None or size is None:
raise CommandError("Couldn't find EverQuest window")
return loc, size
async def _prepare():
"""Prepare EQ window to receive input"""
loc, size = await _geometry()
x = loc[0] + (size[0]//3 + random.randint(0, size[0]//3))
y = loc[1] + (size[1]//3 + random.randint(0, size[1]//3))
await _eqxdo("mousemove %d %d" % (x, y))
await _eqxdo("click 1")
await asyncio.sleep(0.2)
await _eqxdo("search --name EverQuest windowmap windowraise windowfocus")
await _eqxdo("click 1")
async def _press_raw(key_name):
"""Press a key in EQ"""
await _eqxdo("key " + shlex.quote(key_name))
await asyncio.sleep(0.2)
async def _press(key_name):
"""Press a key in EQ after preparing for input"""
await _prepare()
await _press_raw(key_name)
async def _type(text):
"""Type a line of text in EQ"""
await _prepare()
await _press_raw("Return")
await _eqxdo("type --delay 20 "+shlex.quote(text))
await _press_raw("Return")
async def _expect_io():
"""Wait until a line of text comes in from the EQ log."""
try:
with eqlog.tap() as t:
await t.next_line()
except asyncio.CancelledError:
pass
_EQ_READY = False
async def _ping_watch():
"""Keep the _EQ_READY variable up to date. Here, we decide EQ is up and running if
we've seen at least 1 chat message (e.g. "You are out of food and drink.") sometime
in the past minute."""
global _EQ_READY
while True:
f = asyncio.ensure_future(_expect_io())
try:
await asyncio.wait_for(f, 60)
_EQ_READY = True
except asyncio.TimeoutError:
_EQ_READY = False
def is_ready():
"""Determine if EQ is ready to receive commands"""
return _EQ_READY
async def wait_for_ready():
"""Wait for EQ to be ready to receive commands"""
while True:
if is_ready():
return
else:
await asyncio.sleep(1)
_is_init = False
async def init():
"""Prepare the EQ command subsytem"""
global _is_init
if _is_init:
return
_is_init = True
await _eqdisplay()
asyncio.ensure_future(_ping_watch())
class CommandTap(object):
"""A context object, extending the functionality of eqlog.LogTap,
which also allows sending commands to EQ."""
_LOCK = asyncio.Lock()
def __init__(self):
self._ltctx = None
self._lt = None
def __enter__(self):
self._ltctx = eqlog.tap()
self._lt = self._ltctx.__enter__()
return self
def __exit__(self, *args):
try:
self._ltctx.__exit__(*args)
finally:
CommandTap._LOCK.release()
async def next_line(self):
"""Retrieve the next line"""
return await self._lt.next_line()
async def skip_until(self, text):
"""Wait until a line matching the specified regexp comes up"""
while True:
line = await self.next_line()
if isinstance(text, str):
if line == text:
return line
else:
m = text.match(line)
if m:
return m
async def send(self, text):
"""Send a command to EQ."""
if not is_ready():
raise NotReadyError("EQ is not currently ready to receive commands")
await _type(text)
async def press(self, key_name):
"""Press a key in the EQ window."""
if not is_ready():
raise NotReadyError("EQ is not currently ready to receive commands")
await _press(key_name)
async def tap():
"""Call as 'with await eqcmd.tap() as t:' to get a CommandTap object to manipulate EQ with."""
await CommandTap._LOCK.acquire()
return CommandTap()
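# Hedged usage sketch (kept as a comment: it needs a running EverQuest client,
# a configured eqlog module, and an asyncio event loop; the command text below
# is illustrative only):
#
# async def greet():
#     await init()
#     await wait_for_ready()
#     with await tap() as t:
#         await t.send("/say hello")
#         print(await t.next_line())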
|
python
|
# Generated by Django 2.0.5 on 2018-08-03 11:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('surveys', '0005_answer_training_set'),
]
operations = [
migrations.CreateModel(
name='Translate_Hired_Power',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hired_power_literal', models.CharField(max_length=200)),
('min_hired_power', models.IntegerField(default=0)),
('max_hired_power', models.IntegerField(default=800)),
('deleted', models.IntegerField(default=0)),
],
),
]
|
python
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mixin for device_power capability."""
from unittest import mock
class DevicePowerTestMixin:
"""Mixin for common device unit tests of device power.
Assumes self.uut is set.
"""
def test_power_cycle(self):
"""Test self.uut.device_power.power_cycle is called."""
with mock.patch.object(self.uut.device_power, "off"):
with mock.patch.object(self.uut.device_power, "on"):
self.uut.device_power.cycle()
self.uut.device_power.off.assert_called_once()
self.uut.device_power.on.assert_called_once()
|
python
|
import numpy as np
import tensorflow as tf
import time
import keras
def KL_generated_images(dec, cla, N, dimZ, task, sample_W = True):
z = tf.random_normal(shape=(N, dimZ))
x_gen = dec(z, sampling = sample_W)
y_gen = tf.clip_by_value(cla(x_gen), 1e-9, 1.0)
y_true = np.zeros([N, 10]); y_true[:, task] = 1
y_true = tf.constant(np.asarray(y_true, dtype='f'))
kl = -tf.reduce_sum(y_true * tf.log(y_gen), 1)
kl_mean = tf.reduce_mean(kl)
kl_var = tf.reduce_mean((kl - kl_mean)**2)
return kl_mean, kl_var
def construct_eval_func(dec, cla, batch_size_ph, dimZ, task, sample_W = True):
N_gen = 100
kl_mean, kl_var = KL_generated_images(dec, cla, N_gen, dimZ, task, sample_W)
ops = [kl_mean, kl_var]
def eval(sess):
n_iter = 10
N = n_iter * N_gen
begin = time.time()
kl_total = 0.0; kl_var = 0.0
        for j in range(0, n_iter):
a, b = sess.run(ops, feed_dict={batch_size_ph: N_gen,
keras.backend.learning_phase(): 0})
kl_total += a / n_iter
kl_var += b / n_iter
end = time.time()
print "kl=%.2f, ste=%.2f, time=%.2f" \
% (kl_total, np.sqrt(kl_var / N), end - begin)
return kl_total, np.sqrt(kl_var / N)
return eval
|
python
|
#!/usr/bin/python
# Written By: Sahar Hathiramani
# Date: 01/07/2021
import os
import socket
from termcolor import colored
os.system("clear")
print("🄱🄰🄳 🄱🄾🅈 🄱🅄🅃 🄰 🅂🄰🄳 🄱🄾🅈")
socket.setdefaulttimeout(2)
host = input("[*] Please Specify a Host to Scan: ")
def portscanner(port):
    # A fresh socket is created for every port: a socket that has already
    # connected cannot be reused for another connect_ex call.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if sock.connect_ex((host, port)):
        print(colored("[-] Port %d is closed" % (port), 'red'))
    else:
        print(colored("[+] Port %d is open" % (port), 'green'))
    sock.close()
for port in range(1, 1000):
    portscanner(port)
n = str(input("Enter y to return back:"))
if n=="y":
os.system("clear")
|
python
|
import gzip
import warnings
from pkg_resources import resource_stream, resource_filename
import numpy as np
import matplotlib.image as mpimg
from os.path import dirname, join
def load_hill_topography():
"""
Load hillshading and return elevation
:return: np.array
"""
stream = resource_stream(__name__, 'data/jacksboro_fault_dem.npz')
# module_path = dirname(__file__)
# data_file_name = join(module_path, 'data', 'jacksboro_fault_dem.npz')
with np.load(stream) as dem:
elevation = dem["elevation"]
return elevation
def load_scan_image():
"""
Load image of a medical scan
:return:
"""
# module_path = dirname(__file__)
# data_file_name = join(module_path, 'data', 's1045.ima.gz')
stream = resource_stream(__name__, 'data/s1045.ima.gz')
with gzip.open(stream) as dfile:
scan_im = np.frombuffer(dfile.read(), np.uint16).reshape((256, 256))
return scan_im
def load_pic(name="grmhd"):
if not isinstance(name, str):
        raise TypeError("name should be a string")
module_path = dirname(__file__)
im_path = resource_filename(__name__, 'data/grmhd.png')
# pic_path = join(module_path, 'data', 'grmhd.png')
if name == "grmhd":
im_path = resource_filename(__name__, 'data/grmhd.png')
elif name == "vortex":
im_path = resource_filename(__name__, 'data/vortex.jpg')
elif name == "tng":
im_path = resource_filename(__name__, 'data/tng.jpg')
else:
warnings.warn("Using a default image, name should be in ['grmhd', 'vortex', 'tng']")
img = mpimg.imread(im_path)
return img[:, :, 0]
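if __name__ == "__main__":
    # Hedged usage sketch: load the bundled datasets and report their shapes.
    # Assumes the packaged data files referenced above ship with the module.
    print(load_hill_topography().shape)
    print(load_scan_image().shape)
    print(load_pic("grmhd").shape)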
|
python
|
import textwrap
import uuid
from multiprocessing import Pool
from pprint import pprint
import oyaml as yaml
from cloudmesh.common.DateTime import DateTime
from cloudmesh.common.console import Console
from cloudmesh.configuration.Config import Config
from cloudmesh.mongo.CmDatabase import CmDatabase
from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate
from cloudmesh.storage.Provider import Provider
class StorageQueue:
"""
This class specifies a storage object queue, that allows the queuing of
files to be copied between services.
The queue has a maximal parallelism that can be set to execute the copy in
multiple threads.
    Please note that actions only add to or modify the queue in the db; however,
    the run command executes them one by one.
    It will be up to this method to guarantee order. For example, in case of a
    recursive copy it would make sense to create directories first.
"""
"""
DB object
cm:
id: uuid
collection: storage-queue-{source}-{destination}
...
action: copy
source: the/source/path
destination: the/destination/path
created: date
status:
Actions can be for example
copy
mkdir
delete
cancel
cancel has a specific action allowing all jobs that have not
yet been finished to be canceled.
Each file can be in the state: completed, waiting, inprogress, canceled
here is an example for the status of the queue.
{
"length": 100,
"completed": 10,
"waiting": 80,
"inprogress": 10,
"canceled": 0
}
"""
def __init__(self,
source,
destination,
name="local",
parallelism=4):
"""
:param name: The name of the queue (used as a collection in mongodb)
:param source: The name of the service in cloudmesh.data from which
to copy
:param destination: The name of the service in cloudmesh.data from
which to copy
:param parallelism: The number of parallel threads
"""
self.source = source
self.destination = destination
self.parallelism = parallelism
config = Config()
self.source_spec = config[f"cloudmesh.storage.{source}"]
self.destination_spec = config[f"cloudmesh.storage.{destination}"]
self.provider_source = Provider(service=source)
self.provider_destination = Provider(service=destination)
self.name = name
self.collection = f"storage-queue-{name}-{source}-{destination}"
self.number = 0
#
# TODO: create collection in mongodb
#
Console.ok(f"Collection: {self.name}")
def _copy_file(self, sourcefile, destinationfile):
"""
adds a copy action to the queue
copies the file from the source service to the destination service using
the file located in the path and storing it into the remote. If remote
is not specified path is used for it.
The copy will not be performed if the files are the same.
:param sourcefile:
:param destinationfile:
:return:
"""
date = DateTime.now()
uuid_str = str(uuid.uuid1())
specification = textwrap.dedent(f"""
cm:
number: {self.number}
name: "{self.source}:{sourcefile}"
kind: storage
id: {uuid_str}
cloud: {self.collection}
collection: {self.collection}
created: {date}
action: copy
source:
service: {self.source}
path: {sourcefile}
destination:
service: {self.destination}
path: {destinationfile}
status: waiting
""")
entries = yaml.load(specification, Loader=yaml.SafeLoader)
self.number = self.number + 1
return entries
@DatabaseUpdate()
def copy_file(self, sourcefile, destinationfile):
"""
adds a copy action to the queue
copies the file from the source service to the destination service using
the file located in the path and storing it into the remote. If remote
is not specified path is used for it.
The copy will not be performed if the files are the same.
:param sourcefile:
:param destinationfile:
:return:
"""
self._copy_file(sourcefile, destinationfile)
@DatabaseUpdate()
def copy_tree(self, sourcetree, destinationtree):
"""
adds a tree to be copied to the queue
it will recursively add all files within the tree
:param sourcetree:
:param destinationtree:
:return:
"""
        # goes recursively through the tree and adds the files
sources = self.provider_source.list(sourcetree, recursive=True)
files = []
dirs = []
for source in sources:
if bool(source['file']):
files.append(source)
else:
                dirs.append(source)
# create dirs first
actions = []
for file in dirs:
location = file["cm"]["location"]
actions.append(self.mkdir(self.destination, location))
# now copy files
for file in files:
location = file["cm"]["location"]
actions.append(self._copy_file(location, location))
return actions
def sync(self, sourcetree, destinationtree):
"""
just a more convenient name for copy_tree
:param sourcetree:
:param destinationtree:
:return:
"""
self.copy_tree(sourcetree, destinationtree)
def mkdir(self, service, path):
"""
adds a mkdir action to the queue
create the directory in the storage service
:param service: service must be either source or destination
:param path:
:return:
"""
date = DateTime.now()
uuid_str = str(uuid.uuid1())
specification = textwrap.dedent(f"""
cm:
number: {self.number}
name: "{service}:{path}"
kind: storage
id: {uuid_str}
cloud: {self.collection}
collection: {self.collection}
created: {date}
action: mkdir
source:
service: {service}
path: {path}
status: waiting
""")
entries = yaml.load(specification, Loader=yaml.SafeLoader)
self.number = self.number + 1
return entries
def delete(self, service, path):
"""
        adds a delete action to the queue
:param service: service must be either source or destination
:param path:
:return:
"""
date = DateTime.now()
uuid_str = str(uuid.uuid1())
specification = textwrap.dedent(f"""
cm:
number: {self.number}
name: "{service}:{path}"
kind: storage
id: {uuid_str}
cloud: {self.collection}
collection: {self.collection}
created: {date}
action: delete
source:
service: {service}
path: {path}
status: waiting
""")
entries = yaml.load(specification, Loader=yaml.SafeLoader)
self.number = self.number + 1
return entries
def status(self):
"""
provides that status of the queue
{
"length": 100,
"completed": 10,
"waiting": 80,
"inprogress": 10
}
:return:
"""
        # find all the values from within the MongoDB
raise NotImplementedError
def cancel(self, id=None):
"""
cancels a job with a specific id
:param id:
:return:
"""
# if None all are canceled
raise NotImplementedError
def action(self, specification):
"""
executes the action identified by the specification. This is used by the
run command.
:param specification:
:return:
"""
action = specification["action"]
if action == "copy":
print ("COPY", specification)
# update status
elif action == "delete":
print ("DELETE", specification)
# update status
elif action == "mkdir":
print ("MKDIR", specification)
# update status
def get_actions(self):
cm = CmDatabase()
entries = cm.find(cloud=self.collection,
kind='storage')
mkdir = []
copy = []
for entry in entries:
pprint (entry)
if entry['action'] == 'mkdir':
mkdir.append(entry)
elif entry['action'] == 'copy':
copy.append(entry)
return mkdir, copy
def run(self):
"""
runs the copy process for all jobs in the queue and completes when all
actions are completed
:return:
"""
mkdir, copy = self.get_actions()
# create directories
#
p = Pool(self.parallelism)
#
p.map(self.action, mkdir)
# COPY FILES
#
p = Pool(self.parallelism)
#
p.map(self.action, copy)
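# Hedged usage sketch (kept as a comment: it needs a cloudmesh configuration,
# a running MongoDB, and the named storage providers; the service names and
# paths below are made up for illustration):
#
# queue = StorageQueue(source="local", destination="aws", name="sync", parallelism=4)
# queue.copy_tree("~/data", "~/data")  # queues mkdir + copy actions in the db
# queue.run()                          # executes the queued actions in parallel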
|
python
|
import collections.abc
import re
import numpy as np
import pandas as pd
import torch
import joblib
import os
from pathlib import Path
from loguru import logger
from sklearn.model_selection import train_test_split
from torch._six import string_classes, int_classes
from ivadomed import utils as imed_utils
from ivadomed.keywords import SplitDatasetKW, LoaderParamsKW, ROIParamsKW, ContrastParamsKW
import nibabel as nib
import random
__numpy_type_map = {
'float64': torch.DoubleTensor,
'float32': torch.FloatTensor,
'float16': torch.HalfTensor,
'int64': torch.LongTensor,
'int32': torch.IntTensor,
'int16': torch.ShortTensor,
'int8': torch.CharTensor,
'uint8': torch.ByteTensor,
}
TRANSFORM_PARAMS = ['elastic', 'rotation', 'scale', 'offset', 'crop_params', 'reverse',
'translation', 'gaussian_noise']
# Ordered list of supported file extensions
# TODO: Implement support of the following OMETIFF formats (#739):
# [".ome.tif", ".ome.tiff", ".ome.tf2", ".ome.tf8", ".ome.btf"]
# They are included in the list to avoid a ".ome.tif" or ".ome.tiff" following the ".tif" or ".tiff" pipeline
EXT_LST = [".nii", ".nii.gz", ".ome.tif", ".ome.tiff", ".ome.tf2", ".ome.tf8", ".ome.btf", ".tif",
".tiff", ".png", ".jpg", ".jpeg"]
def split_dataset(df, split_method, data_testing, random_seed, train_frac=0.8, test_frac=0.1):
"""Splits dataset into training, validation and testing sets by applying train, test and validation fractions
according to the split_method.
The "data_testing" parameter can be used to specify the data_type and data_value to include in the testing set,
the dataset is then split as not to mix the data_testing between the training/validation set and the testing set.
Args:
df (pd.DataFrame): Dataframe containing all BIDS image files indexed and their metadata.
split_method (str): Used to specify on which metadata to split the dataset (eg. "participant_id", "sample_id", etc.)
data_testing (dict): Used to specify data_type and data_value to include in the testing set.
random_seed (int): Random seed to ensure reproducible splits.
train_frac (float): Between 0 and 1. Represents the train set proportion.
test_frac (float): Between 0 and 1. Represents the test set proportion.
Returns:
list, list, list: Train, validation and test filenames lists.
"""
# Get data_type and data_value from split parameters
# If no data_type is provided, data_type is the same as split_method
data_type = data_testing['data_type'] if data_testing['data_type'] else split_method
data_value = data_testing['data_value']
    if split_method not in df:
        raise KeyError("split_method '{}' was not found in metadata".format(split_method))
    if data_type not in df:
logger.warning("No data_type named '{}' was found in metadata. Not taken into account "
"to split the dataset.".format(data_type))
data_type = split_method
# Filter dataframe with rows where split_method is not NAN
df = df[df[split_method].notna()]
# If no data_value list is provided, create a random data_value according to data_type and test_fraction
# Split the TEST and remainder set using sklearn function
if len(data_value) == 0 and test_frac != 0:
data_value = sorted(df[data_type].unique().tolist())
test_frac = test_frac if test_frac >= 1 / len(data_value) else 1 / len(data_value)
data_value, _ = train_test_split(data_value, train_size=test_frac, random_state=random_seed)
if len(data_value) != 0:
for value in data_value:
if value not in df[data_type].values:
logger.warning("No data_value '{}' was found in '{}'. Not taken into account "
"to split the dataset.".format(value, data_type))
X_test = df[df[data_type].isin(data_value)]['filename'].unique().tolist()
X_remain = df[~df[data_type].isin(data_value)][split_method].unique().tolist()
# List dataset unique values according to split_method
# Update train fraction to apply to X_remain
data = sorted(df[split_method].unique().tolist())
train_frac_update = train_frac * len(data) / len(X_remain)
if ((train_frac_update > (1 - 1 / len(X_remain)) and len(X_remain) < 2) or train_frac_update > 1):
raise RuntimeError("{}/{} '{}' remaining for training and validation sets, train_fraction {} is too large, "
"validation set would be empty.".format(len(X_remain), len(data), split_method, train_frac))
# Split remainder in TRAIN and VALID sets according to train_frac_update using sklearn function
X_train, X_val = train_test_split(X_remain, train_size=train_frac_update, random_state=random_seed)
# Print the real train, validation and test fractions after splitting
real_train_frac = len(X_train)/len(data)
real_valid_frac = len(X_val)/len(data)
real_test_frac = 1 - real_train_frac - real_valid_frac
logger.warning("After splitting: train, validation and test fractions are respectively {}, {} and {}"
" of {}.".format(round(real_train_frac, 3), round(real_valid_frac, 3),
round(real_test_frac, 3), split_method))
# Convert train and valid sets from list of "split_method" to list of "filename"
X_train = df[df[split_method].isin(X_train)]['filename'].unique().tolist()
X_val = df[df[split_method].isin(X_val)]['filename'].unique().tolist()
# Make sure that test dataset is unseen during training
# (in cases where there are multiple "data_type" for a same "split_method")
X_train = list(set(X_train) - set(X_test))
X_val = list(set(X_val) - set(X_test))
return X_train, X_val, X_test
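# Hedged usage sketch for split_dataset (illustrative only): `df` is the BIDS
# dataframe described in the docstring, with at least "participant_id" and
# "filename" columns.
#
# train, val, test = split_dataset(df, split_method="participant_id",
#                                  data_testing={"data_type": "", "data_value": []},
#                                  random_seed=6, train_frac=0.6, test_frac=0.2)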
def get_new_subject_file_split(df, split_method, data_testing, random_seed,
train_frac, test_frac, path_output, balance, subject_selection=None):
"""Randomly split dataset between training / validation / testing.
Randomly split dataset between training / validation / testing\
and save it in path_output + "/split_datasets.joblib".
Args:
df (pd.DataFrame): Dataframe containing all BIDS image files indexed and their metadata.
split_method (str): Used to specify on which metadata to split the dataset (eg. "participant_id", "sample_id", etc.)
data_testing (dict): Used to specify the data_type and data_value to include in the testing set.
random_seed (int): Random seed.
train_frac (float): Training dataset proportion, between 0 and 1.
        test_frac (float): Testing dataset proportion, between 0 and 1.
path_output (str): Output folder.
balance (str): Metadata contained in "participants.tsv" file with categorical values. Each category will be
evenly distributed in the training, validation and testing datasets.
subject_selection (dict): Used to specify a custom subject selection from a dataset.
Returns:
        list, list, list: Training, validation and testing filenames lists.
"""
if subject_selection is not None:
# Verify subject_selection format
if not (len(subject_selection["metadata"]) == len(subject_selection["n"]) == len(subject_selection["value"])):
raise ValueError("All lists in subject_selection parameter should have the same length.")
sampled_dfs = []
random.seed(random_seed)
for m, n, v in zip(subject_selection["metadata"], subject_selection["n"], subject_selection["value"]):
participants = random.sample(df[df[m] == v]['participant_id'].unique().tolist(), n)
for participant in participants:
sampled_dfs.append(df[df['participant_id'] == participant])
if len(sampled_dfs) != 0:
df = pd.concat(sampled_dfs)
# If balance, then split the dataframe for each categorical value of the "balance" column
if balance:
if balance in df.keys():
df_list = [df[df[balance] == k] for k in df[balance][df[balance].notna()].unique().tolist()]
else:
logger.warning("No column named '{}' was found in 'participants.tsv' file. Not taken into account to split "
"the dataset.".format(balance))
df_list = [df]
else:
df_list = [df]
train_lst, valid_lst, test_lst = [], [], []
for df_tmp in df_list:
# Split dataset on each section of subjects
train_tmp, valid_tmp, test_tmp = split_dataset(df=df_tmp,
split_method=split_method,
data_testing=data_testing,
random_seed=random_seed,
train_frac=train_frac,
test_frac=test_frac)
# Update the dataset lists
train_lst += train_tmp
valid_lst += valid_tmp
test_lst += test_tmp
# save the subject distribution
split_dct = {'train': train_lst, 'valid': valid_lst, 'test': test_lst}
split_path = Path(path_output, "split_datasets.joblib")
joblib.dump(split_dct, split_path)
return train_lst, valid_lst, test_lst
def get_subdatasets_subject_files_list(split_params, df, path_output, subject_selection=None):
"""Get lists of subject filenames for each sub-dataset between training / validation / testing.
Args:
split_params (dict): Split parameters, see :doc:`configuration_file` for more details.
df (pd.DataFrame): Dataframe containing all BIDS image files indexed and their metadata.
path_output (str): Output folder.
subject_selection (dict): Used to specify a custom subject selection from a dataset.
Returns:
        list, list, list: Training, validation and testing filenames lists.
"""
if split_params[SplitDatasetKW.FNAME_SPLIT]:
# Load subjects lists
old_split = joblib.load(split_params[SplitDatasetKW.FNAME_SPLIT])
train_lst, valid_lst, test_lst = old_split['train'], old_split['valid'], old_split['test']
# Backward compatibility for subject_file_lst containing participant_ids instead of filenames
df_subjects = df[df['filename'].isin(train_lst)]
if df_subjects.empty:
df_train = df[df['participant_id'].isin(train_lst)]
train_lst = sorted(df_train['filename'].to_list())
df_subjects = df[df['filename'].isin(valid_lst)]
if df_subjects.empty:
df_valid = df[df['participant_id'].isin(valid_lst)]
valid_lst = sorted(df_valid['filename'].to_list())
df_subjects = df[df['filename'].isin(test_lst)]
if df_subjects.empty:
df_test = df[df['participant_id'].isin(test_lst)]
test_lst = sorted(df_test['filename'].to_list())
else:
train_lst, valid_lst, test_lst = get_new_subject_file_split(df=df,
split_method=split_params[SplitDatasetKW.SPLIT_METHOD],
data_testing=split_params[SplitDatasetKW.DATA_TESTING],
random_seed=split_params[SplitDatasetKW.RANDOM_SEED],
train_frac=split_params[SplitDatasetKW.TRAIN_FRACTION],
test_frac=split_params[SplitDatasetKW.TEST_FRACTION],
path_output=path_output,
balance=split_params[SplitDatasetKW.BALANCE]
if SplitDatasetKW.BALANCE in split_params else None,
subject_selection=subject_selection)
return train_lst, valid_lst, test_lst
def imed_collate(batch):
"""Collates data to create batches
Args:
batch (dict): Contains input and gt data with their corresponding metadata.
Returns:
list or dict or str or tensor: Collated data.
"""
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if torch.is_tensor(batch[0]):
stacked = torch.stack(batch, 0)
return stacked
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if re.search('[SaUO]', elem.dtype.str) is not None:
raise TypeError(error_msg.format(elem.dtype))
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return __numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int_classes):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], collections.abc.Mapping):
return {key: imed_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], collections.abc.Sequence):
return [imed_collate(samples) for samples in batch]
return batch
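# Hedged example (illustrative only): collating a batch of numpy arrays stacks
# them into a single torch tensor.
#
# batch = [np.zeros((2, 2)), np.ones((2, 2))]
# imed_collate(batch).shape  # -> torch.Size([2, 2, 2])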
def filter_roi(roi_data, nb_nonzero_thr):
"""Filter slices from dataset using ROI data.
This function filters slices (roi_data) where the number of non-zero voxels within the
ROI slice (e.g. centerline, SC segmentation) is inferior or equal to a given threshold
(nb_nonzero_thr).
Args:
roi_data (nd.array): ROI slice.
nb_nonzero_thr (int): Threshold.
Returns:
bool: True if the slice needs to be filtered, False otherwise.
"""
# Discard slices with less nonzero voxels than nb_nonzero_thr
return not np.any(roi_data) or np.count_nonzero(roi_data) <= nb_nonzero_thr
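# Hedged example (illustrative only): a slice whose ROI contains a single
# non-zero voxel is discarded when the threshold is 1.
#
# roi = np.zeros((4, 4)); roi[1, 2] = 1
# filter_roi(roi, nb_nonzero_thr=1)  # -> True (slice is filtered out)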
def orient_img_hwd(data, slice_axis):
"""Orient a given RAS image to height, width, depth according to slice axis.
Args:
data (ndarray): RAS oriented data.
slice_axis (int): Indicates the axis used for the 2D slice extraction:
Sagittal: 0, Coronal: 1, Axial: 2.
Returns:
ndarray: Array oriented with the following dimensions: (height, width, depth).
"""
if slice_axis == 0:
return data.transpose(2, 1, 0)
elif slice_axis == 1:
return data.transpose(2, 0, 1)
elif slice_axis == 2:
return data
def orient_img_ras(data, slice_axis):
"""Orient a given array with dimensions (height, width, depth) to RAS orientation.
Args:
data (ndarray): Data with following dimensions (Height, Width, Depth).
slice_axis (int): Indicates the axis used for the 2D slice extraction:
Sagittal: 0, Coronal: 1, Axial: 2.
Returns:
ndarray: Array oriented in RAS.
"""
if slice_axis == 0:
return data.transpose(2, 1, 0) if len(data.shape) == 3 else data.transpose(0, 3, 2, 1)
elif slice_axis == 1:
return data.transpose(1, 2, 0) if len(data.shape) == 3 else data.transpose(0, 2, 3, 1)
elif slice_axis == 2:
return data
def orient_shapes_hwd(data, slice_axis):
"""Swap dimensions according to match the height, width, depth orientation.
Args:
data (list or tuple): Shape or numbers associated with each image dimension
(e.g. image resolution).
slice_axis (int): Indicates the axis used for the 2D slice extraction:
Sagittal: 0, Coronal: 1, Axial: 2.
Returns:
ndarray: Reoriented vector.
"""
if slice_axis == 0:
return np.array(data)[[2, 1, 0]]
elif slice_axis == 1:
return np.array(data)[[2, 0, 1]]
elif slice_axis == 2:
return np.array(data)
def update_metadata(metadata_src_lst, metadata_dest_lst):
"""Update metadata keys with a reference metadata.
A given list of metadata keys will be changed and given the values of the reference metadata.
Args:
metadata_src_lst (list): List of source metadata used as reference for the
destination metadata.
        metadata_dest_lst (list): List of metadata that needs to be updated.
Returns:
list: updated metadata list.
"""
if metadata_src_lst and metadata_dest_lst:
if not isinstance(metadata_dest_lst[0], list): # annotation from one rater only
metadata_dest_lst[0]._update(metadata_src_lst[0], TRANSFORM_PARAMS)
else: # annotations from several raters
for idx, _ in enumerate(metadata_dest_lst[0]):
metadata_dest_lst[0][idx]._update(metadata_src_lst[0], TRANSFORM_PARAMS)
return metadata_dest_lst
def reorient_image(arr, slice_axis, nib_ref, nib_ref_canonical):
"""Reorient an image to match a reference image orientation.
It reorients a array to a given orientation and convert it to a nibabel object using the
reference nibabel header.
Args:
        arr (ndarray): Input array, array to reorient.
slice_axis (int): Indicates the axis used for the 2D slice extraction:
Sagittal: 0, Coronal: 1, Axial: 2.
nib_ref (nibabel): Reference nibabel object, whose header is used.
nib_ref_canonical (nibabel): `nib_ref` that has been reoriented to canonical orientation (RAS).
"""
# Orient image in RAS according to slice axis
arr_ras = orient_img_ras(arr, slice_axis)
# https://gitship.com/neuroscience/nibabel/blob/master/nibabel/orientations.py
ref_orientation = nib.orientations.io_orientation(nib_ref.affine)
ras_orientation = nib.orientations.io_orientation(nib_ref_canonical.affine)
# Return the orientation that transforms from ras to ref_orientation
trans_orient = nib.orientations.ornt_transform(ras_orientation, ref_orientation)
# apply transformation
return nib.orientations.apply_orientation(arr_ras, trans_orient)
def get_file_extension(filename):
""" Get file extension if it is supported
Args:
filename (str): Path of the file.
Returns:
str: File extension
"""
# Find the first match from the list of supported file extensions
extension = next((ext for ext in EXT_LST if filename.lower().endswith(ext)), None)
return extension
def update_filename_to_nifti(filename):
"""
Update filename extension to 'nii.gz' if not a NifTI file.
This function is used to help make non-NifTI files (e.g. PNG/TIF/JPG)
compatible with NifTI-only pipelines. The expectation is that a NifTI
version of the file has been created alongside the original file, which
allows the extension to be cleanly swapped for a `.nii.gz` extension.
Args:
filename (str): Path of original file.
Returns:
str: Path of the corresponding NifTI file.
"""
extension = get_file_extension(filename)
if not "nii" in extension:
filename = filename.replace(extension, ".nii.gz")
return filename
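# Hedged examples of the two helpers above (illustrative filenames):
#
# get_file_extension("sub-01_T1w.nii.gz")    # -> ".nii.gz"
# update_filename_to_nifti("sub-01_SEM.png") # -> "sub-01_SEM.nii.gz"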
def dropout_input(seg_pair):
"""Applies input-level dropout: zero to all channels minus one will be randomly set to zeros. This function verifies
if some channels are already empty. Always at least one input channel will be kept.
Args:
seg_pair (dict): Batch containing torch tensors (input and gt) and metadata.
Return:
seg_pair (dict): Batch containing torch tensors (input and gt) and metadata with channel(s) dropped.
"""
n_channels = seg_pair['input'].size(0)
# Verify if the input is multichannel
if n_channels > 1:
# Verify if some channels are already empty
n_unique_values = [len(torch.unique(input_data)) > 1 for input_data in seg_pair['input']]
idx_empty = np.where(np.invert(n_unique_values))[0]
# Select how many channels will be dropped between 0 and n_channels - 1 (keep at least one input)
n_dropped = random.randint(0, n_channels - 1)
if n_dropped > len(idx_empty):
            # Subtract the number of already-empty channels from the number of channels to drop
n_dropped = n_dropped - len(idx_empty)
# Select which channels will be dropped
idx_dropped = []
while len(idx_dropped) != n_dropped:
idx = random.randint(0, n_channels - 1)
# Don't include the empty channel in the dropped channels
if idx not in idx_empty:
idx_dropped.append(idx)
else:
idx_dropped = idx_empty
seg_pair['input'][idx_dropped] = torch.zeros_like(seg_pair['input'][idx_dropped])
else:
logger.warning("\n Impossible to apply input-level dropout since input is not multi-channel.")
return seg_pair
|
python
|
"""
tuning, featurization, output formatting
"""
import numpy as np
import time
def functionongraph(graphs_, i, key='deg', edge_flag=False):
# for graphs_[i], get the key-val distribution
components = len(graphs_[i]); lis = []
for j in range(components):
g = graphs_[i][j]
try:
assert (str(type(g)) == "<class 'networkx.classes.graphviews.SubGraph'>") or (str(type(g))) == "<class 'networkx.classes.graph.Graph'>"
except AssertionError:
if g is None:
                print('weird case: g is None')
return [0]
else:
print('Unconsidered Cases in function on graph')
if edge_flag==False:
tmp = [g.nodes[k][key] for k in g.nodes]
lis += tmp
return lis
def hisgram_single_feature(graphs_, n_bin, key='deg', his_norm_flag='yes', edge_flag=False, lowerbound=-1, upperbound=1, cdf_flag=False, uniform_flag = True):
import numpy as np
n = len(graphs_)
feature_vec = np.zeros((n, n_bin))
for i in range(n):
lis = functionongraph(graphs_, i, key, edge_flag=edge_flag)
if lis == []:
feature_vec[i] = 0
feature_vec[i] = hisgram(lis, n_bin, his_norm_flag=his_norm_flag,
lowerbound=lowerbound, upperbound=upperbound,
cdf_flag=cdf_flag, uniform_flag=uniform_flag)
return feature_vec
def hisgram(lis, n_bin=100, his_norm_flag='yes', lowerbound=-1, upperbound=1, cdf_flag=False, uniform_flag=True):
if lis == []:
print ('lis is empty')
return [0]*n_bin
# normalize lis
    # needs to be more rigorous
# TODO: test if it helps to normalize lis
if his_norm_flag == 'yes':
try:
            assert max(lis) < 1.1  # * 100000 # delete 100 later
        except AssertionError:
            print('The max of list is %s' % max(lis))
assert min(lis) > -1.1
max_ = max(lis)
# if max_ !=0:
# lis = [i/float(max_) for i in lis]
if not uniform_flag:
assert lowerbound + 1e-3 > 0
n_bin_ = np.logspace(np.log(lowerbound + 1e-3), np.log(upperbound),n_bin+1, base = np.e)
else:
n_bin_ = n_bin
if cdf_flag == True:
from statsmodels.distributions.empirical_distribution import ECDF
ecdf = ECDF(lis)
        if uniform_flag:
            return ecdf([i / float(n_bin) for i in range(0, n_bin)])
        else:
            return ecdf([i / float(n_bin) for i in range(0, n_bin)])
result = np.histogram(lis, bins=n_bin_, range=(lowerbound,upperbound))
return result[0]
def remove_zero_col(data, cor_flag=False):
# data = np.zeros((2,10))
# data[1,3] = data[1,5] = data[1,7] = 1
n_col = np.shape(data)[1]
del_col_idx = np.where(~data.any(axis=0))[0]
remain_col_idx = set(range(n_col)) - set(del_col_idx)
correspondence_dict = dict(zip(range(len(remain_col_idx)), remain_col_idx))
inverse_correspondence_dict = dict(zip(remain_col_idx, range(len(remain_col_idx))))
X = np.delete(data, np.where(~data.any(axis=0))[0], axis=1)
print('the shape after removing zero columns is ', np.shape(X))
if cor_flag == True:
return (X, correspondence_dict, inverse_correspondence_dict)
else:
return X
def merge_features(graph, graphs_, allowed, n_bin=30, his_norm_flag='yes', edge_flag=False, cdf_flag=False, uniform_flag = True):
print('Number of bins are %s'%n_bin)
n = len(graphs_)
X = np.zeros((n, 1))
for key in allowed:
# print(key)
if (key=='label') :
if graph == 'dd_test':
nbin = 90
else:
nbin = 40
tmp = hisgram_single_feature(graphs_, nbin, 'label', his_norm_flag=his_norm_flag, edge_flag=edge_flag, lowerbound=0, upperbound=1, cdf_flag=cdf_flag, uniform_flag=uniform_flag)
elif key == 'ricciCurvature': # use default bound for ricci curvature
tmp = hisgram_single_feature(graphs_, n_bin, key, his_norm_flag=his_norm_flag, edge_flag=edge_flag, cdf_flag=cdf_flag, uniform_flag=uniform_flag)
else:
tmp = hisgram_single_feature(graphs_, n_bin, key, his_norm_flag=his_norm_flag, edge_flag=edge_flag, cdf_flag=cdf_flag, uniform_flag=uniform_flag, lowerbound=0)
X = np.append(X, tmp, axis=1)
return remove_zero_col(X[:,1:])
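if __name__ == "__main__":
    # Hedged sanity check of the featurization helpers above
    # (synthetic values in [0, 1]; illustrative only).
    vals = list(np.random.uniform(0, 1, 200))
    print(hisgram(vals, n_bin=10, lowerbound=0, upperbound=1))
    print(remove_zero_col(np.array([[0., 1., 0.], [0., 2., 3.]])))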
|
python
|
import discord
from discord.ext import commands
from discord.ext.commands.cooldowns import BucketType
from .utils.video import run_command, video_size, has_audio
from .utils.message import get_nearest, get_msg_video, get_msg_image, get_msg_video_or_img
import tempfile
import os
import io
from PIL import Image
import random
class FFmpegError(Exception):
def __init__(self, process):
self.ret = process.ret
self.error = process.err.decode('utf-8')
class Video(commands.Cog):
__slots__ = 'bot',
def __init__(self, bot):
self.bot: commands.Bot = bot
async def basic_ffmpeg_command(self, ctx: commands.Context, ffmpeg_func, *args, filename='video.mp4', lookup=get_msg_video):
msg = await ctx.send('Looking for video...')
video = await get_nearest(ctx, lookup=lookup)
if video:
try:
await msg.edit(content='Rendering video...')
video = await self.bot.loop.run_in_executor(None, ffmpeg_func, video, *args)
video = io.BytesIO(video)
await msg.edit(content='Uploading video...')
await ctx.send(file=discord.File(video, filename=filename))
await msg.delete()
except FFmpegError as error:
await msg.edit(content=f'FFmpeg error:\n```\n{error.error[:1500]}```')
self.bot.logger.error(error.error)
else:
await msg.edit(content='No video found')
@staticmethod
def _sound_ffmpeg(media, media_type: str, sound: str):
ext = 'webm' if media_type == 'video' else 'mp4'
with tempfile.TemporaryDirectory() as folder:
outpath = os.path.join(folder, 'out.' + ext)
# enums?? what are those
if media_type == 'image':
inpath = os.path.join(folder, 'input.png')
Image.open(io.BytesIO(media)).convert('RGB').save(inpath)
cmd = [
'ffmpeg', '-i', sound,
'-loop', '1', '-i', inpath,
'-shortest', '-pix_fmt', 'yuv420p',
'-filter_complex', 'pad=ceil(iw/2)*2:ceil(ih/2)*2',
'-c:v', 'mpeg4',
'-f', 'mp4', outpath
]
elif media_type == 'video':
inpath = os.path.join(folder, 'input')
with open(inpath, 'wb') as file:
file.write(media)
cmd = [
'ffmpeg', '-v', 'error',
'-i', inpath,
'-i', sound,
'-map', '0:v',
'-map', '1:a',
'-shortest',
'-f', 'webm', outpath
]
else:
# ???
raise Exception(f'What {media_type!r}')
process = run_command(cmd)
if process.ret:
raise FFmpegError(process)
with open(outpath, 'rb') as file:
data = file.read()
return (data, ext)
async def sound_ffmpeg_command(self, ctx: commands.Context, sound: str, filename: str='video'):
msg = await ctx.send('Looking for media...')
media = await get_nearest(ctx, lookup=get_msg_video_or_img)
if media:
try:
await msg.edit(content='Rendering video...')
video, ext = await self.bot.loop.run_in_executor(None, self._sound_ffmpeg, media[0], media[1], sound)
video = io.BytesIO(video)
await msg.edit(content='Uploading video...')
await ctx.send(file=discord.File(video, filename=filename + f'.{ext}'))
await msg.delete()
except FFmpegError as error:
await msg.edit(content=f'FFmpeg error:\n```\n{error.error[:500]}```')
self.bot.logger.error(error.error)
else:
await msg.edit(content='No media found')
def how_ffmpeg(self, video) -> bytes:
with tempfile.TemporaryDirectory() as folder:
inpath = os.path.join(folder, 'input')
with open(inpath, 'wb') as file:
file.write(video)
outpath = os.path.join(folder, 'out.mp4')
cmd = [
'ffmpeg', '-i', inpath, '-i', 'assets/how.jpg',
'-c:v', 'h264', '-c:a', 'copy',
'-filter_complex', '[0]scale=height=529:width=544[scaled];[1][scaled]overlay=88:0[out]',
'-map', '0:a?', '-map', '[out]', '-f', 'mp4', outpath,
'-hide_banner', '-v', 'error'
]
process = run_command(cmd)
if process.ret:
raise FFmpegError(process)
with open(outpath, 'rb') as file:
data = file.read()
return data
@commands.command(aliases=['howvideo'])
@commands.cooldown(2, 20, BucketType.default)
async def howv(self, ctx):
"""
HOW (video)
looks for recent video and runs command on it
"""
return await self.basic_ffmpeg_command(ctx, self.how_ffmpeg, filename='HOW.mp4')
def keem_ffmpeg(self, video) -> bytes:
        # hard coded so as not to run an unnecessary ffprobe command every time
keem_length = 9.985
keem_size = (118, 94)
with tempfile.TemporaryDirectory() as folder:
inpath = os.path.join(folder, 'input')
with open(inpath, 'wb') as file:
file.write(video)
outpath = os.path.join(folder, 'out.mp4')
size = video_size(inpath)
if size[0] < size[1]:
w = size[0] // 3
h = (keem_size[1] * w) // keem_size[0]
else:
h = size[1] // 3
w = (keem_size[0] * h) // keem_size[1]
cmd = [
'ffmpeg', '-i', 'assets/keem.mp4', '-i', inpath,
'-c:v', 'h264',
'-filter_complex', f'[0]scale=width={w}:height={h}[scaled];[1][scaled]overlay=x=main_w-overlay_w:y=0:eval=init:eof_action=endall[final];[final]pad=ceil(iw/2)*2:ceil(ih/2)*2', '-shortest',
'-f', 'mp4', outpath,
'-hide_banner', '-v', 'error'
]
# only add amix if video has audio
# as it would error otherwise
if has_audio(inpath):
cmd[8] = 'amix=duration=shortest;' + cmd[8]
process = run_command(cmd)
if process.ret:
raise FFmpegError(process)
with open(outpath, 'rb') as file:
data = file.read()
return data
@commands.command(aliases=['keemstar', 'keemscream'])
@commands.cooldown(2, 20, BucketType.default)
async def keem(self, ctx):
"""
keemstar scream
looks for recent video and runs command on it
"""
return await self.basic_ffmpeg_command(ctx, self.keem_ffmpeg, filename='keem.mp4')
def vibrato_ffmpeg(self, video, f) -> bytes:
with tempfile.TemporaryDirectory() as folder:
inpath = os.path.join(folder, 'input')
with open(inpath, 'wb') as file:
file.write(video)
if not has_audio(inpath):
return None
outpath = os.path.join(folder, 'out.mp4')
cmd = [
'ffmpeg', '-i', inpath,
'-af', f'vibrato={f:.2f}:1,aformat=s16p', '-c:v', 'copy',
'-f', 'mp4', outpath,
'-hide_banner', '-v', 'error'
]
process = run_command(cmd)
if process.ret:
raise FFmpegError(process)
with open(outpath, 'rb') as file:
data = file.read()
return data
@commands.command()
@commands.cooldown(2, 20, BucketType.default)
async def vibrato(self, ctx, modulation: float=0.5):
"""
vibrato audio ooOoOooOOOooooOoo
looks for recent video and runs command on it
"""
f = modulation * 16
        if f >= 20000 or f <= 0:
            return await ctx.send('Modulation is out of range, it has to be greater than 0 and less than 1250')
return await self.basic_ffmpeg_command(ctx, self.vibrato_ffmpeg, f, filename='vibrato.mp4')
@commands.command(aliases=['cave'])
@commands.cooldown(2, 20, BucketType.default)
async def cavesounds(self, ctx):
"""
minecraft cave sound to a picture
looks for recent image/video and runs command on it
"""
return await self.sound_ffmpeg_command(ctx, f'assets/cave/cave{random.randint(0, 7)}.mp3', filename='cave')
@commands.command(aliases=['fnaf'])
@commands.cooldown(2, 20, BucketType.default)
async def fnafsounds(self, ctx, fnaf=None):
"""
fnaf sound
looks for recent image/video and runs command on it
`fnaf` can be either set to `1`, `2`, `3`, `4`, `6`, `sl` or `ucn`. defaults to random
"""
options = ('1', '2', '3', '4', '6', 'sl', 'ucn')
if fnaf is None or fnaf not in options:
fnaf = random.choice(options)
folder = os.path.join('assets/fnaf', fnaf)
sounds = os.listdir(folder)
sound = os.path.join(folder, random.choice(sounds))
return await self.sound_ffmpeg_command(ctx, sound, filename='fnaf')
@commands.command(aliases=['amongus'])
@commands.cooldown(2, 20, BucketType.default)
async def amongussounds(self, ctx, sfx=None):
"""among us sound fx on video or img"""
options = ('amongus', 'death', 'drip', 'report', 'vent')
if sfx not in options:
sfx = random.choice(options)
return await self.sound_ffmpeg_command(ctx, f'assets/amongus/{sfx}.mp3', filename='amongus')
def setup(bot):
bot.add_cog(Video(bot))
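# Illustrative only (not part of the original file): with discord.py-style extensions,
# this cog is normally pulled in from the bot's entry point via load_extension; the
# module path below is a guess and depends on the project layout.
#
#   bot.load_extension('cogs.video')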
|
python
|
# This file contains all the secretary routes
####################################################################
# import
####################################################################
from flask_restx import Resource, reqparse # to use Resource, that expose http request method
from application.api.secretary.database_functions import *
from application.api.secretary.models import *
from application.api.database_config import DatabaseConnector
####################################################################
# object
####################################################################
# instance of the database connection
# connection = DatabaseConnector('localhost', 'my_university_db', 'root', '')
connection = DatabaseConnector('bvrhcrukmknumkqtieuk-mysql.services.clever-cloud.com',
3306,
'bvrhcrukmknumkqtieuk',
'ud3untakpkengqz5',
'kHbxAB3JuoNygcXdXbH9')
####################################################################
# routing
####################################################################
# ============================ sede ========================== #
@secretary.route('/sede')
class HeadOffice(Resource):
@secretary.marshal_with(get_head_office_model)
    def get(self):
        head_offices = get_all_head_offices(connection.get_connection())
        print(head_offices)
        return head_offices, 250
@secretary.expect(insert_headoffice_model)
@secretary.marshal_with(insert_headoffice_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('nome_sede', type=str, help='nome della sede universitaria')
parser.add_argument('orario_apertura', type=int, help='orario apertura della sede universitaria')
parser.add_argument('orario_chiusura', type=int, help='orario chiusura della sede universitaria')
parser.add_argument('numero_piani', type=int, help='numero piani della sede universitaria')
parser.add_argument('cap', type=int, help='cap della sede universitaria')
        parser.add_argument('via_piazza', type=str, help='via/piazza della sede universitaria')
parser.add_argument('civico', type=str, help='civico della sede universitaria')
args = parser.parse_args(strict=True)
insertHeadOffice(args['nome_sede'],
args['orario_apertura'],
args['orario_chiusura'],
args['numero_piani'],
args['cap'],
args['via_piazza'],
args['civico'],
connection.get_connection())
return args, 250
# ============================ aggiungi contatto ========================== #
@secretary.route('/contatto_sede')
class Contact(Resource):
@secretary.expect(insert_contact_model)
@secretary.marshal_with(insert_contact_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('nome_sede', type=str, help='nome della sede universitaria')
parser.add_argument('tipo_contatto', type=str, help='tipo contatto della sede universitaria')
parser.add_argument('valore_contatto', type=str, help='valore contatto della sede universitaria')
args = parser.parse_args(strict=True)
insertHeadOfficeContact(args['nome_sede'],
args['tipo_contatto'],
args['valore_contatto'],
connection.get_connection())
return args, 250
# ============================ cancella sede ========================== #
@secretary.route('/cancella_sede')
class DelHeadOffice(Resource):
@secretary.expect(delete_head_office_model)
@secretary.marshal_with(delete_head_office_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('nome_sede', type=str, help='nome della sede universitaria')
args = parser.parse_args(strict=True)
deleteHeadOffice(args['nome_sede'], connection.get_connection())
return args, 250
# ============================ aula ========================== #
@secretary.route('/aula')
class Room(Resource):
@secretary.expect(insert_room_model)
@secretary.marshal_with(insert_room_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('nome_sede', type=str, help='nome della sede universitaria')
parser.add_argument('numero_piano', type=int, help='numero piano dell\' aula universitaria')
parser.add_argument('numero_aula', type=int, help='numero aula universitaria')
parser.add_argument('capienza', type=int, help='capienza dell\' aula universitaria')
args = parser.parse_args(strict=True)
insertRoom(args['nome_sede'],
args['numero_piano'],
args['numero_aula'],
args['capienza'],
connection.get_connection())
return args, 250
# ============================ cancella aula ========================== #
@secretary.route('/cancella_aula')
class DelRoom(Resource):
@secretary.expect(delete_room_model)
@secretary.marshal_with(delete_room_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('nome_sede', type=str, help='nome della sede universitaria')
parser.add_argument('numero_piano', type=int, help='numero piano dell\' aula universitaria')
parser.add_argument('numero_aula', type=int, help='numero aula universitaria')
args = parser.parse_args(strict=True)
deleteRoom(args['nome_sede'],
args['numero_piano'],
args['numero_aula'],
connection.get_connection())
return args, 250
# ============================ corso laurea ========================== #
@secretary.route('/corso_laurea')
class DegreeCourse(Resource):
@secretary.marshal_with(insert_degree_course_model)
def get(self):
return get_all_degree_courses(connection.get_connection()), 250
@secretary.expect(insert_degree_course_model)
@secretary.marshal_with(insert_degree_course_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('codice_corso', type=str, help='codice corso universitario')
parser.add_argument('nome_corso', type=str, help='nome corso universitario')
parser.add_argument('durata_corso_laurea', type=int, help='durata corso laurea universitario')
args = parser.parse_args(strict=True)
insertDegreeCourse(args['codice_corso'],
args['nome_corso'],
args['durata_corso_laurea'],
connection.get_connection())
return args, 250
# ============================ cancella corso laurea ========================== #
@secretary.route('/cancella_corso_laurea')
class DelDegreeCourse(Resource):
@secretary.expect(delete_degree_course_model)
@secretary.marshal_with(delete_degree_course_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('codice_corso', type=str, help='codice corso universitario')
args = parser.parse_args(strict=True)
deleteDegreeCourse(args['codice_corso'], connection.get_connection())
return args, 250
# ============================ locazione ========================== #
@secretary.route('/locazione')
class Located(Resource):
@secretary.marshal_with(get_all_location_model)
def get(self):
return get_all_locations(connection.get_connection()), 250
@secretary.expect(insert_location_model)
@secretary.marshal_with(insert_location_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('nome_sede', type=str, help='nome sede universitaria')
parser.add_argument('codice_corso', type=str, help='codice corso universitario')
args = parser.parse_args(strict=True)
insertLocation(args['nome_sede'], args['codice_corso'], connection.get_connection())
return args, 250
# ============================ cancella locazione ========================== #
@secretary.route('/cancella_locazione')
class DelLocated(Resource):
@secretary.expect(insert_location_model)
@secretary.marshal_with(insert_location_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('nome_sede', type=str, help='nome sede universitaria')
parser.add_argument('codice_corso', type=str, help='codice corso universitario')
args = parser.parse_args(strict=True)
deleteLocation(args['nome_sede'], args['codice_corso'], connection.get_connection())
return args, 250
# ============================ disciplina ========================== #
@secretary.route('/disciplina')
class Discipline(Resource):
@secretary.marshal_with(get_all_discipline_model)
def get(self):
return get_all_discipline(connection.get_connection())
@secretary.expect(insert_discipline_model)
@secretary.marshal_with(insert_discipline_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('codice_corso', type=str, help='codice corso universitario')
parser.add_argument('codice_disciplina', type=str, help='codice della disciplina universitaria')
parser.add_argument('nome_disciplina', type=str, help='nome della disciplina universitaria')
parser.add_argument('cfu', type=int, help='numero di cfu della disciplina universitaria')
parser.add_argument('semestre', type=int, help='semestre della disciplina universitaria')
        parser.add_argument('anno', type=int, help='anno della disciplina universitaria')
args = parser.parse_args(strict=True)
insertDiscipline(args['codice_corso'],
args['codice_disciplina'],
args['nome_disciplina'],
args['cfu'],
args['semestre'],
args['anno'],
connection.get_connection())
return args, 250
# ============================ cancella disciplina ========================== #
@secretary.route('/cancella_disciplina')
class DelDiscipline(Resource):
@secretary.expect(delete_discipline_model)
@secretary.marshal_with(delete_discipline_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('codice_corso', type=str, help='codice corso universitario')
parser.add_argument('codice_disciplina', type=str, help='codice disciplina universitaria')
args = parser.parse_args(strict=True)
deleteDiscipline(args['codice_corso'], args['codice_disciplina'], connection.get_connection())
return args, 250
# ============================ docente ========================== #
@secretary.route('/docente')
class Professor(Resource):
@secretary.marshal_with(get_all_teacher_model)
def get(self):
# print(get_all_teachers(connection.get_connection()))
return get_all_teachers(connection.get_connection()), 250
@secretary.expect(insert_teacher_model)
@secretary.marshal_with(insert_teacher_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('cf', type=str, help='cf del docente')
parser.add_argument('nome', type=str, help='nome del docente')
parser.add_argument('cognome', type=str, help='cognome del docente')
parser.add_argument('data_di_nascita', type=str, help='data di nascita del docente')
parser.add_argument('luogo_di_nascita', type=str, help='luogo di nascita del docente')
parser.add_argument('cap', type=int, help='cap del docente')
parser.add_argument('via_piazza', type=str, help='indirizzo del docente')
parser.add_argument('civico', type=str, help='civico del docente')
parser.add_argument('matricola_docente', type=str, help='matricola del docente')
parser.add_argument('email_docente', type=str, help='email del docente')
parser.add_argument('password_docente', type=str, help='password del docente')
args = parser.parse_args(strict=True)
insertTeacher(args['cf'],
args['nome'],
args['cognome'],
args['data_di_nascita'],
args['luogo_di_nascita'],
args['cap'],
args['via_piazza'],
args['civico'],
args['matricola_docente'],
args['email_docente'],
args['password_docente'],
connection.get_connection())
return args, 250
# ============================ cancella docente ========================== #
@secretary.route('/cancella_docente')
class DelProfessor(Resource):
@secretary.expect(delete_teacher_model)
@secretary.marshal_with(delete_teacher_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('cf', type=str, help='cf del docente')
parser.add_argument('matricola_docente', type=str, help='matricola del docente')
args = parser.parse_args(strict=True)
deleteTeacher(args['cf'], args['matricola_docente'], connection.get_connection())
return args, 250
# ============================ studente ========================== #
@secretary.route('/studente')
class Student(Resource):
@secretary.marshal_with(get_all_student_model)
def get(self):
return get_all_students(connection.get_connection())
@secretary.expect(insert_student_model)
@secretary.marshal_with(insert_student_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('cf', type=str, help='cf dello studente')
parser.add_argument('nome', type=str, help='nome dello studente')
parser.add_argument('cognome', type=str, help='cognome dello studente')
parser.add_argument('data_di_nascita', type=str, help='data di nascita dello studente')
parser.add_argument('luogo_di_nascita', type=str, help='luogo di nascita dello studente')
parser.add_argument('cap', type=str, help='cap dello studente')
parser.add_argument('via_piazza', type=str, help='via piazza dello studente')
parser.add_argument('civico', type=str, help='civico dello studente')
parser.add_argument('matricola_studente', type=str, help='matricola studente dello studente')
parser.add_argument('email_studente', type=str, help='email dello studente')
parser.add_argument('data_immatricolazione', type=str, help='data immatricolazione dello studente')
parser.add_argument('password_studente', type=str, help='password dello studente')
parser.add_argument('codice_corso', type=str, help='codice del corso di laurea universitario')
args = parser.parse_args(strict=True)
insertStudent(args['cf'],
args['nome'],
args['cognome'],
args['data_di_nascita'],
args['luogo_di_nascita'],
args['cap'],
args['via_piazza'],
args['civico'],
args['matricola_studente'],
args['email_studente'],
args['data_immatricolazione'],
args['password_studente'],
args['codice_corso'],
connection.get_connection())
return args, 201
# ============================ cancella studente ========================== #
@secretary.route('/cancella_studente')
class DelStudent(Resource):
@secretary.expect(delete_student_model)
@secretary.marshal_with(delete_student_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('cf', type=str, help='cf dello studente')
parser.add_argument('matricola_studente', type=str, help='matricola studente universitario')
args = parser.parse_args(strict=True)
deleteStudent(args['cf'], args['matricola_studente'], connection.get_connection())
return args, 250
# ============================ insegnamento ========================== #
@secretary.route('/insegnamento')
class Teaching(Resource):
@secretary.marshal_with(get_all_teachings_model)
def get(self):
return get_all_teachings(connection.get_connection()), 250
@secretary.expect(delete_teach_model)
@secretary.marshal_with(delete_teach_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('matricola_docente', type=str, help='matricola del docente')
parser.add_argument('codice_corso', type=str, help='codice del corso di laurea universitario')
parser.add_argument('codice_disciplina', type=str, help='codice della disciplina universitaria')
args = parser.parse_args(strict=True)
insertTeach(args['matricola_docente'],
args['codice_corso'],
args['codice_disciplina'],
connection.get_connection())
return args, 250
# ============================ cancella insegnamento ========================== #
@secretary.route('/cancella_insegnamento')
class DelTeaching(Resource):
@secretary.expect(delete_teach_model)
@secretary.marshal_with(delete_teach_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('matricola_docente', type=str, help='matricola del docente')
parser.add_argument('codice_corso', type=str, help='codice del corso di laurea universitario')
parser.add_argument('codice_disciplina', type=str, help='codice della disciplina universitaria')
args = parser.parse_args(strict=True)
deleteTeach(args['matricola_docente'],
args['codice_corso'],
args['codice_disciplina'],
connection.get_connection())
return args, 250
# =================
@secretary.route('/aggiorna_anno_in_corso')
class UpdateYearStudent(Resource):
@secretary.expect(update_anno_in_corso_studente_model)
@secretary.marshal_with(update_anno_in_corso_studente_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('anno_in_corso', type=str, help='anno in corso dello studente')
parser.add_argument('matricola_studente', type=str, help='matricola studente universitario')
args = parser.parse_args(strict=True)
updateAnnoInCorso(args['anno_in_corso'], args['matricola_studente'], connection.get_connection())
return args, 250
# ============================ lavora ========================== #
@secretary.route('/lavora')
class Lavora(Resource):
@secretary.expect(insert_lavora_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('codice_corso', type=str, help='codice del corso di laurea')
parser.add_argument('matricola_docente', type=int, help='matricola del docente')
args = parser.parse_args(strict=True)
insertLavora(args['codice_corso'],
args['matricola_docente'],
connection.get_connection())
return args, 250
@secretary.route('/delete_lavora')
class DeleteLavora(Resource):
@secretary.expect(insert_lavora_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('codice_corso', type=str, help='codice del corso di laurea')
parser.add_argument('matricola_docente', type=int, help='matricola del docente')
args = parser.parse_args(strict=True)
deleteLavora(args['codice_corso'],
args['matricola_docente'],
connection.get_connection())
return args, 250
# ============================ insert_contatto_persona ========================== #
@secretary.route('/insert_contatto_persona')
class InsertCOntattoPersona(Resource):
@secretary.expect(person_contact_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('cf', type=str, help='cf')
parser.add_argument('tipo_contatto', type=str, help='tipo_contatto')
parser.add_argument('valore_contatto', type=str, help='valore_contatto')
args = parser.parse_args(strict=True)
insertContattoPersona(args['tipo_contatto'], args['valore_contatto'], args['cf'], connection.get_connection())
return args, 250
# ============================ delete_contatto_persona ========================== #
@secretary.route('/delete_contatto_persona')
class DeleteContattoPersona(Resource):
@secretary.expect(person_contact_model)
def post(self):
# arguments
parser = reqparse.RequestParser()
parser.add_argument('cf', type=str, help='cf')
parser.add_argument('tipo_contatto', type=str, help='tipo_contatto')
parser.add_argument('valore_contatto', type=str, help='valore_contatto')
args = parser.parse_args(strict=True)
deleteContattoPersona(args['cf'], args['tipo_contatto'], args['valore_contatto'], connection.get_connection())
return args, 250
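# Illustrative only (not part of the original module): assuming the `secretary`
# namespace imported from application.api.secretary.models is mounted under /secretary,
# a head office could be created with a plain POST. Host, prefix and payload values
# below are placeholders.
#
#   import requests
#   payload = {
#       'nome_sede': 'Sede Centrale', 'orario_apertura': 8, 'orario_chiusura': 20,
#       'numero_piani': 3, 'cap': 80100, 'via_piazza': 'Via Roma', 'civico': '1',
#   }
#   requests.post('http://localhost:5000/secretary/sede', json=payload)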
|
python
|
from view import View
from serialConnection import SerialConnection
from PyQt5.QtWidgets import QApplication
import time
class Controller:
def __init__(self, serialConnection, Instructions):
self.serialConnection = serialConnection
self.Instructions = Instructions
self.samples = []
self.times = []
self.stop = False
self.stepsPerMM = 0.018
def handleCalibrate(self):
self.handleStop()
self.serialConnection.sendInstruction(self.Instructions.CALIBRATE)
self.stepsPerMM = 1 / (self.serialConnection.readSample() / 100)
print("NUMSTEPS ",self.stepsPerMM)
def handleScanBetween(self, P1, P2, sampleDuration, stepLength, stepSize):
""" Sends the TWOPOS_SCAN instruction along with its associated values.
:param: P1 - the first position from the top slider widget
:param: P2 - the second position from the bottom slider widget
        :param: sampleDuration - the value from sampleDuration spinbox indicating how long to sample for at each step.
:param: stepLength - the value selected in the stepLength_combobox (Full, Half, or Quarter)
"""
self.handleStop()
self.stop = False
self.serialConnection.sendInstruction(self.Instructions.TWOPOS_SCAN)
self.serialConnection.sendValue(P1)
self.serialConnection.sendValue(P2)
sampleDurationWhole = int(float(sampleDuration) * 1000)
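        # e.g. sampleDuration '1.5' (seconds) -> 1500 ms = 0x0005DC, sent as three
        # bytes: lower 0xDC, middle 0x05, upper 0x00 (values here are illustrative)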
sampleDurationLower = sampleDurationWhole & 0xFF
sampleDurationMiddle = (sampleDurationWhole & 0xFF00) >> 8
sampleDurationUpper = (sampleDurationWhole & 0xFF0000) >> 16
self.serialConnection.sendValue(sampleDurationLower)
self.serialConnection.sendValue(sampleDurationMiddle)
self.serialConnection.sendValue(sampleDurationUpper)
stepLengthWhole = int(float(stepLength) * 1000)
stepLengthLower = stepLengthWhole & 0xFF
stepLengthUpper = (stepLengthWhole & 0xFF00) >> 8
self.serialConnection.sendValue(stepLengthLower)
self.serialConnection.sendValue(stepLengthUpper)
if (stepSize == "Full"):
self.serialConnection.sendValue(2)
elif (stepSize == "Half"):
self.serialConnection.sendValue(1)
elif (stepSize == "Quarter"):
self.serialConnection.sendValue(0)
else:
self.serialConnection.sendValue(2)
while(not self.stop):
currentSample = self.serialConnection.readSample()
if currentSample == 0xFFFF:
break
currentTime = self.serialConnection.readTime()
print(currentTime)
self.samples.append(currentSample)
self.times.append(currentTime)
QApplication.processEvents()
def handleStop(self):
""" Sets the stop boolean to true so that we cease reading samples.
"""
self.stop = True
self.serialConnection.sendStopInstruction(self.Instructions.STOP)
def handleGoToPoint(self, P1):
""" sends the GOTO instruction to move to position 1
"""
self.handleStop()
self.serialConnection.sendInstruction(self.Instructions.GOTO)
self.serialConnection.sendValue(P1)
def handleStartSample(self, averageInterval):
""" Sends the START_SAMPLE instruction to turn on the ADC clock and wait to receive samples from the sensor through the tiva UART connection.
Samples until the stop button is pressed.
:param: sampleDuration - [Deprecated]
:param: averageInterval - the amount of samples to take to average on.
"""
self.handleStop()
self.stop = False
self.serialConnection.sendInstruction(self.Instructions.START_SAMPLE)
#self.serialConnection.sendValue(sampleDuration)
#self.serialConnection.sendValue(averageInterval)
while(not self.stop):
currentSample = self.serialConnection.readSample()
# print("Current sample", currentSample)
if currentSample == 0xFFFF:
break
self.samples.append(currentSample)
self.times.append(20)
# print(self.samples)
QApplication.processEvents()
def handleClearSamples(self):
"""
Clear the samples list for the controller. [ Need to relink the list on the view. ]
"""
self.samples = []
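# Illustrative wiring sketch (not part of the original file): the SerialConnection
# constructor arguments and the Instructions container are assumptions; they are
# defined in serialConnection.py and elsewhere in this project.
#
#   connection = SerialConnection()          # real constructor args not shown here
#   controller = Controller(connection, Instructions)
#   controller.handleCalibrate()
#   controller.handleGoToPoint(100)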
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from configurable import Configurable
#***************************************************************
class BaseOptimizer(Configurable):
""""""
#=============================================================
def __init__(self, *args, **kwargs):
""""""
self._global_step = kwargs.pop('global_step', tf.Variable(0., trainable=False))
super(BaseOptimizer, self).__init__(*args, **kwargs)
self._accumulators = {}
return
#=============================================================
def minimize(self, loss, name=None):
""""""
# Error checking
var_list = tf.trainable_variables()
for x_tm1 in var_list:
if not isinstance(x_tm1, tf.Variable):
raise TypeError("Argument is not a tf.Variable: %s" % x_tm1)
if not var_list:
raise ValueError("No variables to optimize")
if loss.dtype.base_dtype != tf.float32:
raise ValueError('Loss is not float32')
# Compute gradients
var_refs = [x_tm1._ref() for x_tm1 in var_list]
grads = tf.gradients(loss, var_refs,
colocate_gradients_with_ops=True,
gate_gradients=True,
aggregation_method=2)
for x_tm1, g_t in zip(var_list, grads):
if g_t is not None:
if x_tm1.dtype.base_dtype != tf.float32:
raise ValueError('%s is not float32' % x_tm1.name)
# Apply gradients
with tf.control_dependencies(None):
self._init_acc(var_list, grads)
with tf.name_scope(name, self._name,[]) as name:
      # materialized to a list so it can be iterated here and again in _finish()
      caches = list(filter(lambda cache: cache['g_t'] is not None, self._prepare(var_list, grads)))
for cache in caches:
x_tm1, g_t = cache['x_tm1'], cache['g_t']
with tf.name_scope("update_" + x_tm1.op.name), tf.device(x_tm1.device):
if isinstance(g_t, tf.Tensor):
cache['g_t'] = tf.select(tf.is_finite(g_t), g_t, tf.zeros_like(g_t))
self._apply_dense(cache)
else:
cache['g_t'] = tf.select(tf.is_finite(g_t.values), g_t.values, tf.zeros_like(g_t.values))
cache['idxs'] = g_t.indices
self._apply_sparse(cache)
with tf.control_dependencies([self._finish(caches)]):
with tf.device(self.global_step.device):
return tf.assign_add(self.global_step, 1, name=name).op
#=============================================================
def _init_acc(self, var_list, grads):
""""""
for x_tm1, g_t in zip(var_list, grads):
if self.chi > 0:
        tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                             self.get_accumulator(x_tm1, 'x'))
shape = self.get_variable_shape(x_tm1)
if isinstance(g_t, tf.Tensor):
self.get_accumulator(x_tm1, 'x/tm1', [])
else:
self.get_accumulator(x_tm1, 'x/tm1', [shape[0]]+[1]*(len(shape)-1))
return
#=============================================================
def _prepare(self, var_list, grads):
""""""
caches = []
for x_tm1, g_t in zip(var_list, grads):
caches.append({'x_tm1': x_tm1, 'g_t': g_t, 'updates': []})
return caches
#=============================================================
def _apply_dense(self, cache):
""""""
raise NotImplementedError()
#=============================================================
def _apply_sparse(self, cache):
""""""
raise NotImplementedError()
#=============================================================
@staticmethod
def get_variable_shape(x_tm1):
return x_tm1.initialized_value().get_shape().as_list()
#=============================================================
def get_accumulator(self, x_tm1, acc_name, shape=None):
""""""
if shape is None:
shape = self.get_variable_shape(x_tm1)
if acc_name not in self._accumulators:
self._accumulators[acc_name] = {}
accumulator = self._accumulators[acc_name]
if x_tm1 not in accumulator:
new_name = '%s/%s' % (self.name, acc_name)
zeros = tf.zeros(shape, dtype=x_tm1.dtype)
with tf.name_scope('%s/%s' % (x_tm1.op.name, new_name)) as scope:
with tf.device(x_tm1.device):
accumulator[x_tm1] = b_tm1 = tf.Variable(zeros, name=scope, trainable=False)
if isinstance(x_tm1, tf.Variable) and x_tm1._save_slice_info:
real_acc_name = scope[len(x_tm1.op.name + '/'):-1]
slice_info = x_tm1._save_slice_info
b_tm1._set_save_slice_info(tf.Variable.SaveSliceInfo(
            '%s/%s' % (slice_info.full_name, real_acc_name),
slice_info.full_shape[:],
slice_info.var_offset[:],
slice_info.var_shape[:]))
return accumulator[x_tm1]
#=============================================================
def _dense_moving_average(self, x_tm1, a_t, name, beta=.9):
""""""
b_tm1 = self.get_accumulator(x_tm1, '%s' % name)
tm1 = self.get_accumulator(x_tm1, '%s/tm1' % name, shape=[])
t = tf.assign_add(tm1, 1)
if beta < 1:
beta_t = tf.convert_to_tensor(beta, name='%s/decay' % name)
beta_t = beta_t * (1-beta**tm1) / (1-beta**t)
else:
beta_t = tm1 / t
b_t = tf.assign(b_tm1, beta_t*b_tm1)
b_t = tf.assign_add(b_t, (1-beta_t)*a_t)
return b_t, t
#=============================================================
def _sparse_moving_average(self, x_tm1, idxs, a_t_, name, beta=.9):
""""""
b_tm1 = self.get_accumulator(x_tm1, '%s' % name)
b_tm1_ = tf.gather(b_tm1, idxs)
shape = self.get_variable_shape(x_tm1)
tm1 = self.get_accumulator(x_tm1, '%s/tm1' % name, shape=[shape[0]]+[1]*(len(shape)-1))
tm1_ = tf.gather(tm1, idxs)
t = tf.scatter_add(tm1, idxs, tf.ones_like(tm1_))
t_ = tf.gather(t, idxs)
if beta < 1:
beta_t = tf.convert_to_tensor(beta, name='%s/decay' % name)
beta_t_ = beta_t * (1-beta_t**tm1_) / (1-beta_t**t_)
else:
beta_t_ = tm1_/t_
b_t = tf.scatter_update(b_tm1, idxs, beta_t_*b_tm1_)
b_t = tf.scatter_add(b_t, idxs, (1-beta_t_)*a_t_)
return b_t, t
#=============================================================
def _finish(self, caches):
""""""
if self.clip > 0:
S_t = [cache['s_t'] for cache in caches]
S_t, _ = tf.clip_by_global_norm(S_t, self.clip)
for cache, s_t in zip(caches, S_t):
cache['s_t'] = s_t
for cache in caches:
x_tm1 = cache['x_tm1']
s_t = cache['s_t']
updates = cache['updates']
with tf.name_scope('update_' + x_tm1.op.name), tf.device(x_tm1.device):
if 'idxs' in cache:
idxs = cache['idxs']
x_t = tf.scatter_sub(x_tm1, idxs, s_t)
if self.chi > 0:
x_t_ = tf.gather(x_t, idxs)
x_bar_t, t_x_bar = self._sparse_moving_average(x_tm1, idxs, x_t_, 'x', beta=self.chi)
else:
x_t = tf.assign_sub(x_tm1, s_t)
if self.chi > 0:
x_bar_t, t_x_bar = self._dense_moving_average(x_tm1, x_t, 'x', beta=self.chi)
updates.append(x_t)
if self.chi > 0:
updates.extend([x_bar_t, t_x_bar])
update_ops = [tf.group(*cache['updates']) for cache in caches]
return tf.group(*update_ops, name='update')
#==============================================================
def average(self, x_tm1):
""""""
    if 'x' in self._accumulators:
      return self._accumulators['x'].get(x_tm1, x_tm1)
    else:
      return x_tm1
#==============================================================
def average_name(self, x_tm1):
""""""
return x_tm1.op.name + '/' + self._name + '/' + 'x'
#==============================================================
def variables_to_restore(self, moving_avg_variables=None):
""""""
name_map = {}
if moving_avg_variables is None:
moving_avg_variables = tf.trainable_variables()
moving_avg_variables += tf.moving_average_variables()
# Remove duplicates
moving_avg_variables = set(moving_avg_variables)
# Collect all the variables with moving average,
for v in moving_avg_variables:
name_map[self.average_name(v)] = v
# Make sure we restore variables without moving average as well.
for v in list(set(tf.all_variables()) - moving_avg_variables):
if v.op.name not in name_map:
name_map[v.op.name] = v
return name_map
#===============================================================
@property
def learning_rate(self):
if self.decay_steps > 0:
return super(BaseOptimizer, self).learning_rate * self.decay**(self.global_step / self.decay_steps)
else:
return super(BaseOptimizer, self).learning_rate
@property
def global_step(self):
return self._global_step
@property
def accumulators(self):
return self._accumulators
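#***************************************************************
# Illustrative subclass sketch (not part of the original file): _finish() consumes
# cache['s_t'] as the step subtracted from each variable, so a bare SGD optimizer only
# needs to fill that slot in for the dense and sparse cases. learning_rate is assumed
# to come from the Configurable plumbing exactly as it does for the base class above.
class SGDOptimizer(BaseOptimizer):
  """"""

  #=============================================================
  def _apply_dense(self, cache):
    """"""

    cache['s_t'] = self.learning_rate * cache['g_t']
    return

  #=============================================================
  def _apply_sparse(self, cache):
    """"""

    # cache['g_t'] already holds the gathered gradient values; cache['idxs'] is
    # applied later by _finish() via tf.scatter_sub
    cache['s_t'] = self.learning_rate * cache['g_t']
    return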
|
python
|
# %%
from sre_constants import error
import pandas as pd
import openpyxl as pxl
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common import exceptions as e
import time
import datetime
import re
import traceback
#! EXTRACTION
def call_driver_(url):
"""
This function instantiates a remotely operated browser.
Returns:
[WebDriver]: A driver.
"""
DRIVER_PATH = r'/Users/studocu/Downloads/chromedriver'
driver = webdriver.Chrome(DRIVER_PATH)
driver.get(url)
driver.maximize_window()
return driver
def load_button_(driver):
"""
This function clicks load button until the last load.
"""
LOAD_MORE_XPATH = r'//span[text()="Load More"]'
while True:
try:
load_more = driver.find_element_by_xpath(LOAD_MORE_XPATH)
actions = ActionChains(driver)
actions.move_to_element(load_more).perform()
driver.execute_script('arguments[0].scrollIntoView({behavior: "smooth", block: "center", inline: "center"});', load_more)
WebDriverWait(driver, 4).until(
EC.element_to_be_clickable((By.XPATH, LOAD_MORE_XPATH)))
load_more.click()
except:
break
def get_links_(driver):
LINKS_PATH = r'//ul[@class="MuiList-root MuiList-padding"]//a'
WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH,LINKS_PATH)))
links_web_elem = driver.find_elements_by_xpath(LINKS_PATH)
links = []
for link in links_web_elem:
links.append(link.get_attribute('href'))
return links
def pull_association_info_(links, driver):
all_rows = []
error_links = []
for i, link in enumerate(links):
driver.get(link)
try:
NAME_XPATH = r'//h1'
DESC_XPATH = r'//div[@class="bodyText-large userSupplied"]'
ADDRESS_XPATH = r'//span[text()="Address"]/..'
EMAIL_XPATH = r'//span[text()="Contact Email"]/..'
PHONE_XPATH = r'//span[text()="Phone Number"]/..'
XPATH_LINK = r''
INFO_XPATHS = [NAME_XPATH, DESC_XPATH, ADDRESS_XPATH, EMAIL_XPATH, PHONE_XPATH, XPATH_LINK]
INFO_NAMES = ['ASSOCIATION NAME','ASSOCIATION DESCRIPTION', 'ASSOCIATION ADDRESS', 'ASSOCIATION EMAIL', 'ASSOCIATION PHONE', 'ASSOCIATION LINK']
WebDriverWait(driver, 5).until(EC.presence_of_element_located((By.XPATH, INFO_XPATHS[0])))
all_info_row = []
            print('PULLING DATA FROM ASSOCIATION ' + str(i + 1) + ' OUT OF ' + str(len(links)) + ' ASSOCIATIONS...')
for info_name, info_xpath in zip(INFO_NAMES, INFO_XPATHS):
try:
if info_xpath != '':
info_data_web_elem = driver.find_element_by_xpath(info_xpath)
info_data = info_data_web_elem.text
if info_name == 'ASSOCIATION NAME':
info_data = info_data_web_elem.text.title()
# treating if description is empty
if info_data == '':
all_info_row.append('Null')
# treating if address is empty
elif info_data == 'Address':
all_info_row.append('Null')
# treating if email is empty
elif info_data == 'Contact Email\nE: ':
all_info_row.append('Null')
# cleaning email data
elif info_data.startswith('Contact Email'):
info_data = re.sub('Contact Email\nE: ', '', info_data)
all_info_row.append(info_data.lower())
# cleaning phone data
elif info_data.startswith('Phone'):
info_data = re.sub('Phone Number\nP: ', '', info_data)
all_info_row.append(info_data)
else:
all_info_row.append(info_data)
else:
all_info_row.append(link)
except:
all_info_row.append('Null')
        except Exception as exc:
            print(exc)
            traceback.print_exc()
            error_links.append(link)
            continue
all_rows.append(all_info_row)
return all_rows, error_links
def extract_(url):
print('CALLING DRIVER...')
driver = call_driver_(url)
print('DRIVER CALLED.')
    print('LOADING BUTTONS...')
load_button_(driver)
print('ALL BUTTONS LOADED.')
print('PULLING LINKS...')
links = get_links_(driver)
print('LINKS PULLED.')
print('PULLING ASSOCIATION DATA...')
all_rows, error_links = pull_association_info_(links, driver)
print('ASSOCIATION DATA PULLED')
print('CLOSING DRIVER...')
driver.close()
print('DRIVER CLOSED.')
    if len(error_links) == 0:
        print('All associations were scraped.')
        return all_rows
    else:
        if len(error_links) > 1:
            print(str(len(error_links)) + ' association sites failed.\n')
            for link in error_links:
                print(link)
        elif len(error_links) == 1:
            print('One association link failed: ' + error_links[0])
        #! here we could call the function again on the error_links
        return all_rows
# ! WRANGLING
def transform_(all_rows):
    try:
        df = pd.DataFrame(all_rows, columns=['Name', 'Descrip', 'Address', 'Email', 'Phone', 'Link'])
        df = df[['Name', 'Email']]
        df = df.loc[(df['Name'] != 'Null') & (df['Email'] != 'Null')]
        print(df)
    except Exception as exc:
        print(exc)
        traceback.print_exc()
        df = pd.DataFrame(columns=['Name', 'Email'])
    return df
def load_(file_name, df):
"""
This function gets a file name and a DataFrame and converts into a excel file, and save it at excel_files folder.
Args:
file_name (str): the excel file name that it will be created, WITHOUT the extension.
df (pd.DataFrame): a DataFrame containing the code (if any) and courses name.
"""
EXCEL_FILES_PATH = r'/Users/studocu/Downloads'
EXTENSION = '.xlsx'
PATH_FILE = EXCEL_FILES_PATH + '/' + file_name + EXTENSION
df.to_excel(PATH_FILE, index=False, engine='xlsxwriter')
def pipeline(url, uniID, uni_name):
file_name = uniID + ' ' + uni_name + ' ASSOCIATIONS'
file_name = re.sub(' ', '_', file_name)
all_rows = extract_(url)
df_ = transform_(all_rows)
load_(file_name, df_)
def scrape_single(url, uniID, uni_name):
pipeline(url, uniID, uni_name)
def scrape_multiples():
start_time = time.time()
EXCEL_PATH = r'/Users/studocu/Desktop/excel_input/input.xlsx'
df_ = pd.read_excel(EXCEL_PATH)
urls = df_.iloc[:,0]
uni_IDS = df_.iloc[:,1]
uni_names = df_.iloc[:,2]
for i, url in enumerate(urls):
uni_ID = str(uni_IDS[i])
uni_name = uni_names[i]
print('PULLING DATA FROM: ' + uni_name)
pipeline(url,uni_ID, uni_name)
total_seconds = time.time() - start_time
if total_seconds <= 60:
print("EXECUTION TIME: {:.2f} SECONDS".format(total_seconds))
else:
print("EXECUTION TIME: {} ".format(datetime.timedelta(seconds=total_seconds)))
# %%
# %%
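# Illustrative only: the URL below is a placeholder for an association listing page of
# the kind this scraper targets; scrape_single expects the listing URL, a university ID
# and a display name, and load_() writes the resulting '<ID>_<NAME>_ASSOCIATIONS.xlsx'
# file to the hard-coded downloads path above.
# scrape_single('https://example.edu/organizations', '123', 'EXAMPLE UNIVERSITY')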
|
python
|
import unittest
from libpysal.examples import load_example
import geopandas as gpd
import numpy as np
from segregation.aspatial import MultiRelativeDiversity
class Multi_Relative_Diversity_Tester(unittest.TestCase):
def test_Multi_Relative_Diversity(self):
s_map = gpd.read_file(load_example("Sacramento1").get_path("sacramentot2.shp"))
groups_list = ['WHITE', 'BLACK', 'ASIAN','HISP']
df = s_map[groups_list]
index = MultiRelativeDiversity(df, groups_list)
np.testing.assert_almost_equal(index.statistic, 0.15820019878220337)
if __name__ == '__main__':
unittest.main()
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
### BEGIN LICENSE
#Copyright (c) 2009 Eugene Kaznacheev <[email protected]>
#Copyright (c) 2013 Joshua Tasker <[email protected]>
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without
#restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the
#Software is furnished to do so, subject to the following
#conditions:
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
#OTHER DEALINGS IN THE SOFTWARE.
### END LICENSE
""" Fetches weather reports from Yahoo! Weather, Weather.com and NOAA """
__version__ = "0.3.8"
try:
# Python 3 imports
from urllib.request import urlopen
from urllib.parse import quote
from urllib.parse import urlencode
from urllib.error import URLError
# needed for code to work on Python3
xrange = range
unicode = str
except ImportError:
# Python 2 imports
from urllib2 import urlopen
from urllib import quote
from urllib import urlencode
from urllib2 import URLError
import sys
import re
from math import pow
from xml.dom import minidom
import json
try:
from unidecode import unidecode
except ImportError:
pass
GOOGLE_COUNTRIES_URL = 'http://www.google.com/ig/countries?output=xml&hl=%s'
GOOGLE_CITIES_URL = 'http://www.google.com/ig/cities?output=xml&' + \
'country=%s&hl=%s'
YAHOO_WEATHER_URL = 'http://xml.weather.yahoo.com/forecastrss/%s_%s.xml'
YAHOO_WEATHER_NS = 'http://xml.weather.yahoo.com/ns/rss/1.0'
NOAA_WEATHER_URL = 'http://www.weather.gov/xml/current_obs/%s.xml'
WEATHER_COM_URL = 'http://wxdata.weather.com/wxdata/weather/local/%s?' + \
'unit=%s&dayf=5&cc=*'
LOCID_SEARCH_URL = 'http://wxdata.weather.com/wxdata/search/search?where=%s'
WOEID_SEARCH_URL = 'http://query.yahooapis.com/v1/public/yql'
WOEID_QUERY_STRING = 'select line1, line2, line3, line4, ' + \
'woeid from geo.placefinder where text="%s"'
#WXUG_BASE_URL = 'http://api.wunderground.com/auto/wui/geo'
#WXUG_FORECAST_URL = WXUG_BASE_URL + '/ForecastXML/index.xml?query=%s'
#WXUG_CURRENT_URL = WXUG_BASE_URL + '/WXCurrentObXML/index.xml?query=%s'
#WXUG_GEOLOOKUP_URL = WXUG_BASE_URL + '/GeoLookupXML/index.xml?query=%s'
#WXUG_ALERTS_URL = WXUG_BASE_URL + '/AlertsXML/index.xml?query=%s'
class WindUnits:
"""Class for available wind unit systems"""
MPS = 1
MPH = 2
BEAUFORT = 3
KPH = 4
KNOTS = 5
def get_weather_from_weather_com(location_id, units = 'metric'):
"""Fetches weather report from Weather.com
Parameters:
location_id: A five digit US zip code or location ID. To find your
location ID, use function get_loc_id_from_weather_com().
units: type of units. 'metric' for metric and 'imperial' for non-metric.
Note that choosing metric units changes all the weather units to metric.
For example, wind speed will be reported as kilometers per hour and
barometric pressure as millibars.
Returns:
weather_data: a dictionary of weather data that exists in XML feed.
"""
location_id = quote(location_id)
if units == 'metric':
unit = 'm'
elif units == 'imperial' or units == '': # for backwards compatibility
unit = ''
else:
unit = 'm' # fallback to metric
url = WEATHER_COM_URL % (location_id, unit)
try:
handler = urlopen(url)
except URLError:
return {'error': 'Could not connect to Weather.com'}
if sys.version > '3':
# Python 3
content_type = dict(handler.getheaders())['Content-Type']
else:
# Python 2
content_type = handler.info().dict['content-type']
try:
charset = re.search('charset\=(.*)', content_type).group(1)
except AttributeError:
charset = 'utf-8'
if charset.lower() != 'utf-8':
xml_response = handler.read().decode(charset).encode('utf-8')
else:
xml_response = handler.read()
dom = minidom.parseString(xml_response)
handler.close()
try:
weather_dom = dom.getElementsByTagName('weather')[0]
except IndexError:
error_data = {'error': dom.getElementsByTagName('error')[
0].getElementsByTagName('err')[0].firstChild.data}
dom.unlink()
return error_data
key_map = {'head':'units', 'ut':'temperature', 'ud':'distance',
'us':'speed', 'up':'pressure', 'ur':'rainfall',
'loc':'location', 'dnam':'name', 'lat':'lat', 'lon':'lon',
'cc':'current_conditions', 'lsup':'last_updated',
'obst':'station', 'tmp':'temperature',
'flik':'feels_like', 't':'text', 'icon':'icon',
'bar':'barometer', 'r':'reading', 'd':'direction',
'wind':'wind', 's':'speed', 'gust':'gust', 'hmid':'humidity',
'vis':'visibility', 'uv':'uv', 'i':'index', 'dewp':'dewpoint',
'moon':'moon_phase', 'hi':'high', 'low':'low', 'sunr':'sunrise',
'suns':'sunset', 'bt':'brief_text', 'ppcp':'chance_precip'}
data_structure = {'head': ('ut', 'ud', 'us', 'up', 'ur'),
'loc': ('dnam', 'lat', 'lon'),
'cc': ('lsup', 'obst', 'tmp', 'flik', 't',
'icon', 'hmid', 'vis', 'dewp')}
cc_structure = {'bar': ('r','d'),
'wind': ('s','gust','d','t'),
'uv': ('i','t'),
'moon': ('icon','t')}
# sanity check, skip missing items
try:
for (tag, list_of_tags2) in data_structure.items():
for tag2 in list_of_tags2:
if weather_dom.getElementsByTagName(tag)[0].childNodes.length == 0:
data_structure[tag] = []
except IndexError:
error_data = {'error': 'Error parsing Weather.com response. Full response: %s' % xml_response}
return error_data
try:
weather_data = {}
for (tag, list_of_tags2) in data_structure.items():
key = key_map[tag]
weather_data[key] = {}
for tag2 in list_of_tags2:
key2 = key_map[tag2]
try:
weather_data[key][key2] = weather_dom.getElementsByTagName(
tag)[0].getElementsByTagName(tag2)[0].firstChild.data
except AttributeError:
# current tag has empty value
weather_data[key][key2] = unicode('')
except IndexError:
error_data = {'error': 'Error parsing Weather.com response. Full response: %s' % xml_response}
return error_data
if weather_dom.getElementsByTagName('cc')[0].childNodes.length > 0:
cc_dom = weather_dom.getElementsByTagName('cc')[0]
for (tag, list_of_tags2) in cc_structure.items():
key = key_map[tag]
weather_data['current_conditions'][key] = {}
for tag2 in list_of_tags2:
key2 = key_map[tag2]
try:
weather_data['current_conditions'][key][key2] = cc_dom.getElementsByTagName(
tag)[0].getElementsByTagName(tag2)[0].firstChild.data
except AttributeError:
# current tag has empty value
weather_data['current_conditions'][key][key2] = unicode('')
forecasts = []
if len(weather_dom.getElementsByTagName('dayf')) > 0:
time_of_day_map = {'d':'day', 'n':'night'}
for forecast in weather_dom.getElementsByTagName('dayf')[0].getElementsByTagName('day'):
tmp_forecast = {}
tmp_forecast['day_of_week'] = forecast.getAttribute('t')
tmp_forecast['date'] = forecast.getAttribute('dt')
for tag in ('hi', 'low', 'sunr', 'suns'):
key = key_map[tag]
try:
tmp_forecast[key] = forecast.getElementsByTagName(
tag)[0].firstChild.data
except AttributeError:
# if nighttime on current day, key 'hi' is empty
tmp_forecast[key] = unicode('')
for part in forecast.getElementsByTagName('part'):
time_of_day = time_of_day_map[part.getAttribute('p')]
tmp_forecast[time_of_day] = {}
for tag2 in ('icon', 't', 'bt', 'ppcp', 'hmid'):
key2 = key_map[tag2]
try:
tmp_forecast[time_of_day][
key2] = part.getElementsByTagName(tag2)[0].firstChild.data
except AttributeError:
# if nighttime on current day, keys 'icon' and 't' are empty
tmp_forecast[time_of_day][key2] = unicode('')
tmp_forecast[time_of_day]['wind'] = {}
for tag2 in ('s', 'gust', 'd', 't'):
key2 = key_map[tag2]
tmp_forecast[time_of_day]['wind'][key2] = part.getElementsByTagName(
'wind')[0].getElementsByTagName(tag2)[0].firstChild.data
forecasts.append(tmp_forecast)
weather_data['forecasts'] = forecasts
dom.unlink()
return weather_data
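# Illustrative usage (not part of the original module): '10001' is only a sample US zip
# code; a proper location ID can also be looked up with get_loc_id_from_weather_com().
# The keys read below are the ones populated above when the feed is complete.
#
#   report = get_weather_from_weather_com('10001', units='metric')
#   if 'error' not in report:
#       print(report['current_conditions']['temperature'],
#             report['units']['temperature'])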
def get_weather_from_google(location_id, hl = ''):
"""Fetches weather report from Google. No longer functional,
since Google discontinued their Weather API as of Sep 2012.
Method retained for backwards compatibility.
Returns:
weather_data: a dictionary containing only the key 'error'
"""
weather_data = {'error': 'The Google Weather API has been ' + \
'discontinued as of September 2012.'}
return weather_data
def get_countries_from_google(hl = ''):
"""Get list of countries in specified language from Google
Parameters:
hl: the language parameter (language code). Default value is empty
string, in this case Google will use English.
Returns:
countries: a list of elements(all countries that exists in XML feed).
Each element is a dictionary with 'name' and 'iso_code' keys.
For example: [{'iso_code': 'US', 'name': 'USA'},
{'iso_code': 'FR', 'name': 'France'}]
"""
url = GOOGLE_COUNTRIES_URL % hl
try:
handler = urlopen(url)
except URLError:
return [{'error':'Could not connect to Google'}]
if sys.version > '3':
# Python 3
content_type = dict(handler.getheaders())['Content-Type']
else:
# Python 2
content_type = handler.info().dict['content-type']
try:
charset = re.search('charset\=(.*)', content_type).group(1)
except AttributeError:
charset = 'utf-8'
if charset.lower() != 'utf-8':
xml_response = handler.read().decode(charset).encode('utf-8')
else:
xml_response = handler.read()
dom = minidom.parseString(xml_response)
handler.close()
countries = []
countries_dom = dom.getElementsByTagName('country')
for country_dom in countries_dom:
country = {}
country['name'] = country_dom.getElementsByTagName(
'name')[0].getAttribute('data')
country['iso_code'] = country_dom.getElementsByTagName(
'iso_code')[0].getAttribute('data')
countries.append(country)
dom.unlink()
return countries
def get_cities_from_google(country_code, hl = ''):
"""Get list of cities of necessary country in specified language from Google
Parameters:
country_code: code of the necessary country. For example 'de' or 'fr'.
hl: the language parameter (language code). Default value is empty
string, in this case Google will use English.
Returns:
cities: a list of elements(all cities that exists in XML feed). Each
element is a dictionary with 'name', 'latitude_e6' and 'longitude_e6'
keys. For example: [{'longitude_e6': '1750000', 'name': 'Bourges',
'latitude_e6': '47979999'}]
"""
url = GOOGLE_CITIES_URL % (country_code.lower(), hl)
try:
handler = urlopen(url)
except URLError:
return [{'error':'Could not connect to Google'}]
if sys.version > '3':
# Python 3
content_type = dict(handler.getheaders())['Content-Type']
else:
# Python 2
content_type = handler.info().dict['content-type']
try:
charset = re.search('charset\=(.*)', content_type).group(1)
except AttributeError:
charset = 'utf-8'
if charset.lower() != 'utf-8':
xml_response = handler.read().decode(charset).encode('utf-8')
else:
xml_response = handler.read()
dom = minidom.parseString(xml_response)
handler.close()
cities = []
cities_dom = dom.getElementsByTagName('city')
for city_dom in cities_dom:
city = {}
city['name'] = city_dom.getElementsByTagName(
'name')[0].getAttribute('data')
city['latitude_e6'] = city_dom.getElementsByTagName(
'latitude_e6')[0].getAttribute('data')
city['longitude_e6'] = city_dom.getElementsByTagName(
'longitude_e6')[0].getAttribute('data')
cities.append(city)
dom.unlink()
return cities
def get_weather_from_yahoo(location_id, units = 'metric'):
"""Fetches weather report from Yahoo! Weather
Parameters:
location_id: A five digit US zip code or location ID. To find your
location ID, use function get_location_ids().
units: type of units. 'metric' for metric and 'imperial' for non-metric.
Note that choosing metric units changes all the weather units to
metric. For example, wind speed will be reported as kilometers per
hour and barometric pressure as millibars.
Returns:
weather_data: a dictionary of weather data that exists in XML feed.
See http://developer.yahoo.com/weather/#channel
"""
location_id = quote(location_id)
if units == 'metric':
unit = 'c'
elif units == 'imperial' or units == '': # for backwards compatibility
unit = 'f'
else:
unit = 'c' # fallback to metric
url = YAHOO_WEATHER_URL % (location_id, unit)
try:
handler = urlopen(url)
except URLError:
return {'error': 'Could not connect to Yahoo! Weather'}
if sys.version > '3':
# Python 3
content_type = dict(handler.getheaders())['Content-Type']
else:
# Python 2
content_type = handler.info().dict['content-type']
try:
charset = re.search('charset\=(.*)', content_type).group(1)
except AttributeError:
charset = 'utf-8'
if charset.lower() != 'utf-8':
xml_response = handler.read().decode(charset).encode('utf-8')
else:
xml_response = handler.read()
dom = minidom.parseString(xml_response)
handler.close()
weather_data = {}
try:
weather_data['title'] = dom.getElementsByTagName(
'title')[0].firstChild.data
weather_data['link'] = dom.getElementsByTagName(
'link')[0].firstChild.data
except IndexError:
error_data = {'error': dom.getElementsByTagName('item')[
0].getElementsByTagName('title')[0].firstChild.data}
dom.unlink()
return error_data
ns_data_structure = {
'location': ('city', 'region', 'country'),
'units': ('temperature', 'distance', 'pressure', 'speed'),
'wind': ('chill', 'direction', 'speed'),
'atmosphere': ('humidity', 'visibility', 'pressure', 'rising'),
'astronomy': ('sunrise', 'sunset'),
'condition': ('text', 'code', 'temp', 'date')
}
for (tag, attrs) in ns_data_structure.items():
weather_data[tag] = xml_get_ns_yahoo_tag(
dom, YAHOO_WEATHER_NS, tag, attrs
)
weather_data['geo'] = {}
try:
weather_data['geo']['lat'] = dom.getElementsByTagName(
'geo:lat')[0].firstChild.data
weather_data['geo']['long'] = dom.getElementsByTagName(
'geo:long')[0].firstChild.data
except AttributeError:
weather_data['geo']['lat'] = unicode()
weather_data['geo']['long'] = unicode()
weather_data['condition']['title'] = dom.getElementsByTagName(
'item')[0].getElementsByTagName('title')[0].firstChild.data
weather_data['html_description'] = dom.getElementsByTagName(
'item')[0].getElementsByTagName('description')[0].firstChild.data
forecasts = []
for forecast in dom.getElementsByTagNameNS(YAHOO_WEATHER_NS, 'forecast'):
forecasts.append(xml_get_attrs(forecast,('day', 'date', 'low', 'high',
'text', 'code')))
weather_data['forecasts'] = forecasts
dom.unlink()
return weather_data
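# Illustrative usage (not part of the original module): the location ID is a sample
# value only; real IDs come from the lookup helpers described above.
#
#   report = get_weather_from_yahoo('USNY0996', units='metric')
#   if 'error' not in report:
#       print(report['condition']['temp'], report['units']['temperature'])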
def get_everything_from_yahoo(country_code, cities):
"""Get all weather data from yahoo for a specific country.
Parameters:
country_code: A four letter code of the necessary country.
For example 'GMXX' or 'FRXX'.
cities: The maximum number of cities for which to get data.
Returns:
weather_reports: A dictionary containing weather data for each city.
"""
city_codes = yield_all_country_city_codes_yahoo(country_code, cities)
weather_reports = {}
for city_c in city_codes:
weather_data = get_weather_from_yahoo(city_c)
if ('error' in weather_data):
return weather_data
city = weather_data['location']['city']
weather_reports[city] = weather_data
return weather_reports
def yield_all_country_city_codes_yahoo(country_code, cities):
"""Yield all cities codes for a specific country.
Parameters:
country_code: A four letter code of the necessary country.
For example 'GMXX' or 'FRXX'.
cities: The maximum number of cities to yield.
Returns:
country_city_codes: A generator containing the city codes.
"""
# cities stands for the number of available cities
for i in range(1, cities + 1):
yield ''.join([country_code, (4 - len(str(i))) * '0', str(i)])
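# For example, yield_all_country_city_codes_yahoo('FRXX', 3) yields
# 'FRXX0001', 'FRXX0002', 'FRXX0003': the country code followed by a
# zero-padded, four-digit city counter.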
def get_weather_from_noaa(station_id):
"""Fetches weather report from NOAA: National Oceanic and Atmospheric
Administration (United States)
Parameter:
station_id: the ID of the weather station near the desired location
To find your station ID, perform the following steps:
1. Open this URL: http://www.weather.gov/xml/current_obs/seek.php?state=az&Find=Find
2. Select the necessary state. Click 'Find'.
3. Find the necessary station in the 'Observation Location' column.
4. The station ID is in the URL for the weather page for that station.
For example if the weather page is http://weather.noaa.gov/weather/current/KPEO.html -- the station ID is KPEO.
Another way to get the station ID: use the 'Weather.location2station'
function of this library: http://code.google.com/p/python-weather/
Returns:
weather_data: a dictionary of weather data that exists in XML feed.
( useful icons: http://www.weather.gov/xml/current_obs/weather.php )
"""
station_id = quote(station_id)
url = NOAA_WEATHER_URL % (station_id)
try:
handler = urlopen(url)
except URLError:
return {'error': 'Could not connect to NOAA'}
if sys.version > '3':
# Python 3
content_type = dict(handler.getheaders())['Content-Type']
else:
# Python 2
content_type = handler.info().dict['content-type']
try:
charset = re.search(r'charset=(.*)', content_type).group(1)
except AttributeError:
charset = 'utf-8'
if charset.lower() != 'utf-8':
xml_response = handler.read().decode(charset).encode('utf-8')
else:
xml_response = handler.read()
dom = minidom.parseString(xml_response)
handler.close()
data_structure = ('suggested_pickup',
'suggested_pickup_period',
'location',
'station_id',
'latitude',
'longitude',
'observation_time',
'observation_time_rfc822',
'weather',
'temperature_string',
'temp_f',
'temp_c',
'relative_humidity',
'wind_string',
'wind_dir',
'wind_degrees',
'wind_mph',
'wind_gust_mph',
'pressure_string',
'pressure_mb',
'pressure_in',
'dewpoint_string',
'dewpoint_f',
'dewpoint_c',
'heat_index_string',
'heat_index_f',
'heat_index_c',
'windchill_string',
'windchill_f',
'windchill_c',
'icon_url_base',
'icon_url_name',
'two_day_history_url',
'ob_url'
)
weather_data = {}
current_observation = dom.getElementsByTagName('current_observation')[0]
for tag in data_structure:
try:
weather_data[tag] = current_observation.getElementsByTagName(
tag)[0].firstChild.data
except IndexError:
pass
dom.unlink()
return weather_data
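# Illustrative usage sketch (the station ID is the KPEO example from the docstring above):
#
#   noaa = get_weather_from_noaa('KPEO')
#   if 'error' not in noaa:
#       print(noaa.get('temp_c'), noaa.get('weather'))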
def xml_get_ns_yahoo_tag(dom, ns, tag, attrs):
"""Parses the necessary tag and returns the dictionary with values
Parameters:
dom: DOM
ns: namespace
tag: necessary tag
attrs: tuple of attributes
Returns:
a dictionary of elements
"""
element = dom.getElementsByTagNameNS(ns, tag)[0]
return xml_get_attrs(element,attrs)
def xml_get_attrs(xml_element, attrs):
"""Returns the list of necessary attributes
Parameters:
element: xml element
attrs: tuple of attributes
Returns:
a dictionary of elements
"""
result = {}
for attr in attrs:
result[attr] = xml_element.getAttribute(attr)
return result
def wind_direction(degrees):
""" Convert wind degrees to direction """
try:
degrees = int(degrees)
except ValueError:
return ''
if degrees < 23 or degrees >= 338:
return 'N'
elif degrees < 68:
return 'NE'
elif degrees < 113:
return 'E'
elif degrees < 158:
return 'SE'
elif degrees < 203:
return 'S'
elif degrees < 248:
return 'SW'
elif degrees < 293:
return 'W'
elif degrees < 338:
return 'NW'
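# For example, wind_direction(90) returns 'E' and wind_direction(200) returns 'S';
# a non-numeric value such as 'calm' hits the ValueError branch and returns ''.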
def wind_beaufort_scale(value, wind_units = WindUnits.KPH):
"""Convert wind speed value to Beaufort number (0-12)
The Beaufort wind force scale is an empirical measure that
relates wind speed to observed conditions at sea or on land.
Parameters:
value: wind speed value to convert
wind_units: unit system of value, defaults to km/h
Returns:
a string containing the Beaufort number from 0 to 12
"""
if wind_units == WindUnits.BEAUFORT:
return str(value)
try:
value = float(value)
except ValueError:
return ''
if value < 0.0:
return ''
if wind_units == WindUnits.KPH:
if value < 1:
# Calm
return '0'
elif value <= 5.5:
# Light air
return '1'
elif value <= 11:
# Light breeze
return '2'
elif value <= 19:
# Gentle breeze
return '3'
elif value <= 28:
# Moderate breeze
return '4'
elif value <= 38:
# Fresh breeze
return '5'
elif value <= 49:
# Strong breeze
return '6'
elif value <= 61:
# High wind, moderate gale, near gale
return '7'
elif value <= 74:
# Gale, fresh gale
return '8'
elif value <= 88:
# Strong gale
return '9'
elif value <= 102:
# Storm, whole gale
return '10'
elif value <= 117:
# Violent storm
return '11'
else:
# Hurricane
return '12'
if wind_units == WindUnits.MPH:
if value < 1:
return '0'
elif value <= 3:
return '1'
elif value <= 7:
return '2'
elif value <= 12:
return '3'
elif value <= 17:
return '4'
elif value <= 24:
return '5'
elif value <= 30:
return '6'
elif value <= 38:
return '7'
elif value <= 46:
return '8'
elif value <= 54:
return '9'
elif value <= 63:
return '10'
elif value <= 73:
return '11'
else:
return '12'
if wind_units == WindUnits.MPS:
if value < 0.3:
return '0'
elif value <= 1.5:
return '1'
elif value <= 3.4:
return '2'
elif value <= 5.4:
return '3'
elif value <= 7.9:
return '4'
elif value <= 10.7:
return '5'
elif value <= 13.8:
return '6'
elif value <= 17.1:
return '7'
elif value <= 20.7:
return '8'
elif value <= 24.4:
return '9'
elif value <= 28.4:
return '10'
elif value <= 32.6:
return '11'
else:
return '12'
if wind_units == WindUnits.KNOTS:
if value < 1:
return '0'
if value <= 3:
return '1'
if value <= 6:
return '2'
if value <= 10:
return '3'
if value <= 16:
return '4'
if value <= 21:
return '5'
if value <= 27:
return '6'
if value <= 33:
return '7'
if value <= 40:
return '8'
if value <= 47:
return '9'
if value <= 55:
return '10'
if value <= 63:
return '11'
else:
return '12'
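# For example, wind_beaufort_scale(30, WindUnits.KPH) returns '5' (fresh breeze),
# while the same number interpreted as mph, wind_beaufort_scale(30, WindUnits.MPH),
# returns '6'; negative or non-numeric values return ''.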
def get_wind_direction(degrees):
""" Same as wind_direction """
return wind_direction(degrees)
def getText(nodelist):
rc = ""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
return rc
def get_location_ids(search_string):
"""Get location IDs for place names matching a specified string.
Same as get_loc_id_from_weather_com() but different return format.
Parameters:
search_string: Plaintext string to match to available place names.
For example, a search for 'Los Angeles' will return matches for the
city of that name in California, Chile, Cuba, Nicaragua, etc as well
as 'East Los Angeles, CA', 'Lake Los Angeles, CA', etc.
Returns:
location_ids: A dictionary containing place names keyed to location ID
"""
loc_id_data = get_loc_id_from_weather_com(search_string)
if 'error' in loc_id_data:
return loc_id_data
location_ids = {}
for i in xrange(loc_id_data['count']):
location_ids[loc_id_data[i][0]] = loc_id_data[i][1]
return location_ids
def get_loc_id_from_weather_com(search_string):
"""Get location IDs for place names matching a specified string.
Same as get_location_ids() but different return format.
Parameters:
search_string: Plaintext string to match to available place names.
For example, a search for 'Los Angeles' will return matches for the
city of that name in California, Chile, Cuba, Nicaragua, etc as well
as 'East Los Angeles, CA', 'Lake Los Angeles, CA', etc.
Returns:
loc_id_data: A dictionary of tuples in the following format:
{'count': 2, 0: (LOCID1, Placename1), 1: (LOCID2, Placename2)}
"""
# Weather.com stores place names as ascii-only, so convert if possible
try:
# search_string = unidecode(search_string.encode('utf-8'))
search_string = unidecode(search_string)
except NameError:
pass
url = LOCID_SEARCH_URL % quote(search_string)
try:
handler = urlopen(url)
except URLError:
return {'error': 'Could not connect to server'}
if sys.version > '3':
# Python 3
content_type = dict(handler.getheaders())['Content-Type']
else:
# Python 2
content_type = handler.info().dict['content-type']
try:
charset = re.search(r'charset=(.*)', content_type).group(1)
except AttributeError:
charset = 'utf-8'
if charset.lower() != 'utf-8':
xml_response = handler.read().decode(charset).encode('utf-8')
else:
xml_response = handler.read()
dom = minidom.parseString(xml_response)
handler.close()
loc_id_data = {}
try:
num_locs = 0
for loc in dom.getElementsByTagName('search')[0].getElementsByTagName('loc'):
loc_id = loc.getAttribute('id') # loc id
place_name = loc.firstChild.data # place name
loc_id_data[num_locs] = (loc_id, place_name)
num_locs += 1
loc_id_data['count'] = num_locs
except IndexError:
error_data = {'error': 'No matching Location IDs found'}
return error_data
finally:
dom.unlink()
return loc_id_data
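# Illustrative result sketch (the IDs shown are hypothetical):
# get_loc_id_from_weather_com('Paris') could return something like
#   {0: ('FRXX0076', 'Paris, France'), 1: ('USTX1088', 'Paris, TX'), 'count': 2}
# while get_location_ids('Paris') reshapes the same data as
#   {'FRXX0076': 'Paris, France', 'USTX1088': 'Paris, TX'}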
def get_where_on_earth_ids(search_string):
"""Get Yahoo 'Where On Earth' ID for the place names that best match the
specified string. Same as get_woeid_from_yahoo() but different return format.
Parameters:
search_string: Plaintext string to match to available place names.
Place can be a city, country, province, airport code, etc. Yahoo returns
the 'Where On Earth' ID (WOEID) for the place name(s) that is the best
match to the full string.
For example, 'Paris' will match 'Paris, France', 'Deutschland' will match
'Germany', 'Ontario' will match 'Ontario, Canada', 'SFO' will match 'San
Francisco International Airport', etc.
Returns:
where_on_earth_ids: A dictionary containing place names keyed to WOEID.
"""
woeid_data = get_woeid_from_yahoo(search_string)
if 'error' in woeid_data:
return woeid_data
where_on_earth_ids = {}
for i in xrange(woeid_data['count']):
where_on_earth_ids[woeid_data[i][0]] = woeid_data[i][1]
return where_on_earth_ids
def get_woeid_from_yahoo(search_string):
"""Get Yahoo WOEID for the place names that best match the specified string.
Same as get_where_on_earth_ids() but different return format.
Parameters:
search_string: Plaintext string to match to available place names.
Place can be a city, country, province, airport code, etc. Yahoo returns
the WOEID for the place name(s) that is the best match to the full string.
For example, 'Paris' will match 'Paris, France', 'Deutschland' will match
'Germany', 'Ontario' will match 'Ontario, Canada', 'SFO' will match 'San
Francisco International Airport', etc.
Returns:
woeid_data: A dictionary of tuples in the following format:
{'count': 2, 0: (WOEID1, Placename1), 1: (WOEID2, Placename2)}
"""
## This uses Yahoo's YQL tables to directly query Yahoo's database, e.g.
## http://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20geo.placefinder%20where%20text%3D%22New%20York%22
if sys.version > '3':
# Python 3
encoded_string = search_string
else:
# Python 2
encoded_string = search_string.encode('utf-8')
params = {'q': WOEID_QUERY_STRING % encoded_string, 'format': 'json'}
url = '?'.join((WOEID_SEARCH_URL, urlencode(params)))
try:
handler = urlopen(url)
except URLError:
return {'error': 'Could not connect to server'}
if sys.version > '3':
# Python 3
content_type = dict(handler.getheaders())['Content-Type']
else:
# Python 2
content_type = handler.info().dict['content-type']
try:
charset = re.search(r'charset=(.*)', content_type).group(1)
except AttributeError:
charset = 'utf-8'
if charset.lower() != 'utf-8':
json_response = handler.read().decode(charset).encode('utf-8')
else:
json_response = handler.read()
handler.close()
yahoo_woeid_result = json.loads(json_response)
try:
result = yahoo_woeid_result['query']['results']['Result']
except KeyError:
# On error, returned JSON evals to dictionary with one key, 'error'
return yahoo_woeid_result
except TypeError:
return {'error': 'No matching place names found'}
woeid_data = {}
woeid_data['count'] = yahoo_woeid_result['query']['count']
for i in xrange(yahoo_woeid_result['query']['count']):
try:
place_data = result[i]
except KeyError:
place_data = result
name_lines = [place_data[tag]
for tag in ['line1','line2','line3','line4']
if place_data[tag] is not None]
place_name = ', '.join(name_lines)
woeid_data[i] = (place_data['woeid'], place_name)
return woeid_data
def heat_index(temperature, humidity, units = 'metric'):
"""Calculate Heat Index for the specified temperature and humidity
The formula below approximates the heat index in degrees
Fahrenheit, to within ±1.3 °F. It is the result of a
multivariate fit (temperature equal to or greater than
80°F and relative humidity equal to or greater than 40%)
to a model of the human body.
Heat Index = c_1 + (c_2 * T) + (c_3 * R) + (c_4 * T * R) +
(c_5 * T^2) + (c_6 * R^2) + (c_7 * T^2 * R) +
(c_8 * T * R^2) + (c_9 * T^2 * R^2)
where:
T = ambient dry-bulb temperature (in degrees Fahrenheit)
R = relative humidity (percentage value between 0 and 100)
Parameters:
temperature: air temperature in specified units
humidity: relative humidity (a percentage) at specified air temperature
units: type of units. 'metric' for metric and 'imperial' for non-metric.
Returns:
heat_index: a numerical value representing the heat index
in the temperature scale of the specified unit system.
Returns None if the specified temperature is less than 80°F
or the specified relative humidity is less than 40%.
"""
# fallback to metric
if units != 'imperial' and units != '' and units != 'metric':
units = 'metric'
R = float(humidity)
if units == 'imperial' or units == '': # for backwards compatibility
T = float(temperature)
elif units == 'metric':
# Heat Index is calculated in F
T = (float(temperature) * 9.0/5.0) + 32.0
# Heat Index is only valid for temp >= 80°F and humidity >= 40%
if (R < 40.0 or T < 80.0):
return None
Rsquared = pow(R, 2.0)
Tsquared = pow(T, 2.0)
# coefficients for calculation
c = [None, -42.379, 2.04901523, 10.14333127, -0.22475541,
-6.83783 * pow(10.0,-3.0), -5.481717 * pow(10.0,-2.0),
1.22874 * pow(10.0,-3.0), 8.5282 * pow(10.0,-4.0),
-1.99 * pow(10.0,-6.0)]
heat_index = ( c[1] + (c[2]* T) + (c[3]* R) + (c[4]* T * R) +
(c[5]* Tsquared) + (c[6]* Rsquared) +
(c[7]* Tsquared * R) + (c[8]* T * Rsquared) +
(c[9]* Tsquared * Rsquared) )
# round to one decimal place
if units == 'metric':
return round(((heat_index - 32.0) * 5.0/9.0), 1)
else:
return round(heat_index, 1)
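# Worked example (values rounded): heat_index(90, 50, 'imperial') evaluates the
# regression at T = 90 F, R = 50 %, giving roughly 94.6 F; the metric call
# heat_index(32.2, 50) first converts 32.2 C to about 90 F and returns the result in C.
# Below 80 F or below 40 % relative humidity the function returns None.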
#! /usr/bin/python3
from __future__ import unicode_literals
# from _typeshed import NoneType
import math
import os
import sys
import datetime
from typing import TextIO
import python_magnetrun
import numpy as np
import matplotlib
# print("matplotlib=", matplotlib.rcParams.keys())
matplotlib.rcParams['text.usetex'] = True
# matplotlib.rcParams['text.latex.unicode'] = True key not available
import matplotlib.pyplot as plt
import pandas as pd
import water as w
import ht
import tabulate
import datatools
tables = []
def mixingTemp(Flow1, P1, T1, Flow2, P2, T2):
"""
computes the mixing temperature
"""
Flow = Flow1 + Flow2
Tmix = w.getRho(P1, T1) * w.getCp(P1, T1) * T1 * Flow1
Tmix += w.getRho(P2, T2) * w.getCp(P2, T2) * T2 * Flow2
Tmix /= w.getRho((P1+P2)/2., T2) * w.getCp((P1+P2)/2., T2) * Flow
return Tmix
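# Sketch of the formula above: the mixing temperature is the (rho * Cp * Flow)-weighted
# average of the two stream temperatures,
#   Tmix = (rho1*Cp1*T1*Flow1 + rho2*Cp2*T2*Flow2) / (rho_mix*Cp_mix*(Flow1+Flow2))
# with the mixed-stream properties evaluated at the mean pressure and at T2.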
def display_Q(inputfile, f_extension, mrun, debit_alim, ohtc, dT, show=False, extension="-Q.png"):
"""
plot Heat profiles
"""
df = mrun.getData()
# print("type(mrun.getData()):", type(mrun.getData()))
# print("type(df):", type(df), type(df['Tin']))
df["FlowH"] = df.apply(lambda row: ((row.Flow)*1.e-3+(2*debit_alim)/3600.), axis=1)
df['Thi'] = df.apply(lambda row: mixingTemp(row.Flow*1.e-3, row.BP, row.Tout+dT, 2*debit_alim/3600., row.BP, row.TAlimout), axis=1)
if ohtc != "None":
df['QNTU'] = df.apply(lambda row: heatexchange(ohtc, row.teb, row.Thi, row.debitbrut/3600., row.FlowH, 10, row.BP)[2]/1.e+6, axis=1)
else:
df['QNTU'] = df.apply(lambda row: heatexchange(row.Ohtc, row.teb, row.Thi, row.debitbrut/3600., row.FlowH, 10, row.BP)[2]/1.e+6, axis=1)
df["Qhot"] = df.apply(lambda row: ((row.Flow)*1.e-3+0/3600.)*(w.getRho(row.BP, row.Tout)*w.getCp(row.BP, row.Tout)*(row.Tout)-w.getRho(row.HP, row.Tin)*w.getCp(row.HP, row.Tin)*row.Tin)/1.e+6, axis=1)
df["Qhot1"] = df.apply(lambda row: (row.FlowH)*(w.getRho(row.BP, row.Thi)*w.getCp(row.BP, row.Thi)*(row.Thi)-w.getRho(row.HP, row.Tin)*w.getCp(row.HP, row.Tin)*row.Tin)/1.e+6, axis=1)
df["Qcold"] = df.apply(lambda row: row.debitbrut/3600.*(w.getRho(10, row.tsb)*w.getCp(10, row.tsb)*row.tsb-w.getRho(10, row.teb)*w.getCp(10, row.teb)*row.teb)/1.e+6, axis=1)
# print("df.keys:", df.columns.values.tolist(), "mrun.keys=", mrun.getKeys())
# heat Balance on Magnet side
ax = plt.gca()
df.plot(x='t', y='Qhot', ax=ax, color='red')
df.plot(x='t', y='Pt', ax=ax, color='yellow', marker='o', alpha = .5, markevery=args.markevery)
df.plot(x='t', y='Pmagnet', ax=ax, color='yellow')
plt.ylabel(r'Q[MW]')
plt.xlabel(r't [s]')
plt.grid(b=True)
if ohtc != "None":
if isinstance(ohtc, (float, int, str)):
plt.title("HeatBalance Magnet side:"
+ mrun.getInsert().replace(r"_",r"\_")
+ ": h=%g $W/m^2/K$, dT=%g" % (ohtc,dT))
else:
#if isinstance(ohtc, type(df['Tin'])):
plt.title("HeatBalance Magnet side:"
+ mrun.getInsert().replace(r"_",r"\_")
+ ": h=%s $W/m^2/K$, dT=%g" % ("formula",dT))
if show:
plt.show()
else:
extension="-Q_magnetside.png"
imagefile = inputfile.replace(f_extension, extension)
print("save to %s" % imagefile)
plt.savefig(imagefile, dpi=300)
plt.close()
# heat Balance on HX side
ax = plt.gca()
df.plot(x='t', y='Qhot1', ax=ax, color='red', marker='o', alpha = .5, markevery=args.markevery)
df.plot(x='t', y='Qcold', ax=ax, color='blue')
plt.ylabel(r'Q[MW]')
plt.xlabel(r't [s]')
plt.grid(b=True)
if ohtc != "None":
if isinstance(ohtc, (float, int, str)):
plt.title("HeatBalance HX side:"
+ mrun.getInsert().replace(r"_",r"\_")
+ ": h=%g $W/m^2/K$, dT=%g" % (ohtc,dT))
else:
#if isinstance(ohtc, type(df['Tin'])):
plt.title("HeatBalance HX side:"
+ mrun.getInsert().replace(r"_",r"\_")
+ ": h=%s $W/m^2/K$, dT=%g" % ("formula",dT))
if show:
plt.show()
else:
extension="-Q_hxside.png"
imagefile = inputfile.replace(f_extension, extension)
print("save to %s" % imagefile)
plt.savefig(imagefile, dpi=300)
plt.close()
def display_T(inputfile, f_extension, mrun, tsb_key, tin_key, debit_alim, ohtc, dT, show=False, extension="-coolingloop.png", debug=False):
"""
plot Temperature profiles
"""
print("othc=", ohtc)
df = mrun.getData()
df["FlowH"] = df.apply(lambda row: ((row.Flow)*1.e-3+(2*debit_alim)/3600.), axis=1)
df['Thi'] = df.apply(lambda row: mixingTemp(row.Flow*1.e-3, row.BP, row.Tout+dT, 2*debit_alim/3600., row.BP, row.TAlimout), axis=1)
if ohtc != "None":
df[tin_key] = df.apply(lambda row: heatexchange(ohtc, row.teb, row.Thi, row.debitbrut/3600., row.FlowH, 10, row.BP)[1], axis=1)
df[tsb_key] = df.apply(lambda row: heatexchange(ohtc, row.teb, row.Thi, row.debitbrut/3600., row.FlowH, 10, row.BP)[0], axis=1)
else:
df[tin_key] = df.apply(lambda row: heatexchange(row.Ohtc, row.teb, row.Thi, row.debitbrut/3600., row.FlowH, 10, row.BP)[1], axis=1)
df[tsb_key] = df.apply(lambda row: heatexchange(row.Ohtc, row.teb, row.Thi, row.debitbrut/3600., row.FlowH, 10, row.BP)[0], axis=1)
ax = plt.gca()
df.plot(x='t', y=tsb_key, ax=ax, color='blue', marker='o', alpha = .5, markevery=args.markevery)
df.plot(x='t', y='tsb', ax=ax, color='blue')
df.plot(x='t', y='teb', ax=ax, color='blue', linestyle='--')
df.plot(x='t', y=tin_key, ax=ax, color='red', marker='o', alpha = .5, markevery=args.markevery)
df.plot(x='t', y='Tin', ax=ax, color='red')
df.plot(x='t', y='Tout', ax=ax, color='red', linestyle='--')
df.plot(x='t', y='Thi', ax=ax, color='yellow', marker='o', alpha = .5, markevery=args.markevery)
plt.xlabel(r't [s]')
plt.grid(b=True)
if ohtc != "None":
if isinstance(ohtc, (float, int, str)):
plt.title(mrun.getInsert().replace(r"_",r"\_") + ": h=%g $W/m^2/K$, dT=%g" % (ohtc,dT))
else:
plt.title(mrun.getInsert().replace(r"_",r"\_") + ": h=%s $W/m^2/K$, dT=%g" % ("computed",dT))
if show:
plt.show()
else:
imagefile = inputfile.replace(f_extension, extension)
print("save to %s" % imagefile)
plt.savefig(imagefile, dpi=300)
plt.close()
def heatBalance(Tin, Pin, Debit, Power, debug=False):
"""
Computes Tout from heatBalance
inputs:
Tin: input Temp in K
Pin: input Pressure (Bar)
Debit: volumetric flow rate in m^3/s
"""
# water properties are queried as (pressure, temperature), as in the rest of the script
dT = Power / ( w.getRho(Pin, Tin) * Debit * w.getCp(Pin, Tin) )
Tout = Tin + dT
return Tout
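# Rough order of magnitude (assumed example values, water at ~1000 kg/m^3 and
# ~4180 J/kg/K): a 2 MW load on a 0.02 m^3/s stream gives
#   dT = 2e6 / (1000 * 0.02 * 4180) ~ 24 K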
def heatexchange(h, Tci, Thi, Debitc, Debith, Pci, Phi, debug=False):
"""
NTU Model for heat Exchanger
compute the output temperature for the heat exchanger
as a function of input temperatures and flow rates
h: overall heat transfer coefficient in W/m^2/K
Tci: input temperature on cold side
Thi: input temperature on hot side
Debitc: volumetric flow rate on cold side (m^3/s)
Debith: volumetric flow rate on hot side (m^3/s)
"""
# if debug:
# print("heatexchange:",
# "h=", U,
# "Tci=", Tci, "Thi=", Thi,
# "Pci=", Pci, "Phi=", Phi,
# "Debitc=", Debitc, "Debith=", Debith, "DebitA=", DebitA)
A = 1063.4 # m^2
Cp_cold = w.getCp(Pci, Tci) # J/kg/K
Cp_hot = w.getCp(Phi, Thi) # J/kg/K
m_hot = w.getRho(Phi, Thi) * Debith # kg/s
m_cold = w.getRho(Pci, Tci) * Debitc # kg/s
# For plate exchanger
result = ht.hx.P_NTU_method(m_hot, m_cold, Cp_hot, Cp_cold, UA=h*A, T1i=Thi, T2i=Tci, subtype='1/1')
# returns a dictionary:
# Q : Heat exchanged in the heat exchanger, [W]
# UA : Combined area-heat transfer coefficient term, [W/K]
# T1i : Inlet temperature of stream 1, [K]
# T1o : Outlet temperature of stream 1, [K]
# T2i : Inlet temperature of stream 2, [K]
# T2o : Outlet temperature of stream 2, [K]
# P1 : Thermal effectiveness with respect to stream 1, [-]
# P2 : Thermal effectiveness with respect to stream 2, [-]
# R1 : Heat capacity ratio with respect to stream 1, [-]
# R2 : Heat capacity ratio with respect to stream 2, [-]
# C1 : The heat capacity rate of fluid 1, [W/K]
# C2 : The heat capacity rate of fluid 2, [W/K]
# NTU1 : Thermal Number of Transfer Units with respect to stream 1 [-]
# NTU2 : Thermal Number of Transfer Units with respect to stream 2 [-]
NTU = result["NTU1"]
if NTU == float('inf') or math.isnan(NTU):
print("Tci=", Tci, "Thi=", Thi)
print("Pci=", Pci, "Phi=", Phi)
print("Debitc=", Debitc, "Debith=", Debith)
raise Exception("NTU not valid")
Q = result["Q"]
if Q == float('inf') or math.isnan(Q):
print("Tci=", Tci, "Thi=", Thi)
print("Pci=", Pci, "Phi=", Phi)
print("Debitc=", Debitc, "Debith=", Debith)
raise Exception("Q not valid")
Tco = result["T2o"]
if Tco is None:
print("h=", h)
print("Tci=", Tci, "Thi=", Thi)
print("Pci=", Pci, "Phi=", Phi)
print("Debitc=", Debitc, "Debith=", Debith)
raise Exception("Tco not valid")
Tho = result["T1o"]
if Tho is None:
print("h=", h)
print("Tci=", Tci, "Thi=", Thi)
print("Pci=", Pci, "Phi=", Phi)
print("Debitc=", Debitc, "Debith=", Debith)
raise Exception("Tho not valid")
"""
if dT != 0 and m_alim_A1A2*m_alim_A3A4 != 0:
dT -= Thi * ( m_hot/(m_hot + m_alim_A1A2 + m_alim_A3A4) -1)
dT_alim = ( dT/(m_alim_A1A2/(m_hot + m_alim_A1A2 + m_alim_A3A4)) ) / 2. - Tho
P_A1A2 = dT_alim*m_alim_A1A2*Cp_hot
P_A3A4 = dT_alim*m_alim_A3A4*Cp_hot
if debug:
print("heatexchange: ", NTU, Tco, Tho, Q)
print("m_alim: ", m_alim_A1A2 + m_alim_A1A2, "m_hot:", m_hot, "%.2f" % ((m_alim_A1A2 + m_alim_A1A2)/m_hot*100), "%")
# TODO check with site
print("dT_alim:", dT_alim,
"P_A1A2[MW]:", P_A1A2/1.e+6, "%.2f" % (P_A1A2/abs(PowerH)*100), "%",
"P_A3A4[MW]:", P_A3A4/1.e+6, "%.2f" % (P_A3A4/abs(PowerB)*100), "%",
"PH[MW]", abs(PowerH/1.e+6),
"PB[MW]", abs(PowerB/1.e+6))
"""
return (Tco, Tho, Q)
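# Illustrative call sketch (the numbers are arbitrary, not site data): with h in
# W/m^2/K, temperatures in C, flow rates in m^3/s and pressures in bar,
#   tsb, tin, q = heatexchange(4000, 15., 35., 0.3, 0.05, 10, 10)
# returns the cold-side outlet, the hot-side outlet and the exchanged power in W.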
def find(df,
unknows: list,
dTini: float, hini: float, hmin: float, hmax: float,
algo: str, lalgo: str, maxeval: float, stopval: float, select=0,
site="M9", debit_alim="30", debug=False):
"""
Use nlopt to find h, dT that give the best approximation for Hx output temperature
unknows = list of optim var (eg ["dT"] or ["h", "dT"])
returns a dict
"""
tables = []
headers = ["dT[C]", "h[W/m\u00b2/K]", "e_Tin[]", "e_tsb[]", "e_T[]", "Heat Balance[MW]"]
import nlopt
print("find %d params:" % len(unknows), unknows)
opt = None
if algo == "Direct":
opt = nlopt.opt(nlopt.GN_DIRECT, len(unknows))
elif algo == "Direct_L":
opt = nlopt.opt(nlopt.GN_DIRECT_L, len(unknows))
elif algo == "CRS2":
opt = nlopt.opt(nlopt.GN_CRS2_LM, len(unknows))
elif algo == "MLSL":
opt = nlopt.opt(nlopt.G_MLSL, len(unknows))
# if lalgo == "Nelder-Mead":
# local_opt = nlopt.opt(nlopt.LN_NELDER_MEAD, len(unknows))
# elif lalgo == "Cobyla":
# local_opt = nlopt.opt(nlopt.LN_LN_COBYLA, len(unknows))
# local_opt.set_maxeval(maxeval)
# local_opt.set_ftol_rel(stopval)
# if lalgo != "None":
# opt.set_local_optimizer(local_opt)
opt.set_maxeval(maxeval)
opt.set_ftol_rel(stopval)
opt.set_ftol_abs(1.e-5)
# opt.set_xtol_rel([tol, tol]) if 2 params? or float?
# opt.set_xtol_abs([1.e-5, 1.e-5]) if 2 opt params
if args.debug:
print("nlopt [ftol fabs xtol xabs]: ", opt.get_ftol_rel(), opt.get_ftol_abs() , opt.get_xtol_rel(), opt.get_xtol_abs() )
print("nlopt [ftol fabs xtol xabs]: ", opt.get_ftol_rel(), opt.get_ftol_abs() , opt.get_xtol_rel(), opt.get_xtol_abs() )
# bounds
lbounds = []
ubounds = []
for unknow in unknows:
if unknow == "dT":
lbounds.append(-10)
ubounds.append(10)
elif unknow == "h":
lbounds.append(hmin)
ubounds.append(hmax)
opt.set_lower_bounds(lbounds)
opt.set_upper_bounds(ubounds)
print("bound:", lbounds, ubounds)
# init_vals
init_vals = []
for unknow in unknows:
if unknow == "dT":
init_vals.append(dTini)
elif unknow == "h":
init_vals.append(hini)
print("init_vals:", init_vals)
# use *f_data to pass extra args: df_, subtype
# ex:
# fdata = (df_, sbutype)
# error_Tin(x, **fdata)
# df_ = fdata[0], subtype = fdata[1], debug = fdata[2]
# eventually check type with isinstanceof() / type()
# question: how to take into account error_tsb also??
if len(unknows) == 2:
select = 2
print("select: ", select)
def error_Tin(x, df_=df, unknows: list=unknows, hini: float=hini, dTini: float=dTini, select: int=select, debug: bool=debug):
"""compute error between measures and computed data"""
ohtc = hini
dT = dTini
if len(unknows) == 1:
if unknows[0] == "dT":
dT = x[0]
elif unknows[0] == "h":
ohtc = x[0]
else:
ohtc = x[1]
dT = x[0]
df['cThi'] = df.apply(lambda row: mixingTemp(row.Flow*1.e-3, row.BP, row.Tout+dT, 2*debit_alim/3600., row.BP, row.TAlimout), axis=1)
df_['cTin'] = df_.apply(lambda row: heatexchange(ohtc, row.teb, row.cThi, row.debitbrut/3600., row.FlowH, 10, row.BP)[1], axis=1)
diff = np.abs(df_["Tin"] - df_['cTin'])
L2_Tin = math.sqrt(np.dot( df_['Tin'], df_['Tin'] ))
error_Tin = math.sqrt(np.dot( diff, diff )) /L2_Tin # diff.size
df_['ctsb'] = df_.apply(lambda row: heatexchange(ohtc, row.teb, row.cThi, row.debitbrut/3600., row.FlowH, 10, row.BP)[0], axis=1)
diff = np.abs(df_["tsb"] - df_['ctsb'])
L2_tsb = math.sqrt(np.dot( df_['tsb'], df_['tsb'] ))
error_tsb = math.sqrt(np.dot( diff, diff )) / L2_tsb #diff.size
df["cQhot"] = df.apply(lambda row: (row.FlowH)*(w.getRho(row.BP, row.cThi)*w.getCp(row.BP, row.cThi)*(row.cThi)-w.getRho(row.HP, row.cTin)*w.getCp(row.HP, row.cTin)*row.cTin)/1.e+6, axis=1)
df["cQcold"] = df.apply(lambda row: row.debitbrut/3600.*(w.getRho(10, row.ctsb)*w.getCp(10, row.ctsb)*row.ctsb-w.getRho(10, row.teb)*w.getCp(10, row.teb)*row.teb)/1.e+6, axis=1)
df["cdQ"] = df.apply(lambda row: row.cQhot - row.cQcold, axis=1)
df["Qhot"] = df.apply(lambda row: (row.FlowH)*(w.getRho(row.BP, row.Thi)*w.getCp(row.BP, row.Thi)*(row.Thi)-w.getRho(row.HP, row.Tin)*w.getCp(row.HP, row.Tin)*row.cTin)/1.e+6, axis=1)
df["Qcold"] = df.apply(lambda row: row.debitbrut/3600.*(w.getRho(10, row.tsb)*w.getCp(10, row.tsb)*row.tsb-w.getRho(10, row.teb)*w.getCp(10, row.teb)*row.teb)/1.e+6, axis=1)
df["dQ"] = df.apply(lambda row: row.Qhot - row.Qcold, axis=1)
diff = np.abs(df_["Qhot"] - df_['cQhot'])
L2_Qhot = math.sqrt(np.dot( df_['Qhot'], df_['Qhot'] ))
error_qhot = math.sqrt(np.dot( diff, diff )) / L2_Qhot
diff = np.abs(df_["Qcold"] - df_['cQcold'])
L2_Qcold = math.sqrt(np.dot( df_['Qcold'], df_['Qcold'] ))
error_qcold = math.sqrt(np.dot( diff, diff )) / L2_Qcold
error_T = 0
if select == 0:
error_T = math.sqrt(error_Tin*error_Tin)
if select == 1:
error_T = math.sqrt(error_tsb*error_tsb)
if select == 2:
error_T = math.sqrt(error_Tin*error_Tin + error_tsb*error_tsb)
if select == 3:
error_T = df["cdQ"].mean()
if debug:
print("error_Tin(%s)" % x, error_Tin, error_tsb, error_T, df["cdQ"].mean(), select, ohtc, dT)
tables.append([dT, ohtc, error_Tin, error_tsb, error_T, df["cdQ"].mean()])
del df_['ctsb']
del df_['cTin']
return error_T
def myfunc(x, grad):
if grad.size > 0:
grad[0] = 0.0
grad[1] = 0.0
return error_Tin(x)
opt.set_min_objective(myfunc)
x = opt.optimize(init_vals)
minf = opt.last_optimum_value()
status = opt.last_optimize_result()
print("optimum: x=", x, "obj=", minf, "(code = ", status, ")")
# how to mark line with optimum value in red??
# loop over tables, if line correspond to x[0] then change line to red: a = "\033[1;32m%s\033[0m" %a
# #Color
# R = "\033[0;31;40m" #RED
# G = "\033[0;32;40m" # GREEN
# Y = "\033[0;33;40m" # Yellow
# B = "\033[0;34;40m" # Blue
# N = "\033[0m" # Reset
if status >= 0:
for line in tables:
tmp = 0
for i, unknow in enumerate(unknows):
tmp += int(line[i] == x[i])
if tmp == len(unknows):
for i,item in enumerate(line):
line[i] = "\033[1;32m%s\033[0m" % item
print( "\n", tabulate.tabulate(tables, headers, tablefmt="simple"), "\n")
optval = {}
for i,unknow in enumerate(unknows):
optval[unknow] = x[i]
return (optval, status)
if __name__ == "__main__":
command_line = None
import argparse
parser = argparse.ArgumentParser("Cooling loop Heat Exchanger")
parser.add_argument("input_file", help="input txt file (ex. M10_2020.10.04_20-2009_43_31.txt)")
parser.add_argument("--nhelices", help="specify number of helices", type=int, default=14)
parser.add_argument("--ohtc", help="specify heat exchange coefficient (ex. 4000 W/K/m^2 or None)", type=str, default="None")
parser.add_argument("--dT", help="specify dT for Tout (aka accounting for alim cooling, ex. 0)", type=float, default=0)
parser.add_argument("--site", help="specify a site (ex. M8, M9,...)", type=str)
parser.add_argument("--debit_alim", help="specify flowrate for power cooling - one half only (default: 30 m3/h)", type=float, default=30)
parser.add_argument("--show", help="display graphs (requires X11 server active)", action='store_true')
parser.add_argument("--debug", help="activate debug mode", action='store_true')
# parser.add_argument("--save", help="save graphs to png", action='store_true')
# raw|filter|smooth post-traitement of data
parser.add_argument("--pre", help="select a pre-traitment for data", type=str, choices=['raw','filtered','smoothed'], default='smoothed')
# define params for post traitment of data
parser.add_argument("--pre_params", help="pass param for pre-traitment method", type=str, default='400')
parser.add_argument("--markevery", help="set marker every ... display method", type=int, default='800')
# define subparser: find
subparsers = parser.add_subparsers(title="commands", dest="command", help='sub-command help')
# make the following options dependent to find + nlopt
parser_nlopt = subparsers.add_parser('find', help='findh help') #, parents=[parser])
parser_nlopt.add_argument("--error", help="specify error (0 for hot, 1 for cold, 2 for a mix)", type=int, choices=range(0, 2), default=0)
parser_nlopt.add_argument("--unknows", help="specifiy optim keys (eg h or dTh or dT;h", type=str, default="dT;h")
parser_nlopt.add_argument("--tol", help="specifiy relative tolerances (eg h or dTh or dT;h", type=str, default="1.e-5;1.e-5")
parser_nlopt.add_argument("--abstol", help="specifiy absolute tolerances (eg h or dTh or dT;h", type=str, default="1.e-5;1.e-5")
parser_nlopt.add_argument("--algo", help="specifiy optim algo", type=str, choices=["Direct_L", "Direct", "CRS2", "MLSL"], default="Direct_L")
parser_nlopt.add_argument("--local", help="specifiy optim algo", type=str, choices=["None", "Nelder-Mead", "Cobyla"], default="None")
parser_nlopt.add_argument("--stopval", help="stopping criteria for nlopt", type=float, default=1.e-2)
parser_nlopt.add_argument("--maxeval", help="stopping max eval for nlopt", type=int, default=1000)
#parser_nlopt.set_defaults(func=optim)
args = parser.parse_args(command_line)
tau = 400
if args.pre == 'smoothed':
print("smoothed options")
tau = float(args.pre_params)
threshold = 0.5
twindows = 10
if args.pre == 'filtered':
print("filtered options")
params = args.pre_params.split(';')
threshold = float(params[0])
twindows = int(params[1])
optkeys = []
if args.command == 'find':
print("find options")
optkeys = args.unknows.split(";") # returns a list
# check valid keys
# nlopt_args = parser_nlopt.parse_args()
# smoothed_args = parser_smoothed.parse_args()
print("args: ", args)
# check extension
f_extension = os.path.splitext(args.input_file)[-1]
if f_extension != ".txt":
print("so far only txt file support is implemented")
sys.exit(0)
filename = os.path.basename(args.input_file)
result = filename.startswith("M")
if result:
try:
index = filename.index("_")
args.site = filename[0:index]
print("site detected: %s" % args.site)
except:
print("no site detected - use args.site argument instead")
pass
mrun = python_magnetrun.MagnetRun.fromtxt(args.site, args.input_file)
if not args.site:
args.site = mrun.getSite()
# Adapt filtering and smoothing params to run duration
duration = mrun.getDuration()
if duration <= 10*tau:
tau = min(duration // 10, 10)
print("Modified smoothing param: %g over %g s run" % (tau, duration) )
args.markevery = 8 * tau
# print("type(mrun):", type(mrun))
mrun.getMData().addTime()
start_timestamp = mrun.getMData().getStartDate()
if not "Flow" in mrun.getKeys():
mrun.getMData().addData("Flow", "Flow = Flow1 + Flow2")
if not "Tin" in mrun.getKeys():
mrun.getMData().addData("Tin", "Tin = (Tin1 + Tin2)/2.")
if not "HP" in mrun.getKeys():
mrun.getMData().addData("HP", "HP = (HP1 + HP2)/2.")
if not "Talim" in mrun.getKeys():
# Talim not defined, try to estimate it
print("Talim key not present - set Talim=0")
mrun.getMData().addData("Talim", "Talim = 0")
# extract data
keys = ["t", "teb", "tsb", "debitbrut", "Tout", "Tin", "Flow", "BP", "HP", "Pmagnet"]
units = ["s","C","C","m\u00B3/h","C","C","l/s","bar", "MW"]
# df = mrun.getMData().extractData(keys)
if args.debug:
pd.set_option("display.max_rows", None, "display.max_columns", None)
# TODO: move to magnetdata
max_tap = 0
for i in range(1,args.nhelices+1):
ukey = "Ucoil%d" % i
# print ("Ukey=%s" % ukey, (ukey in keys) )
if ukey in mrun.getKeys():
max_tap=i
if max_tap != args.nhelices and max_tap != args.nhelices//2:
print("Check data: inconsistant U probes and helices")
sys.exit(1)
missing_probes=[]
for i in range(1,max_tap+1):
ukey = "Ucoil%d" % i
if not ukey in mrun.getKeys():
# Add an empty column
# print ("Ukey=%s" % ukey, (ukey in keys) )
mrun.getMData().addData(ukey, "%s = 0" % ukey)
missing_probes.append(i)
if missing_probes:
print("Missing U probes:", missing_probes)
# TODO verify if Ucoil starts at 1 if nhelices < 14
formula = "UH = "
for i in range(args.nhelices+1):
ukey = "Ucoil%d" % i
if ukey in mrun.getKeys():
if i != 1:
formula += " + "
formula += ukey
# print("UH", formula)
if not "UH" in mrun.getKeys():
mrun.getMData().addData("UH", formula)
formula = "UB = Ucoil15 + Ucoil16"
# print("UB", formula)
if not "UB" in mrun.getKeys():
mrun.getMData().addData("UB", formula)
if not "PH" in mrun.getKeys():
mrun.getMData().addData("PH", "PH = UH * IH")
if not "PB" in mrun.getKeys():
mrun.getMData().addData("PB", "PB = UB * IB")
if not "Pt" in mrun.getKeys():
mrun.getMData().addData("Pt", "Pt = (PH + PB)/1.e+6")
# estimate dTH: PH / (rho * Cp * Flow1)
mrun.getMData().addData("dTh", "dTh = PH / (1000 * 4180 * Flow1*1.e-3)")
# estimate dTB: PB / (rho * Cp * Flow2)
mrun.getMData().addData("dTb", "dTb = PB / (1000 * 4180 * Flow2*1.e-3)")
# estimate Tout: ( (Tin1+dTh)*Flow1 + (Tin2+dTb)*Flow2 ) / (Flow1+Flow2)
mrun.getMData().addData(
"cTout", "( (Tin1+dTh)*Flow1 + (Tin2+dTb)*Flow2 ) / (Flow1+Flow2)")
# Geom specs from HX Datasheet
Nc = int((553 - 1)/2.) # (Number of plates -1)/2
Ac = 3.e-3 * 1.174 # Plate spacing * Plate width [m^2]
de = 2 * 3.e-3 # 2*Plate spacing [m]
# coolingparams = [0.207979, 0.640259, 0.397994]
coolingparams = [0.07, 0.8, 0.4]
# Compute OHTC
df = mrun.getData()
df['MeanU_h'] = df.apply(lambda row: ((row.Flow)*1.e-3+args.debit_alim/3600.) / (Ac * Nc), axis=1)
df['MeanU_c'] = df.apply(lambda row: (row.debitbrut/3600.) / ( Ac * Nc), axis=1)
df['Ohtc'] = df.apply(lambda row: w.getOHTC(row.MeanU_h, row.MeanU_c, de, row.BP, row.Tout, row.BP, row.teb, coolingparams), axis=1)
ax = plt.gca()
df.plot(x='t', y='Ohtc', ax=ax, color='red', marker='o', alpha = .5, markevery=args.markevery)
plt.xlabel(r't [s]')
plt.ylabel(r'$W/m^2/K$')
plt.grid(b=True)
plt.title(mrun.getInsert().replace(r"_",r"\_") + ": Heat Exchange Coefficient")
if args.show:
plt.show()
else:
imagefile = args.input_file.replace(".txt", "-ohtc.png")
plt.savefig(imagefile, dpi=300 )
print("save to %s" % imagefile)
plt.close()
pretreatment_keys = ["debitbrut", "Flow", "teb", "Tout", "PH", "PB", "Pt"]
if "TAlimout" in mrun.getKeys():
pretreatment_keys.append("TAlimout")
else:
mrun.getMData().addData("TAlimout", "TAlimout = 0")
# filter spikes
# see: https://ocefpaf.github.io/python4oceanographers/blog/2015/03/16/outlier_detection/
if args.pre == 'filtered':
for key in pretreatment_keys:
mrun = datatools.filterpikes(mrun, key, inplace=True, threshold=threshold, twindows=twindows, debug=args.debug, show=args.show, input_file=args.input_file)
print("Filtered pikes done")
# smooth data Locally Weighted Linear Regression (Loess)
# see: https://xavierbourretsicotte.github.io/loess.html(
if args.pre == 'smoothed':
for key in pretreatment_keys:
mrun = datatools.smooth(mrun, key, inplace=True, tau=tau, debug=args.debug, show=args.show, input_file=args.input_file)
print("smooth data done")
display_T(args.input_file, f_extension, mrun, 'itsb', 'iTin', args.debit_alim, args.ohtc, args.dT, args.show, "-coolingloop.png", args.debug)
display_Q(args.input_file, f_extension, mrun, args.debit_alim, args.ohtc, args.dT, args.show, "-Q.png")
if args.command == 'find':
# Compute Tin, Tsb
df = mrun.getData()
if not "FlowH" in df:
df["FlowH"] = df.apply(lambda row: ((row.Flow)*1.e-3+(2*args.debit_alim)/3600.), axis=1)
if not "Thi" in df:
df['Thi'] = df.apply(lambda row: mixingTemp(row.Flow*1.e-3, row.BP, row.Tout, 2*args.debit_alim/3600., row.BP, row.TAlimout), axis=1)
(opt, status) = find(df, optkeys, args.dT, args.ohtc, 100, 6000, args.algo, args.local, args.maxeval, args.stopval, select=args.error, site=args.site, debit_alim=args.debit_alim, debug=args.debug)
if status < 0:
print("Optimization %s failed with %d error: ", (args.algo, status) )
sys.exit(1)
dT = args.dT
h = args.ohtc
for key in optkeys:
if key == "dT":
dT = opt["dT"]
elif key == "h":
h = opt["h"]
# Get solution for optimum
display_T(args.input_file, f_extension, mrun, 'ctsb', 'cTin', args.debit_alim, h, dT, args.show, "-T-find.png")
display_Q(args.input_file, f_extension, mrun, args.debit_alim, h, dT, args.show, "-Q-find.png")
import itertools
import numpy as np
import pytest
from pyquil import Program
from pyquil.gate_matrices import QUANTUM_GATES as GATES
from pyquil.gates import *
from pyquil.numpy_simulator import targeted_einsum, NumpyWavefunctionSimulator, \
all_bitstrings, targeted_tensordot, _term_expectation
from pyquil.paulis import sZ, sX
from pyquil.pyqvm import PyQVM
from pyquil.reference_simulator import ReferenceWavefunctionSimulator
from pyquil.tests.test_reference_wavefunction_simulator import _generate_random_program, \
_generate_random_pauli
def test_H_einsum():
h_mat = GATES['H']
one_q_wfn = np.zeros((2,), dtype=np.complex128)
one_q_wfn[0] = 1 + 0.j
one_q_wfn = targeted_einsum(gate=h_mat, wf=one_q_wfn, wf_target_inds=[0])
np.testing.assert_allclose(one_q_wfn, 1 / np.sqrt(2) * np.ones(2))
def test_H_tensordot():
h_mat = GATES['H']
one_q_wfn = np.zeros((2,), dtype=np.complex128)
one_q_wfn[0] = 1 + 0.j
one_q_wfn = targeted_tensordot(gate=h_mat, wf=one_q_wfn, wf_target_inds=[0])
np.testing.assert_allclose(one_q_wfn, 1 / np.sqrt(2) * np.ones(2))
def test_wfn_ordering_einsum():
h_mat = GATES['H']
two_q_wfn = np.zeros((2, 2), dtype=np.complex128)
two_q_wfn[0, 0] = 1 + 0.j
two_q_wfn = targeted_einsum(gate=h_mat, wf=two_q_wfn, wf_target_inds=[0])
np.testing.assert_allclose(two_q_wfn[:, 0], 1 / np.sqrt(2) * np.ones(2))
def test_wfn_ordering_tensordot():
h_mat = GATES['H']
two_q_wfn = np.zeros((2, 2), dtype=np.complex128)
two_q_wfn[0, 0] = 1 + 0.j
two_q_wfn = targeted_tensordot(gate=h_mat, wf=two_q_wfn, wf_target_inds=[0])
np.testing.assert_allclose(two_q_wfn[:, 0], 1 / np.sqrt(2) * np.ones(2))
def test_einsum_simulator_H():
prog = Program(H(0))
qam = PyQVM(n_qubits=1, quantum_simulator_type=NumpyWavefunctionSimulator)
qam.execute(prog)
wf = qam.wf_simulator.wf
np.testing.assert_allclose(wf, 1 / np.sqrt(2) * np.ones(2))
def test_einsum_simulator_1():
prog = Program(H(0), CNOT(0, 1))
qam = PyQVM(n_qubits=2, quantum_simulator_type=NumpyWavefunctionSimulator)
qam.execute(prog)
wf = qam.wf_simulator.wf
np.testing.assert_allclose(wf, 1 / np.sqrt(2) * np.reshape([1, 0, 0, 1], (2, 2)))
def test_einsum_simulator_CNOT():
prog = Program(X(0), CNOT(0, 1))
qam = PyQVM(n_qubits=2, quantum_simulator_type=NumpyWavefunctionSimulator)
qam.execute(prog)
wf = qam.wf_simulator.wf
np.testing.assert_allclose(wf, np.reshape([0, 0, 0, 1], (2, 2)))
def test_einsum_simulator_CCNOT():
prog = Program(X(2), X(0), CCNOT(2, 1, 0))
qam = PyQVM(n_qubits=3, quantum_simulator_type=NumpyWavefunctionSimulator)
qam.execute(prog)
wf = qam.wf_simulator.wf
should_be = np.zeros((2, 2, 2))
should_be[1, 0, 1] = 1
np.testing.assert_allclose(wf, should_be)
def test_einsum_simulator_10q():
prog = Program(H(0))
for i in range(10 - 1):
prog += CNOT(i, i + 1)
qam = PyQVM(n_qubits=10, quantum_simulator_type=NumpyWavefunctionSimulator)
qam.execute(prog)
wf = qam.wf_simulator.wf
should_be = np.zeros((2,) * 10)
should_be[0, 0, 0, 0, 0, 0, 0, 0, 0, 0] = 1 / np.sqrt(2)
should_be[1, 1, 1, 1, 1, 1, 1, 1, 1, 1] = 1 / np.sqrt(2)
np.testing.assert_allclose(wf, should_be)
def test_measure():
qam = PyQVM(n_qubits=3, quantum_simulator_type=NumpyWavefunctionSimulator)
qam.execute(Program(
H(0),
CNOT(0, 1),
MEASURE(0, 63)
))
measured_bit = qam.ram['ro'][-1]
should_be = np.zeros((2, 2, 2))
if measured_bit == 1:
should_be[1, 1, 0] = 1
else:
should_be[0, 0, 0] = 1
np.testing.assert_allclose(qam.wf_simulator.wf, should_be)
@pytest.fixture(params=list(range(3, 5)))
def n_qubits(request):
return request.param
@pytest.fixture(params=[2, 50, 100])
def prog_length(request):
return request.param
@pytest.fixture(params=[True, False])
def include_measures(request):
return request.param
def test_vs_ref_simulator(n_qubits, prog_length, include_measures):
if include_measures:
seed = 52
else:
seed = None
for _ in range(10):
prog = _generate_random_program(n_qubits=n_qubits, length=prog_length,
include_measures=include_measures)
ref_qam = PyQVM(n_qubits=n_qubits, seed=seed,
quantum_simulator_type=ReferenceWavefunctionSimulator)
ref_qam.execute(prog)
ref_wf = ref_qam.wf_simulator.wf
es_qam = PyQVM(n_qubits=n_qubits, seed=seed,
quantum_simulator_type=NumpyWavefunctionSimulator)
es_qam.execute(prog)
es_wf = es_qam.wf_simulator.wf
# einsum has its wavefunction as a vector of shape (2, 2, 2, ...) where qubits are indexed
# from left to right. We transpose then flatten.
es_wf = es_wf.transpose().reshape(-1)
np.testing.assert_allclose(ref_wf, es_wf, atol=1e-15)
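# Concretely, for two qubits the numpy simulator stores amplitudes as wf[q0, q1], so the
# amplitude of bitstring q1 q0 = "10" lives at wf[0, 1]; transposing and flattening lines
# this up with the reference simulator's flat vector indexed by int("10", 2) = 2.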
def test_all_bitstrings():
for n_bits in range(2, 10):
bitstrings_ref = np.array(list(itertools.product((0, 1), repeat=n_bits)))
bitstrings_new = all_bitstrings(n_bits)
np.testing.assert_array_equal(bitstrings_ref, bitstrings_new)
def test_sample_bitstrings():
prog = Program(H(0), H(1))
qam = PyQVM(n_qubits=3, quantum_simulator_type=NumpyWavefunctionSimulator, seed=52)
qam.execute(prog)
bitstrings = qam.wf_simulator.sample_bitstrings(10000)
assert bitstrings.shape == (10000, 3)
np.testing.assert_allclose([0.5, 0.5, 0], np.mean(bitstrings, axis=0), rtol=1e-2)
def test_expectation_helper():
n_qubits = 3
wf = np.zeros(shape=((2,) * n_qubits), dtype=np.complex128)
wf[0, 0, 0] = 1
z0 = _term_expectation(wf, 0.4 * sZ(0))
assert z0 == 0.4
x0 = _term_expectation(wf, sX(2))
assert x0 == 0
def test_expectation():
wfn = NumpyWavefunctionSimulator(n_qubits=3)
val = wfn.expectation(0.4 * sZ(0) + sX(2))
assert val == 0.4
def test_expectation_vs_ref_qvm(qvm, n_qubits):
for repeat_i in range(20):
prog = _generate_random_program(n_qubits=n_qubits, length=10)
operator = _generate_random_pauli(n_qubits=n_qubits, n_terms=5)
print(prog)
print(operator)
ref_wf = ReferenceWavefunctionSimulator(n_qubits=n_qubits).do_program(prog)
ref_exp = ref_wf.expectation(operator=operator)
np_wf = NumpyWavefunctionSimulator(n_qubits=n_qubits).do_program(prog)
np_exp = np_wf.expectation(operator=operator)
np.testing.assert_allclose(ref_exp, np_exp, atol=1e-15)
# The following tests are lovingly copied with light modification from the Cirq project
# https://github.com/quantumlib/Cirq
#
# With the original copyright disclaimer:
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def kron(*matrices: np.ndarray) -> np.ndarray:
"""Computes the kronecker product of a sequence of matrices.
A *args version of lambda args: functools.reduce(np.kron, args).
Args:
*matrices: The matrices and controls to combine with the kronecker
product.
Returns:
The resulting matrix.
"""
product = np.eye(1)
for m in matrices:
product = np.kron(product, m)
return np.array(product)
def test_einsum_matches_kron_then_dot():
t = np.array([1, 2, 3, 4, 5, 6, 7, 8])
m = np.array([[2, 3], [5, 7]])
i = np.eye(2)
np.testing.assert_allclose(
targeted_einsum(gate=m,
wf=t.reshape((2, 2, 2)),
wf_target_inds=[0]),
np.dot(kron(m, i, i), t).reshape((2, 2, 2)),
atol=1e-8)
np.testing.assert_allclose(
targeted_einsum(gate=m,
wf=t.reshape((2, 2, 2)),
wf_target_inds=[1]),
np.dot(kron(i, m, i), t).reshape((2, 2, 2)),
atol=1e-8)
np.testing.assert_allclose(
targeted_einsum(gate=m,
wf=t.reshape((2, 2, 2)),
wf_target_inds=[2]),
np.dot(kron(i, i, m), t).reshape((2, 2, 2)),
atol=1e-8)
def test_tensordot_matches_kron_then_dot():
t = np.array([1, 2, 3, 4, 5, 6, 7, 8])
m = np.array([[2, 3], [5, 7]])
i = np.eye(2)
np.testing.assert_allclose(
targeted_tensordot(m,
t.reshape((2, 2, 2)),
[0]),
np.dot(kron(m, i, i), t).reshape((2, 2, 2)),
atol=1e-8)
np.testing.assert_allclose(
targeted_tensordot(m,
t.reshape((2, 2, 2)),
[1]),
np.dot(kron(i, m, i), t).reshape((2, 2, 2)),
atol=1e-8)
np.testing.assert_allclose(
targeted_tensordot(m,
t.reshape((2, 2, 2)),
[2]),
np.dot(kron(i, i, m), t).reshape((2, 2, 2)),
atol=1e-8)
def test_einsum_reorders_matrices():
t = np.eye(4).reshape((2, 2, 2, 2))
m = np.array([
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 0, 1,
0, 0, 1, 0,
]).reshape((2, 2, 2, 2))
np.testing.assert_allclose(
targeted_einsum(gate=m,
wf=t,
wf_target_inds=[0, 1]),
m,
atol=1e-8)
np.testing.assert_allclose(
targeted_einsum(gate=m,
wf=t,
wf_target_inds=[1, 0]),
np.array([
1, 0, 0, 0,
0, 0, 0, 1,
0, 0, 1, 0,
0, 1, 0, 0,
]).reshape((2, 2, 2, 2)),
atol=1e-8)
def test_tensordot_reorders_matrices():
t = np.eye(4).reshape((2, 2, 2, 2))
m = np.array([
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 0, 1,
0, 0, 1, 0,
]).reshape((2, 2, 2, 2))
np.testing.assert_allclose(
targeted_tensordot(gate=m,
wf=t,
wf_target_inds=[0, 1]),
m,
atol=1e-8)
np.testing.assert_allclose(
targeted_tensordot(gate=m,
wf=t,
wf_target_inds=[1, 0]),
np.array([
1, 0, 0, 0,
0, 0, 0, 1,
0, 0, 1, 0,
0, 1, 0, 0,
]).reshape((2, 2, 2, 2)),
atol=1e-8)
from future.utils import iteritems
from pandaharvester.harvestercore.plugin_base import PluginBase
from pandaharvester.harvestermover import mover_utils
# preparator plugin with RSE + no data motion
class RseDirectPreparator(PluginBase):
"""The workflow for RseDirectPreparator is as follows. First panda makes a rule to
transfer files to an RSE which is associated to the resource. Once files are transferred
to the RSE, job status is changed to activated from assigned. Then Harvester fetches
the job and constructs input file paths that point to pfns in the storage. This means
that the job directly read input files from the storage.
"""
# constructor
def __init__(self, **kwarg):
PluginBase.__init__(self, **kwarg)
# check status
def check_stage_in_status(self, jobspec):
return True, ''
# trigger preparation
def trigger_preparation(self, jobspec):
return True, ''
# resolve input file paths
def resolve_input_paths(self, jobspec):
# get input files
inFiles = jobspec.get_input_file_attributes()
# set path to each file
for inLFN, inFile in iteritems(inFiles):
inFile['path'] = mover_utils.construct_file_path(self.basePath, inFile['scope'], inLFN)
# set
jobspec.set_input_file_paths(inFiles)
return True, ''
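# Illustrative sketch (paths, scope and LFN are hypothetical and depend on basePath and on
# how mover_utils.construct_file_path lays files out): for an input file EVNT.pool.root in
# scope mc16_13TeV, resolve_input_paths() sets something like
#   inFile['path'] = '<basePath>/mc16_13TeV/.../EVNT.pool.root'
# i.e. a direct path into the RSE storage that the payload reads without any staging.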
'''
Created on Jun 6, 2012
@author: kristof
'''
import time
import datetime
import general_settings
from twython import Twython
from klout import KloutInfluence
import tweeql.extras.sentiment
import tweeql.extras.sentiment.analysis
from pkg_resources import resource_filename
from dateutil import parser
import itertools
from pygeocoder import Geocoder
import language
import urllib
from collections import defaultdict
import ordereddict
import gzip
import math
import re
import os
import pickle
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid.anchored_artists import AnchoredText
from nltk import word_tokenize, sent_tokenize, corpus
DEBUG = True
MAIN_KEYWORD = 'koffie'
COMPETITOR1_KEYWORD = 'koffieapparaat'
COMPETITOR2_KEYWORD = ''
MAIN_ENTERPRISE = 'PhilipsNL'
MAIN_LOCATION = 'Amsterdam'
MAIN_LANGUAGE = 'nl'
MAIN_COUNTRY = 'The Netherlands'
MAIN_SCREEN_NAME_LIST = ['PhilipsNL', 'PhilipsCare_NL']
MAIL_TO_LIST = ['[email protected]']
SEARCH_PAGES = 10
SEARCH_RPP = 5000
#REPORT1
import report
report = report.Report()
main_data = []
competitor1_data = []
competitor2_data = []
tweet_list = []
tweet_list2 = []
tweet_list3 = []
#init sentiment analysis
classifier = None
classinfo = None
analysis = tweeql.extras.sentiment.analysis
fname = resource_filename(tweeql.extras.sentiment.__name__, 'sentiment.pkl.gz')
fp = gzip.open(fname)
classifier_dict = pickle.load(fp)
fp.close()
classifier = classifier_dict['classifier']
classinfo = { classifier_dict['pos_label'] :
{ 'cutoff': classifier_dict['pos_cutoff'],
'value' : 1.0/classifier_dict['pos_recall'] },
classifier_dict['neg_label'] :
{ 'cutoff': classifier_dict['neg_cutoff'],
'value': -1.0/classifier_dict['neg_recall'] }
}
def sentiment(text):
global classinfo
words = analysis.words_in_tweet(text)
features = analysis.word_feats(words)
dist = classifier.prob_classify(features)
retval = 0
maxlabel = dist.max()
classinf = classinfo[maxlabel]
if dist.prob(maxlabel) > classinf['cutoff']:
retval = classinf['value']
return retval
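# Sketch of what sentiment() returns: the classifier picks the most probable label and,
# only if its probability clears the per-label cutoff loaded from sentiment.pkl.gz, maps
# it to a signed score (positive label -> +1/pos_recall, negative label -> -1/neg_recall);
# otherwise the tweet is treated as neutral and 0 is returned.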
# search keywords
twitter = Twython(app_key=general_settings.CONSUMER_KEY, app_secret=general_settings.CONSUMER_SECRET, oauth_token=general_settings.ACCESS_TOKEN, oauth_token_secret=general_settings.ACCESS_SECRET)
for i in (map(lambda x : x+1, range(SEARCH_PAGES))):
try:
print "Searching tweets page %i" % i
# TODO: country language
search_results = twitter.search(q=MAIN_KEYWORD, page=i, rpp=SEARCH_RPP)
except:
pass
print "Indexing tweets page %i" % i
for tweet in search_results["results"]:
print tweet
tweet_data = {}
print "Tweet from @%s Date: %s" % (tweet['from_user'].encode('utf-8'),tweet['created_at'])
#print tweet['text'].encode('utf-8'),"\n"
tweet_data['text'] = tweet['text']#.encode('utf-8')
tweet_data['username'] = tweet['from_user']
tweet_data['created_at'] = tweet['created_at']
#===================================================================
# klout = KloutInfluence(tweet['from_user'].encode('utf-8'))
# try:
# tweet_data['influence'] = klout.score()
# tweet_data['influences'] = klout.influences()
# tweet_data['influence_topics'] = klout.topics()
# except:
# tweet_data['influence'] = 0
# tweet_data['influence_topics'] = {}
#===================================================================
tweet_data['influence'] = 0
tweet_data['sentiment'] = sentiment(tweet['text'])
tweet_data['ws'] = 0
tweet_data['hour_string'] = "00:00"
#geo
if tweet['geo']:
print tweet['geo']
tweet_data['geo'] = tweet['geo']
results = Geocoder.reverse_geocode(tweet_data['geo']['coordinates'][0], tweet_data['geo']['coordinates'][1])
tweet_data['country'] = results[0].country
tweet_data['city'] = results[0].locality
tweet_data['postalcode'] = results[0].postal_code
print results[0]
else:
tweet_data['geo'] = None
tweet_data['country'] = None
#gender
#avatar
tweet_data['avatar'] = urllib.urlretrieve(tweet['profile_image_url_https'])
#number, save and use
#language
#ld = language.LangDetect()
#tweet_data['lang'] = ld.detect(tweet_data['text'])
tweet_data['lang'] = tweet['iso_language_code']
print tweet_data['lang']
#filter out retweets (duplicate texts), own accounts, and off-target country/language
if ((MAIN_COUNTRY == tweet_data['country']) or (tweet_data['lang'] == MAIN_LANGUAGE)) and (tweet_data['username'] not in MAIN_SCREEN_NAME_LIST) and (tweet_data['text'] not in tweet_list):
main_data.append(tweet_data)
if tweet_data['text'] not in tweet_list:
tweet_list.append(tweet_data['text'])
main_data = sorted(main_data, key=lambda k: k['created_at'])
report.spike_keyword = MAIN_KEYWORD
report.spike_location = MAIN_LOCATION
for i in (map(lambda x : x+1, range(SEARCH_PAGES))):
try:
print "Searching tweets page %i" % i
# TODO: country language
search_results = twitter.search(q=COMPETITOR1_KEYWORD, page=i, rpp=SEARCH_RPP)
except:
pass
print "Indexing tweets page %i" % i
for tweet in search_results["results"]:
print tweet
tweet_data = {}
print "Tweet from @%s Date: %s" % (tweet['from_user'].encode('utf-8'),tweet['created_at'])
#print tweet['text'].encode('utf-8'),"\n"
tweet_data['text'] = tweet['text'].encode('utf-8')
tweet_data['username'] = tweet['from_user']
tweet_data['created_at'] = tweet['created_at']
#===================================================================
# klout = KloutInfluence(tweet['from_user'].encode('utf-8'))
# try:
# tweet_data['influence'] = klout.score()
# tweet_data['influences'] = klout.influences()
# tweet_data['influence_topics'] = klout.topics()
# except:
# tweet_data['influence'] = 0
# tweet_data['influence_topics'] = {}
#===================================================================
tweet_data['influence'] = 0
tweet_data['sentiment'] = sentiment(tweet['text'])
tweet_data['ws'] = 0
tweet_data['hour_string'] = "00:00"
#geo
if tweet['geo']:
print tweet['geo']
tweet_data['geo'] = tweet['geo']
results = Geocoder.reverse_geocode(tweet_data['geo']['coordinates'][0], tweet_data['geo']['coordinates'][1])
tweet_data['country'] = results[0].country
tweet_data['city'] = results[0].locality
tweet_data['postalcode'] = results[0].postal_code
print results[0]
else:
tweet_data['geo'] = None
tweet_data['country'] = None
#gender
#avatar
tweet_data['avatar'] = urllib.urlretrieve(tweet['profile_image_url_https'])
#language
#ld = language.LangDetect()
#tweet_data['lang'] = ld.detect(tweet_data['text'])
tweet_data['lang'] = tweet['iso_language_code']
print tweet_data['lang']
#filter out retweets (duplicate texts), own accounts, and off-target country/language
if ((MAIN_COUNTRY == tweet_data['country']) or (tweet_data['lang'] == MAIN_LANGUAGE)) and (tweet_data['username'] not in MAIN_SCREEN_NAME_LIST) and (tweet_data['text'] not in tweet_list2):
competitor1_data.append(tweet_data)
if tweet_data['text'] not in tweet_list2:
tweet_list2.append(tweet_data['text'])
competitor1_data = sorted(competitor1_data, key=lambda k: k['created_at'])
for i in (map(lambda x : x+1, range(SEARCH_PAGES))):
try:
print "Searching tweets page %i" % i
# TODO: country language
search_results = twitter.search(q=COMPETITOR2_KEYWORD, page=i, rpp=SEARCH_RPP)
except:
pass
print "Indexing tweets page %i" % i
for tweet in search_results["results"]:
print tweet
tweet_data = {}
print "Tweet from @%s Date: %s" % (tweet['from_user'].encode('utf-8'),tweet['created_at'])
#print tweet['text'].encode('utf-8'),"\n"
tweet_data['text'] = tweet['text'].encode('utf-8')
tweet_data['username'] = tweet['from_user']
tweet_data['created_at'] = tweet['created_at']
#===================================================================
# klout = KloutInfluence(tweet['from_user'].encode('utf-8'))
# try:
# tweet_data['influence'] = klout.score()
# tweet_data['influences'] = klout.influences()
# tweet_data['influence_topics'] = klout.topics()
# except:
# tweet_data['influence'] = 0
# tweet_data['influence_topics'] = {}
#===================================================================
tweet_data['influence'] = 0
tweet_data['sentiment'] = sentiment(tweet['text'])
tweet_data['ws'] = 0
tweet_data['hour_string'] = "00:00"
#geo
if tweet['geo']:
print tweet['geo']
tweet_data['geo'] = tweet['geo']
results = Geocoder.reverse_geocode(tweet_data['geo']['coordinates'][0], tweet_data['geo']['coordinates'][1])
tweet_data['country'] = results[0].country
tweet_data['city'] = results[0].locality
tweet_data['postalcode'] = results[0].postal_code
#print results[0]
else:
tweet_data['geo'] = None
tweet_data['country'] = None
#gender
#avatar
tweet_data['avatar'] = urllib.urlretrieve(tweet['profile_image_url_https'])
#language
#ld = language.LangDetect()
#tweet_data['lang'] = ld.detect(tweet_data['text'])
tweet_data['lang'] = tweet['iso_language_code']
print tweet_data['lang']
#filter out retweets
        if ((MAIN_COUNTRY == tweet_data['country']) or (tweet_data['lang'] == MAIN_LANGUAGE)) and (tweet_data['username'] not in MAIN_SCREEN_NAME_LIST) and (tweet_data['text'] not in tweet_list3):
            competitor2_data.append(tweet_data)
if tweet_data['text'] not in tweet_list3:
tweet_list3.append(tweet_data['text'])
competitor2_data = sorted(competitor2_data, key=lambda k: k['created_at'])
print "Calculating cumulative volumes... comp2"
x= []
y = []
volume = -1
for tweet_data in competitor2_data:
d = parser.parse(tweet_data['created_at']).hour #daily or hourly
tweet_data['hour_string'] = str(parser.parse(tweet_data['created_at']).hour) + ":" + str(parser.parse(tweet_data['created_at']).minute)
if not d in x:
if volume != -1:
y.append(volume)
volume = 0
x.append(d)
volume += 1
y.append(volume)
print x
print y
volumegraph3 = tuple(y)
print "Calculating cumulative volumes... comp1"
x= []
y = []
volume = -1
for tweet_data in competitor1_data:
d = parser.parse(tweet_data['created_at']).hour #daily or hourly
tweet_data['hour_string'] = str(parser.parse(tweet_data['created_at']).hour) + ":" + str(parser.parse(tweet_data['created_at']).minute)
if not d in x:
if volume != -1:
y.append(volume)
volume = 0
x.append(d)
volume += 1
y.append(volume)
print x
print y
volumegraph2 = tuple(y)
print "Calculating cumulative volumes..."
x= []
y = []
volume = -1
for tweet_data in main_data:
d = parser.parse(tweet_data['created_at']).hour #daily or hourly
tweet_data['hour_string'] = str(parser.parse(tweet_data['created_at']).hour) + ":" + str(parser.parse(tweet_data['created_at']).minute)
if not d in x:
if volume != -1:
y.append(volume)
volume = 0
x.append(d)
volume += 1
y.append(volume)
print x
print y
volumegraph1 = tuple(y)
report.volumekeywords = [MAIN_KEYWORD, COMPETITOR1_KEYWORD, COMPETITOR2_KEYWORD]
report.volumebegintime = str(parser.parse(main_data[0]['created_at']).hour) + ":" + str(parser.parse(main_data[0]['created_at']).minute)
report.volumeendtime = str(parser.parse(main_data[-1]['created_at']).hour) + ":" + str(parser.parse(main_data[-1]['created_at']).minute)
report.volumegraphs = [volumegraph1, volumegraph2, volumegraph3]
print "Calculating the freq times..."
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return itertools.izip(a, b)
times = [item['created_at'] for item in main_data]
sum_deltas = 0
count_deltas = 1
for (t0, t1) in pairwise(times):
sum_deltas += (parser.parse(t1) - parser.parse(t0)).seconds #seconds, minutes, hours
#print t0, t1, (sum_deltas) / count_deltas
count_deltas += 1
delta_time = (sum_deltas) / count_deltas
print(delta_time) #minutes or seconds ?
report.freq_time = delta_time
print "Calculating the delta's of Volume..."
comb_list = itertools.combinations(y, 2)
max_volume_delta = 0
max_volume_sign = 1
max_volume_s0 = 1
max_volume_s1 = 0
for comb in comb_list:
delta = abs(comb[1] - comb[0])
if delta:
sign = (comb[1] - comb[0]) / abs(comb[1] - comb[0])
else:
sign = 1
if delta > max_volume_delta:
max_volume_delta = delta
max_volume_sign = sign
if (comb[0] < comb[1]):
max_volume_s0 = comb[0]
max_volume_s1 = comb[1]
else:
max_volume_s0 = comb[1]
max_volume_s1 = comb[0]
max_volume_percentage = (max_volume_delta / max_volume_s0) * 100
print max_volume_s0, max_volume_s1
print "Creating sentiment plot..."
x= []
y = []
sentiment = -100
counter = 0
for tweet_data in main_data:
d = parser.parse(tweet_data['created_at']).hour
if not d in x:
if sentiment > -100:
y.append((sentiment/counter))
sentiment = 0
counter = 0
x.append(d)
sentiment += tweet_data['sentiment']
counter += 1
y.append(sentiment/counter)
print x
print y
report.sentimentgraph = tuple(y)
print "Calculating the delta's of sentiment..."
comb_list = itertools.combinations(y, 2)
max_sentiment_delta = 0
max_sentiment_sign = 1
max_sentiment_s0 = 1
max_sentiment_s1 = 0
for comb in comb_list:
delta = abs(comb[1] - comb[0])
if delta:
sign = (comb[1] - comb[0]) / abs(comb[1] - comb[0])
else:
sign= 1
if delta > max_sentiment_delta:
max_sentiment_delta = delta
max_sentiment_sign = sign
if comb[0] < comb[1]:
max_sentiment_s0 = comb[0]
max_sentiment_s1 = comb[1]
else:
max_sentiment_s0 = comb[1]
max_sentiment_s1 = comb[0]
max_sentiment_percentage = (max_sentiment_delta / max_sentiment_s0) * 100
print max_sentiment_s0, max_sentiment_s1
if max_volume_percentage > max_sentiment_percentage:
report.spike_percentage = max_volume_sign * max_volume_percentage
else:
report.spike_percentage = max_sentiment_sign * max_sentiment_percentage
report.mentions_percentage = max_volume_percentage
report.sentiment_percentage = max_sentiment_percentage
'''years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
days = mdates.DayLocator()
hours = mdates.HourLocator(interval=2)
fmt = mdates.DateFormatter('%d %b %Y')
fig = plt.figure()
ax = fig.add_subplot(111)
# format the ticks
ax.xaxis.set_major_locator(days)
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_minor_locator(hours)
datemin = min(x)
datemax = max(x)
ax.set_xlim(datemin, datemax)
ax.set_ylim(0, max(y))
ax.format_xdata = mdates.DateFormatter('%a, %d %b %Y %H:%M:%S %z')
ax.format_ydata = '$%1.2f'
ax.grid(True)
ax.plot(x, y)
'''
a = np.diff(np.sign(np.diff(y))).nonzero()[0] + 1 # local min+max
b = (np.diff(np.sign(np.diff(y))) > 0).nonzero()[0] + 1 # local min
c = (np.diff(np.sign(np.diff(y))) < 0).nonzero()[0] + 1 # local max
xmins = [x[i] for i in b]
ymins = [y[i] for i in b]
xmaxs = [x[i] for i in c]
ymaxs = [y[i] for i in c]
print xmins
print ymins
print xmaxs
print ymaxs
report.optima = zip(xmins, ymins)
report.optima.extend(zip(xmaxs, ymaxs))
'''
if b.any():
ax.plot(xmins, ymins, "o", label="min")
if c.any():
ax.plot(xmaxs, ymaxs, "o", label="max")
plt.legend()
'''
'''# rotates and right aligns the x labels, and moves the bottom of the
# axes up to make room for them
fig.autofmt_xdate()'''
#plt.show()
print "Calculating weighted scores..."
for xmin, xmax in map(None, xmins, xmaxs):
for tweet_data in main_data:
if parser.parse(tweet_data['created_at']).hour == xmax:
tweet_data['ws'] = 30 * tweet_data['sentiment'] + 1 * tweet_data['influence'] + 1000 * (xmaxs.index(xmax) + 1)
if parser.parse(tweet_data['created_at']).hour == xmin:
tweet_data['ws'] = -30 * tweet_data['sentiment'] - 1 * tweet_data['influence'] - 1000 * (xmins.index(xmin) + 1)
conversationlist = []
#TODO: generalize for more clusters
# calculate top 5 of ws in different maxima regions
print "Creating clusters of local optima..."
cluster1 = []
cluster2 = []
cluster3 = []
cluster4 = []
for tweet_data in main_data:
ws = tweet_data['ws']
#todo: check for more clusters?
if ws > 1999:
cluster1.append(tweet_data)
if ws > 999 and ws < 1190:
cluster2.append(tweet_data)
if ws < -1001 and ws > -1191:
cluster3.append(tweet_data)
if ws < -1189:
cluster4.append(tweet_data)
print "Sort clusters..."
#todo: check if reverse or not
sorted_cluster1 = sorted(cluster1, key=lambda k: k['ws'], reverse=True)
sorted_cluster2 = sorted(cluster2, key=lambda k: k['ws'], reverse=True)
sorted_cluster3 = sorted(cluster3, key=lambda k: k['ws'])
sorted_cluster4 = sorted(cluster4, key=lambda k: k['ws'])
print sorted_cluster1
print sorted_cluster2
print sorted_cluster3
print sorted_cluster4
#todo get conversation! get original tweet id
print "Creating conversation list..."
conversationlist.extend(sorted_cluster1[:3])
conversationlist.extend(sorted_cluster2[:3])
conversationlist.extend(sorted_cluster3[:3])
conversationlist.extend(sorted_cluster4[:3])
conversations = sorted(conversationlist, key=lambda k: k['created_at'])
for conv in conversations:
print "%s (%s): %s (sent: %f) (klout: %f)" % (conv['username'], conv['created_at'], conv['text'], conv['sentiment'], conv['influence'])
report.conversationlist = conversations
sorted_sentiment = sorted(main_data, key=lambda k: k['sentiment'])
sorted_negative = sorted_sentiment[:5]
sorted_positive = sorted_sentiment[-5:]
report.top5positive = sorted_positive
report.top5negative = sorted_negative
print "Top 5 Positive:"
for conv in sorted_positive:
print "%s (%s): %s (sent: %f) (klout: %f)" % (conv['username'], conv['created_at'], conv['text'], conv['sentiment'], conv['influence'])
print "Top 5 Negative:"
for conv in sorted_negative:
print "%s (%s): %s (sent: %f) (klout: %f)" % (conv['username'], conv['created_at'], conv['text'], conv['sentiment'], conv['influence'])
word_cloud = {}
key_infl = {}
word_sent = {}
word_klout = {}
c = 0
#word cloud
#TODO stop words and stem
#TODO calculate KLOUT, partnership with KLOUT ???
for tweet in main_data:
#for word in word_tokenize(tweet['text']):
for word in tweet['text'].split():
word = word.lower()
if len(word) > 5 and word not in corpus.stopwords.words('dutch') and word[0] != '@' and re.match("^[A-Za-z0-9_-]*(\#)*[A-Za-z0-9_-]*$", word):
print word
if word_cloud.has_key(word):
word_cloud[word] += tweet['sentiment']
else:
word_cloud[word] = tweet['sentiment']
key_infl[word] = tweet['username']
if word_sent.has_key(word):
word_sent[word].append(tweet['sentiment'])
else:
word_sent[word] = list()
word_sent[word].append(tweet['sentiment'])
if not word_klout.has_key(word):
try:
klout = KloutInfluence(tweet['username'].encode('utf-8'))
word_klout[word] = klout.score()
except:
word_klout[word] = -1
c += 1
if DEBUG:
if c > 100:
break
report.word_cloud = sorted(word_cloud.items(), key=lambda k:k[1], reverse=True)
report.key_infl = key_infl
report.word_sent = word_sent
report.word_klout = sorted(word_klout.items(), key=lambda k:k[1], reverse = True)
report.create(MAIN_ENTERPRISE)
|
python
|
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../')))
from src.Utils.Point import Point
from src.Utils.Vector import Vector
from src.Utils.LinearEquation import LinearEquation
import math
"""
This module regroups a lot of class definition that are basic encapsulations
of other concepts. Found here so far :
- Player (Abstract definition of Defender and Opponent)
- Defender
- Opponent
- Shot
- Goal
"""
class Player:
"""
    This class represents any player on the field. It is abstract (even though it doesn't
mean much in Python) and therefore has no reason to be instantiated.
"""
def __init__(self, pos, radius):
"""
Constructs a new 'Player' object.
:param pos: The initial position of the player.
:param radius: The radius of the player.
:return: returns nothing.
"""
self.pos = pos
self.radius = radius
def __str__(self):
"""
Allows the use of print(p) where p is a 'Player' object.
:return: The corresponding string.
"""
res = "Pos: " + str(self.pos) + " Radius: " + str(self.radius)
return res
def collision(self, player, distance=None):
"""
Checks if this player and the given one collide. It can also be used
to check if they are a certain distance apart with the optional parameter.
:param player: The other player to check collisions with.
:param distance (opt): The distance to have between the two robots to not have \
a collision.
:return: True if there is a collision, False otherwise.
"""
if distance == None:
distance = 2 * self.radius
return self.pos.distance(player.pos) < distance
def in_zone(self, bottom_left, top_right):
"""
Checks if this player is in a rectangular zone defined by its bottom left point
and top right point.
:param bottom_left: The bottom left point of the zone.
:param top_right: The top right point of the zone.
:return: True if the player is in the zone, False otherwise.
"""
        return (bottom_left.x <= self.pos.x and self.pos.x <= top_right.x and
                bottom_left.y <= self.pos.y and self.pos.y <= top_right.y)
class Defender(Player):
"""
    This class represents a defender on the field. It is essentially a renaming of Player
    (which is itself little more than a point), except that a radius must be specified.
"""
def __init__(self, pos, radius):
"""
Constructs a new 'Defender' object.
:param pos: The initial position of the defender.
:param radius: The radius of the defender.
:return: returns nothing.
"""
super().__init__(pos, radius)
def is_valid_pos(self, pos_step):
"""
Check if the position of this player is valid regarding the given
step between two positions.
:param pos_step: The distance between two positions next to each other \
in all four cardinal directions.
:return: True if the position is valid, False otherwise.
"""
return not (self.pos.x % pos_step or self.pos.y % pos_step)
class Opponent(Player):
"""
    This class represents an opponent on the field. It is essentially a renaming of Player
    (which is itself little more than a point), except that the radius doesn't need to be specified.
"""
def __init__(self, pos, radius=0):
"""
Constructs a new 'Opponent' object.
:param pos: The initial position of the opponent.
:param radius (opt): The radius of the opponent.
:return: returns nothing.
"""
super().__init__(pos, radius)
class Shot:
"""
This class represents what a shot is, which is an opponent and an angle.
"""
def __init__(self, opponent, angle):
"""
Constructs a new 'Shot' object.
:param opponent: The opponent that is taking the shot.
:param angle: The angle at which the opponent is shooting, with regard to the
origin of the field (in the center).
:return: returns nothing.
"""
self.opponent = opponent
self.angle = angle
def __str__(self):
"""
Allows the use of print(s) where s is a 'Shot' object.
:return: The corresponding string.
"""
res = "Opponent: " + str(self.opponent) + " Angle: " + str(self.angle)
return res
def is_valid_angle(self, theta_step):
"""
Check if the angle of this shot is valid regarding the given
step between two angles.
:param theta_step: The angle between two consecutive angles.
:return: True if the angle is valid, False otherwise.
"""
return not (self.angle % theta_step)
class Goal:
"""
This class represents a Goal. A goal is a defined by two points (to form a segment)
and a vector that defines the orientation of the goal (where you can score from).
"""
def __init__(self, start_pos, end_pos, direction):
"""
Creates a new 'Goal' object.
:param start_pos: The starting point of the segment.
:param end_pos: The ending point of the segment.
:param direction: The orientation of the goal.
:return: returns nothing.
"""
self.s_pos = start_pos
self.e_pos = end_pos
self.dir = direction
def __str__(self):
"""
Allows the use of print(g) where g is a 'Goal' object.
:return: The corresponding string.
"""
res = "Pos 1: " + str(self.s_pos) + " Pos 2: " + str(self.e_pos) + " Dir: " + str(self.dir)
return res
def is_in_interval(self, low, high, value):
"""
        Check if the given value is in the interval [low ; high].
Useful method to make the code easier to read. It is not specific
to this class and could be used in different classes but for now
it will remain here.
:param low: Low bound of the interval.
:param high: High bound of the interval.
:param value: The value to check.
:return: True if value is in the interval, false otherwise.
"""
return low <= value and value <= high
def check_position(self, player):
"""
Checks if the given player is correctly placed with regard to the orientation
of the goal. If the player is 'behind' the goal, then it is not correctly placed,
if it is in front of the goal, then it is correctly placed.
This is done by checking the angle formed between the direction vector of the goal and
the vector going from the center of the goal to the player. This angle must be in
        [-pi/2 ; pi/2] if the player is correctly placed (draw it yourself or check out the paper
about this problem for more information).
:param player: The player to consider.
:return: True if the player is correctly placed, False otherwise.
"""
# Mid point of the segment defining the goal
mid = Point.mid_point(self.s_pos, self.e_pos)
# Transposition of this point by the direction vector of the goal
# to get the direction vector with its origin in the center of the goal
mid_prime = self.dir + mid
# Creating both needed vectors
v1 = Vector.v_from_pp(mid, player.pos)
v2 = Vector.v_from_pp(mid, mid_prime)
# Getting the angle and checking if it is a valid one
angle = v1.angle(v2)
return self.is_in_interval(-math.pi / 2, math.pi / 2, angle)
def check_shot_direction(self, shot):
"""
Checks if the given shot goes towards this goal. To do so,
simply consider that whether the shot is valid or not, for it to be
        going towards the goal, it needs to go towards the half-plane defined by the goal's segment
(well, goal's line in this case, it is considered infinite here). For more information,
check our paper on this subject or try drawing it yourself.
To know if this is the case, the scalar product of the vector of the shot and the
        direction of the goal is checked. They are supposed to be going in opposite directions,
therefore the scalar product must be negative.
:param shot: The shot to consider.
:return: True if the shot goes towards the goal (if it was infinite), False otherwise.
"""
return Vector.v_from_a(shot.angle) * self.dir < 0
def check_shot_on_target(self, shot):
"""
Checks if the shot (abstracted to an infinite line) intersects the goal's
segment.
        To do so, find the intersection point between the shot's corresponding linear equation
        and the goal segment's corresponding linear equation. Then check if this point is
in the goal's segment.
:param shot: The shot to consider.
:return: True if the shot intersects the goal's segment, False otherwise.
"""
# Defining a few variables to ease the reading
# Here we define the x and y interval of the goal's segment
x_min = min(self.s_pos.x, self.e_pos.x)
x_max = max(self.s_pos.x, self.e_pos.x)
y_min = min(self.s_pos.y, self.e_pos.y)
y_max = max(self.s_pos.y, self.e_pos.y)
# Shortening variables names
o_x = shot.opponent.pos.x
o_y = shot.opponent.pos.y
# If the angle = pi / 2 or - pi / 2, then tan(angle) is undefined
# In these cases, the shot is vertical, therefore it is valid
# iff the x coordinate of the opponent is in the goal's x interval
if abs(shot.angle) == math.pi / 2:
return self.is_in_interval(x_min, x_max, o_x)
# If the angle = 0, pi or -pi, then tan(angle) is 0 which can lead to
# undefined intersection points (if the goal is vertical for example)
# although there is an intersection point
#
# In these cases, the shot is horizontal, therefore it is valid
# iff the y coordinate of the opponent is in the goal's y interval
if abs(shot.angle) == math.pi or shot.angle == 0:
return self.is_in_interval(y_min, y_max, o_y)
# Using tan the least amount of time possible, for this is a slow function
tan_theta = math.tan(shot.angle)
# Define the LE of the shot
le1 = LinearEquation(tan_theta, o_y - tan_theta * o_x)
le2 = None
# If the goal is vertical, finding the intersection point
# is not possible using the normal way
#
# That being said, unless the LE of the shot is vertical too (which it
        # isn't, as it is checked beforehand) there has to be an intersection point.
        # This intersection must happen at the x coordinate of the goal's segment,
        # therefore it is possible to compute the y coordinate of the intersection by
        # applying the shot's LE to this x coordinate
#
# Then, the resulting y is valid iff it is in the goal's segment interval
if self.e_pos.x - self.s_pos.x == 0:
y = le1.apply(self.e_pos.x)
return self.is_in_interval(y_min, y_max, y)
# The normal way of solving the intersection of these two LEs
else:
# Shortening variables by computing the coefficient of the goal's LE
ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)
# If the lines are parallels (have the same coefficient) return False
if math.tan(shot.angle) == ratio:
return False
# Defining the goal's LE
le2 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)
# Finding the intersection point of the two LEs
# If there isn't one, return False (but there should be one
            # given all the asserts we do beforehand, this is just for completeness' sake)
p_intersect = le1.intersection(le2)
if p_intersect == None:
return False
            # If the intersection point's abscissa is in the goal's x interval, then
            # the abstracted shot is on target
return self.is_in_interval(x_min, x_max, p_intersect.x)
def is_shot_valid(self, shot):
"""
Checks if a shot is valid (going in the goal) or not. To do so, three
things are checked :
1 -> Is the player ABLE to shoot in the goal, namely is it in front of the goal and not behind?
2 -> Is the shot going towards the half plane defined by the goal?
3 -> Is the linear equation defined by the shot intersecting the goal's segment?
(3) is obviously required. (2) is required because if it isn't checked, the player could shoot
away from the goal and it would be considered valid since in (3) we consider a linear equation
and not a half-line. (1) is required because otherwise it would be true even if the player
shoots from behind the goal.
:param shot: The shot to check.
:return: True if the shot is valid, False otherwise.
"""
a = self.check_position(shot.opponent)
b = self.check_shot_direction(shot)
c = self.check_shot_on_target(shot)
return a and b and c
def shot_intercepted(self, defender, shot):
"""
Checks if the given shot is intercepted by the given player with regard to this goal.
To do so, we check if the circle defined by the player and its radius intersects the
shot. Then, it is checked if the intersection is between the opponent and the goal.
There are plenty of special cases, find more information below.
:param defender: The defender that should intercept the shot.
:param shot: The shot to intercept.
:return: True if the shot is intercepted, False otherwise.
"""
o_x = shot.opponent.pos.x
o_y = shot.opponent.pos.y
le1 = None
le2 = None
p = None
q = None
p = LinearEquation.intersection_circle(shot.opponent, shot.angle, defender.pos, defender.radius)
if p == None:
return False
# If the goal is vertical, solving the intersection won't work
# it is then done "by hand"
if self.e_pos.x - self.s_pos.x == 0:
# If the goal and the shot are vertical, return False
if abs(shot.angle) == math.pi / 2:
return False
# If the angle = 0, pi or -pi, then tan(angle) is 0 which can lead to
# undefined behaviors (namely if the goal is vertical)
#
# In these cases, the shot is horizontal, therefore it is valid
# iff the x coordinate of the intersection point of the defender and the shot
# is between the goal and the opponent x coordinates
if abs(shot.angle) == math.pi or shot.angle == 0:
q = Point(self.e_pos.x, o_y)
return self.is_in_interval(min(q.x, o_x), max(q.x, o_x), p.x)
tan_theta = math.tan(shot.angle)
le2 = LinearEquation(tan_theta, o_y - tan_theta * o_x)
q = Point(self.e_pos.x, le2.apply(self.e_pos.x))
return self.is_in_interval(min(q.x, o_x), max(q.x, o_x), p.x)
# If the goal is not vertical, it is now possible to define the coefficient
# of the goal's LE
ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)
# If the shot is parallel to the goal (same coefficient) it doesn't
# matter if it is intercepted (this method should only be used
# with valid shot in the first place, this is just for completion sake)
if math.tan(shot.angle) == ratio:
return False
# LE of the goal
le1 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)
# If the angle = pi / 2 or - pi / 2, then tan(angle) is undefined
# In these cases, the shot is vertical, therefore it is valid
# iff the y coordinate of the intersection point of the defender and the shot
# is between the goal and the opponent
if abs(shot.angle) == math.pi / 2:
q = Point(o_x, le1.apply(o_x))
return self.is_in_interval(min(q.y, o_y), max(q.y, o_y), p.y)
# If the angle = 0, pi or -pi, then tan(angle) is 0 which can lead to
# undefined behaviors (namely if the goal is vertical)
#
# In these cases, the shot is horizontal, therefore it is valid
# iff the x coordinate of the intersection point of the defender and the shot
        # is between the goal and the opponent x coordinates
if abs(shot.angle) == math.pi or shot.angle == 0:
q = Point(le1.reverse(o_y), o_y)
return self.is_in_interval(min(q.x, o_x), max(q.x, o_x), p.x)
tan_theta = math.tan(shot.angle)
# LE of the shot
le2 = LinearEquation(tan_theta, o_y - tan_theta * o_x)
# Find the intersection of the two lines and check if the defender
# is between this point and the opponent
q = le1.intersection(le2)
return self.is_in_interval(min(q.x, o_x), max(q.x, o_x), p.x)
def shot_intercepted_with_speed(self, defender, shot, ball_speed, player_speed):
"""
Checks if the given defender intercepts the given shot wrt this goal.
This method also takes into account that the defender can move.
:param defender: The defender that should intercept the shot.
        :param shot: The shot to intercept.
        :param ball_speed: The speed of the ball along the shot.
        :param player_speed: The speed at which the defender can move.
:return: True if the shot is intercepted, False otherwise.
"""
o_x = shot.opponent.pos.x
o_y = shot.opponent.pos.y
le1 = None
le2 = None
q = None
p_inter = None
tan_theta = None
# If the angle = pi / 2 or - pi / 2, then tan(angle) is undefined
# In these cases, the shot is vertical, therefore it is valid
# iff the y coordinate of the intersection point of the defender and the shot
# is between the goal and the opponent
if abs(shot.angle) == math.pi / 2:
p_inter = Point(shot.opponent.pos.x, defender.pos.y)
else:
tan_theta = math.tan(shot.angle)
# LE of the shot
le2 = LinearEquation(tan_theta, o_y - tan_theta * o_x)
# check if this point is reachable by the defender quickly enough
# first get the intersection point of the shot and the shortest line from the
# defender to the shot
p_inter = LinearEquation.perpendicular_intersection_point_line(le2, defender.pos)
# compute the distances between p_inter and the defender/opponent
d_opponent = p_inter.distance(shot.opponent.pos)
d_defender = p_inter.distance(defender.pos)
# check that the defender can reach this point before the ball
# if not, this defender isn't correct
if not ((d_defender) / player_speed <= d_opponent / ball_speed):
return False
# If the goal is vertical, solving the intersection won't work
# it is then done "by hand"
if self.e_pos.x - self.s_pos.x == 0:
return self.is_in_interval(min(self.e_pos.x, o_x), max(self.e_pos.x, o_x), p_inter.x)
# If the goal is not vertical, it is now possible to define the coefficient
# of the goal's LE
ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)
# If the shot is parallel to the goal (same coefficient) it doesn't
# matter if it is intercepted (this method should only be used
# with valid shot in the first place, this is just for completion sake)
if math.tan(shot.angle) == ratio:
return False
# LE of the goal
le1 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)
# Find the intersection of the two lines and check if the defender
# is between this point and the opponent
q = le1.intersection(le2)
return self.is_in_interval(min(q.x, o_x), max(q.x, o_x), p_inter.x)
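
# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of how these classes fit together. Point(x, y) and
# Vector(x, y) constructors from src.Utils are assumed here; angles are in
# radians, matching the methods above.
if __name__ == "__main__":
    goal = Goal(Point(-0.5, -1.0), Point(0.5, -1.0), Vector(0.0, 1.0))
    striker = Opponent(Point(0.0, 1.0))
    shot = Shot(striker, -math.pi / 2)          # shooting straight at the goal line
    keeper = Defender(Point(0.0, 0.0), 0.1)
    print(goal.is_shot_valid(shot))             # should be True: the shot faces the goal
    print(goal.shot_intercepted(keeper, shot))  # True if the keeper's circle blocks the shot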
|
python
|
#!/usr/bin/env python
import glob
import yaml
import sys
import argparse
import cparser
import generator
import json
from clang.cindex import Index, CursorKind, Config
import codecs
import re
import os
from typing import List, Dict
file_cache = {}
rules = [
[r'@c\s+(\w+)', 'inlinecode'],
[r'\s*[@\\]code(.*?)\s+[@\\]endcode\s*', 'blockcode'],
[r'\\f\\\((.*?)\\f\\\)', 'inlinemath'],
[r'\\f\$(.*?)\\f\$', 'inlinemath'],
[r'\s*\\f\[(.*?)\\f\]\s*', 'blockmath'],
[r'@param\s+(\w+)', 'param'],
]
def parse_description(s):
if isinstance(s, str):
for rule in rules:
m = re.search(rule[0], s, re.MULTILINE | re.DOTALL)
if m is not None:
prefix = s[:m.start()]
match = remove_padding(m.group(1)).strip()
postfix = s[m.end():]
return parse_description([prefix, {rule[1]: match}, postfix])
return s
elif isinstance(s, List):
r = []
for ss in s:
if isinstance(ss, str):
rr = parse_description(ss)
if isinstance(rr, str):
if len(rr) > 0:
r.append(rr)
else:
r.extend(rr)
else:
r.append(ss)
return r
else:
return s
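
# Illustrative example (added, not part of the original module): a doxygen-style
# comment such as
#     "Scales the input. \f$y = a x\f$ @param x input value"
# is split by the rules above into roughly
#     ['Scales the input. ', {'inlinemath': 'y = a x'}, ' ', {'param': 'x'}, ' input value']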
def clean_text(str):
str = str.replace('\t', ' ')
str = str.replace('\r', '')
return str
def remove_padding(s):
lines = s.splitlines()
minpadding = 100
for l in lines:
if len(l) > 0:
minpadding = min(minpadding, len(l) - len(l.lstrip(' ')))
if minpadding == 100:
return s
lines = [l[minpadding:] for l in lines]
return '\n'.join(lines)
def get_location(node):
if node is None:
return ''
if node.location is None:
return ''
if node.location.file is None:
return ''
return node.location.file.name
def get_location_line(node):
if node is None:
return -1
if node.location is None:
return -1
return node.location.line
def get_source(cursor):
assert cursor.extent.start.file.name == cursor.extent.end.file.name
filename = cursor.extent.start.file.name
if filename not in file_cache:
file_cache[filename] = codecs.open(
filename, 'r', encoding="utf-8").read()
file_content = file_cache[filename].encode('utf-8')
bytes = ' ' * (cursor.extent.start.column - 1) + clean_text(
file_content[cursor.extent.start.offset:cursor.extent.end.offset].decode('utf-8'))
return remove_padding(bytes)
def clean_comment(s):
s = s.strip()
if s.startswith('///<'):
return remove_padding(s[4:])
elif s.startswith('///'):
return remove_padding(re.sub(r'^\s*///', '', s, flags=re.MULTILINE))
elif s.startswith('/**'):
return remove_padding(re.sub(r'^\s*\*( |$)', '', s[3:-2], flags=re.MULTILINE))
return s
def replace_macros(s: str, macros: Dict):
for key, value in macros.items():
s = re.sub(r'\b'+key+r'\b', value, s)
return s
def same_location(x, y):
return x == y
def class_name(node):
template = []
for c in node.get_children():
if c.kind in [CursorKind.TEMPLATE_TYPE_PARAMETER, CursorKind.TEMPLATE_NON_TYPE_PARAMETER]:
template.append(get_source(c))
if template:
template = 'template <' + ', '.join(template) + '>'
else:
template = ''
return template + node.spelling
def source_to_definition(source):
source = re.sub(r'^(.*?)\{.*', r'\1', source, flags=re.DOTALL).strip()
return source
def parse_index(root_path, index: List[Dict], node, root_location, group: str, ns: str = '', macros={}):
source = ''
if node.brief_comment is not None:
source = get_source(node)
definition = source_to_definition(replace_macros(source, macros))
entity: Dict = {}
if node.kind in [CursorKind.FUNCTION_TEMPLATE, CursorKind.FUNCTION_DECL, CursorKind.CXX_METHOD, CursorKind.CONSTRUCTOR, CursorKind.DESTRUCTOR]:
entity['type'] = 'function'
entity['name'] = node.spelling
entity['definition'] = definition
elif node.kind in [CursorKind.CLASS_TEMPLATE, CursorKind.CLASS_DECL, CursorKind.STRUCT_DECL, CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION]:
entity['type'] = 'class'
entity['name'] = node.spelling
entity['definition'] = class_name(node)
entity['content'] = []
elif node.kind in [CursorKind.ENUM_DECL]:
entity['type'] = 'enum'
entity['name'] = node.spelling
entity['definition'] = definition
entity['content'] = []
elif node.kind in [CursorKind.ENUM_CONSTANT_DECL]:
entity['type'] = 'enumerator'
entity['name'] = node.spelling
entity['definition'] = definition
elif node.kind in [CursorKind.TYPEDEF_DECL, CursorKind.TYPE_ALIAS_DECL, CursorKind.TYPE_ALIAS_TEMPLATE_DECL]:
entity['type'] = 'typedef'
entity['name'] = node.spelling
entity['definition'] = re.sub(r'(^|\s+)using\s+', r'', definition)
elif node.kind in [CursorKind.VAR_DECL, CursorKind.UNEXPOSED_DECL]:
entity['type'] = 'variable'
entity['name'] = node.spelling
entity['definition'] = definition
elif node.kind in [CursorKind.NAMESPACE]:
entity['type'] = 'namespace'
entity['name'] = node.displayname
entity['definition'] = definition
entity['source'] = definition + ' { ... }'
elif node.kind in [CursorKind.USING_DECLARATION]:
entity['type'] = 'function'
entity['name'] = node.spelling
entity['definition'] = definition
else:
print('warning: Unknown cursor kind: {} for {}'.format(
node.kind, node.displayname))
return
entity['qualifiedname'] = re.sub('^::', '', ns + '::' + entity['name'])
if 'source' not in entity:
entity['source'] = source
entity['file'] = os.path.relpath(
get_location(node), root_path).replace('\\', '/')
entity['line'] = get_location_line(node)
description = clean_comment(clean_text(node.raw_comment))
m = re.match(r'[@\\]copybrief\s+([a-zA-Z0-9:\._-]+)',
description.strip())
if m:
copyFrom = m.group(1)
print("Copying from {}".format(copyFrom))
description = {"copy": copyFrom}
else:
description = re.sub(r'\s*@brief\s*', '', description)
description = parse_description(description)
entity['description'] = description
index.append(entity)
entity['group'] = group
if 'content' in entity:
index = entity['content']
    if node.kind == CursorKind.NAMESPACE:
        ns = ns + '::' + node.spelling
    if node.kind in [CursorKind.CLASS_TEMPLATE, CursorKind.CLASS_DECL, CursorKind.STRUCT_DECL, CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION]:
        ns = ns + '::' + node.spelling
    if node.kind in [CursorKind.ENUM_DECL]:
        ns = ns + '::' + node.spelling
for c in node.get_children():
if same_location(get_location(c), root_location):
parse_index(root_path, index, c, root_location, group, ns, macros)
def parse(root_path, filenames: List[str], clang_args: List[str], macros={}):
index = []
for filename in filenames:
print('Parsing ' + filename)
group = ''
with open(filename, 'r', encoding='utf-8') as strm:
text = strm.read()
m = re.search(r'@addtogroup\s+([a-zA-Z0-9_-]+)', text)
if m:
group = m.group(1)
clangIndex = Index.create()
tu = clangIndex.parse(None, [filename.replace('\\', '/')] + clang_args)
if not tu:
print('Unable to load input')
exit(1)
if len(tu.diagnostics):
print('------------DIAGNOSTICS---------------')
for diag in tu.diagnostics:
print(diag)
print('------------/DIAGNOSTICS---------------')
count = len(index)
parse_index(root_path, index, tu.cursor,
tu.cursor.displayname, group, '', macros)
print(' Found {} entities'.format(len(index) - count))
return index
if __name__ == '__main__':
import subprocess
parser = argparse.ArgumentParser(
description='Parse C++ sources to generate index')
parser.add_argument('config_path', help='path to configuration file (YML)')
parser.add_argument('output_path',
help='path where generated index will be written (JSON)')
parser.add_argument('--libclang', help='libclang path (.dll or .so)')
parser.add_argument(
'--git', help='Retrieve commit hash and branch', action='store_true')
args = parser.parse_args()
if args.libclang:
Config.set_library_file(args.libclang)
clang_args = []
config = None
defaults = {'clang': {'arguments': []}, 'repository': '', 'postprocessor': {'ignore': []}, 'masks': [
'**/*.hpp', '**/*.cpp', '**/*.cxx', '**/*.hxx', '**/*.h'], 'groups': {}}
    config = yaml.safe_load(open(args.config_path, 'r', encoding='utf-8'))
print(config)
config = {**defaults, **config}
print('args.config_path: ', args.config_path)
print('os.path.dirname(args.config_path): ', os.path.dirname(args.config_path))
print('os.path.dirname(args.config_path): ', os.path.join(os.path.dirname(args.config_path), config['input_directory']))
input_dir = os.path.normpath(os.path.join(os.path.dirname(
args.config_path), config['input_directory'])) + os.path.sep
print('Input directory:', input_dir)
clang_args = config['clang']['arguments']
print('Clang arguments:', clang_args)
macros = config['postprocessor']['ignore']
print('Ignore macros:', macros)
git_tag = ''
if args.git:
git_tag = subprocess.check_output(
['git', 'describe', '--always', '--abbrev=0'], cwd=input_dir).strip()
git_tag = codecs.decode(git_tag)
print('GIT:')
print(git_tag)
file_masks = config['masks']
filenames = []
for mask in file_masks:
filenames += glob.glob(input_dir + mask, recursive=True)
print('Found', len(filenames), 'files')
macros = {k: '' for k in macros}
index = cparser.parse(input_dir, filenames, clang_args, macros)
index = {'index': index, 'git_tag': git_tag, 'repository': config['repository'].replace(
'{TAG}', git_tag), 'groups': config['groups']}
json.dump(index, open(args.output_path, 'w', encoding='utf-8'), indent=4)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by yetongxue<[email protected]>
import socket
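
# Minimal blocking TCP client: reads a line from stdin, sends it UTF-8 encoded
# to a server listening on 127.0.0.1:8001 and prints the decoded reply.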
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', 8001))
while True:
re_data = input()
client.send(re_data.encode('utf8'))
data = client.recv(1024)
print(data.decode('utf8'))
|
python
|
from gym_nav.envs.nav_env import NavEnv
from gym_nav.envs.multi_nav_env import MultiNavEnv
|
python
|
from .get_data import get_all_data, get_data_from_api
from ..dict_as_attribute import DictAsObj as DictToObj
class Items:
states = {}
countries = {}
total = {}
data = get_data_from_api()
for entity in data:
if 'Countries' == entity:
countries.update(data[entity])
elif 'States' == entity:
states.update(data[entity])
elif 'TOTAL' == entity:
total.update(data[entity])
data = {}
data.update(countries)
data.update(states)
data.update({'Total'.upper(): total})
def __init__(self, s):
self.fullJSON = self.data
self.caller = s
class Item(Items):
def rtrn_item_json(self, name=None):
self.json = self.fullJSON[self.caller]
def rtrn_data(self):
self.rtrn_item_json()
self._confirmed()
self._deaths()
self._recovered()
def _confirmed(self):
self.confirmed = self.json['Confirmed']
self.cases = self.confirmed
def _deaths(self):
self.deaths = self.json['Deaths']
def _recovered(self):
self.recovered = self.json['Recovered']
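
# --- Hedged usage sketch (not part of the original module) ---
# 'TOTAL' is the one key this module itself guarantees; the Confirmed/Deaths/
# Recovered fields are assumed to be present in the API payload, as the
# accessors above expect.
#
#     total = Item('TOTAL')
#     total.rtrn_data()
#     print(total.confirmed, total.deaths, total.recovered)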
|
python
|
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from scipy import stats
import math
_DEFAULT_THRESHOLD = 1e-10
def scale(a, mul):
return [x * mul for x in a]
def cmp(a, b):
return stats.ttest_ind(a, b)
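
# speedup() works by scaling `old` by (1 - pct/100) or (1 + pct/100), one
# percent at a time, until the t-test against `new` either loses significance
# or flips sign; the signed pct reached is returned (0 when the two samples are
# not significantly different to begin with).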
def speedup(new, old, threshold = _DEFAULT_THRESHOLD):
if (len(set(new))) == 1 and new == old: return 0
s0, p0 = cmp(new, old)
if math.isnan(p0): return 0
if s0 == 0: return 0
if p0 > threshold: return 0
if s0 < 0:
pct = 1
while pct < 100:
sp, pp = cmp(new, scale(old, 1 - pct / 100.0))
if sp > 0: break
if pp > threshold: break
pct += 1
return -(pct - 1)
else:
pct = 1
while pct < 10000:
sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
if sp < 0: break
if pp > threshold: break
pct += 1
return pct - 1
if __name__ == "__main__":
new = [0.0, 0.0, 0.0, 0.0]
old = [2.96608e-06, 3.35076e-06, 3.45384e-06, 3.34407e-06]
print speedup(new, old, 1e-5)
print speedup(old, new, 1e-5)
|
python
|
#!/usr/bin/python3
"""
Copyright 2018-2019 Firmin.Sun ([email protected])
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lxml.etree import Element, SubElement, tostring
from xml.dom.minidom import parseString
import os
import cv2
class ImageInfo(object):
def __init__(self,width,height,path,name,image_extension,channel=3):
self.width = width
self.height = height
self.path = path
self.name = name
self.image_extension = image_extension
self.channel = channel
def save_image(self,out_path, image):
# try:
# image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
# except Exception as ex:
# print(out_path)
#
try:
if out_path is not None:
dir = os.path.dirname(out_path)
if not os.path.exists(dir):
os.makedirs(dir)
cv2.imwrite(out_path, image)
except Exception as ex:
print(ex)
def save_annotations(self,save_dir, boxes, labels):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
dom = self.make_xml( boxes, labels)
xml_path = os.path.join(save_dir, self.name + '.xml')
with open(xml_path, 'w+') as f:
dom.writexml(f, addindent='', newl='', encoding='utf-8')
def make_xml(self, boxes, labels):
node_root = Element('annotation')
node_folder = SubElement(node_root, 'folder')
node_folder.text = 'JPEGImages'
node_filename = SubElement(node_root, 'filename')
node_filename.text = self.name + '.' + self.image_extension
node_path = SubElement(node_root, 'path')
node_path.text = self.path
node_size = SubElement(node_root, 'size')
node_width = SubElement(node_size, 'width')
node_width.text = str(self.width)
node_height = SubElement(node_size, 'height')
node_height.text = str(self.height)
node_depth = SubElement(node_size, 'depth')
node_depth.text = str(self.channel)
node_segmented = SubElement(node_root, 'segmented')
node_segmented.text = '0'
for i in range(len(labels)):
label = labels[i]
b = boxes[i].split(',')
node_object = SubElement(node_root, 'object')
node_name = SubElement(node_object, 'name')
caption = "{}".format(label)
node_name.text = caption
node_pose = SubElement(node_object, 'pose')
node_pose.text = 'Unspecified'
node_truncated = SubElement(node_object, 'truncated')
node_truncated.text = '0'
node_difficult = SubElement(node_object, 'difficult')
node_difficult.text = '0'
node_bndbox = SubElement(node_object, 'bndbox')
node_xmin = SubElement(node_bndbox, 'xmin')
node_xmin.text = str(int(b[0]))
node_ymin = SubElement(node_bndbox, 'ymin')
node_ymin.text = str(int(b[1]))
node_xmax = SubElement(node_bndbox, 'xmax')
node_xmax.text = str(int(b[2]))
node_ymax = SubElement(node_bndbox, 'ymax')
node_ymax.text = str(int(b[3]))
xml = tostring(node_root, pretty_print=True)
dom = parseString(xml)
return dom
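
# --- Hedged usage sketch (not part of the original module) ---
# Boxes are passed as "xmin,ymin,xmax,ymax" strings, as make_xml() expects;
# the paths below are placeholders.
if __name__ == '__main__':
    info = ImageInfo(width=640, height=480,
                     path='/data/JPEGImages/sample.jpg',
                     name='sample', image_extension='jpg')
    info.save_annotations('/data/Annotations',
                          boxes=['48,24,320,240'], labels=['person'])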
|
python
|
import json
from typing import Any
import pytest
from pydantic import BaseModel, ConfigError, NoneBytes, NoneStr, ValidationError, pretty_errors
class UltraSimpleModel(BaseModel):
a: float = ...
b: int = 10
def test_ultra_simple_success():
m = UltraSimpleModel(a=10.2)
assert m.a == 10.2
assert m.b == 10
def test_ultra_simple_missing():
with pytest.raises(ValidationError) as exc_info:
UltraSimpleModel()
assert """\
1 error validating input
a:
field required (error_type=Missing)""" == str(exc_info.value)
def test_ultra_simple_failed():
with pytest.raises(ValidationError) as exc_info:
UltraSimpleModel(a='x', b='x')
assert """\
2 errors validating input
a:
could not convert string to float: 'x' (error_type=ValueError track=float)
b:
invalid literal for int() with base 10: 'x' (error_type=ValueError track=int)\
""" == str(exc_info.value)
def test_ultra_simple_repr():
m = UltraSimpleModel(a=10.2)
assert repr(m) == '<UltraSimpleModel a=10.2 b=10>'
assert repr(m.fields['a']) == ("<Field a: type='float', required=True, "
"validators=['float', 'number_size_validator']>")
assert dict(m) == {'a': 10.2, 'b': 10}
def test_comparing():
m = UltraSimpleModel(a=10.2, b='100')
assert m == {'a': 10.2, 'b': 100}
assert m == UltraSimpleModel(a=10.2, b=100)
class ConfigModel(UltraSimpleModel):
class Config:
raise_exception = False
def test_config_doesnt_raise():
m = ConfigModel()
assert len(m.errors) == 1
assert m.errors['a'].exc.args[0] == 'field required'
assert m.config.raise_exception is False
assert m.config.max_anystr_length == 65536
def test_nullable_strings_success():
class NoneCheckModel(BaseModel):
existing_str_value = 'foo'
required_str_value: str = ...
required_str_none_value: NoneStr = ...
existing_bytes_value = b'foo'
required_bytes_value: bytes = ...
required_bytes_none_value: NoneBytes = ...
m = NoneCheckModel(
required_str_value='v1',
required_str_none_value=None,
required_bytes_value='v2',
required_bytes_none_value=None,
)
assert m.required_str_value == 'v1'
assert m.required_str_none_value is None
assert m.required_bytes_value == b'v2'
assert m.required_bytes_none_value is None
def test_nullable_strings_fails():
class NoneCheckModel(BaseModel):
existing_str_value = 'foo'
required_str_value: str = ...
required_str_none_value: NoneStr = ...
existing_bytes_value = b'foo'
required_bytes_value: bytes = ...
required_bytes_none_value: NoneBytes = ...
class Config:
raise_exception = False
m = NoneCheckModel(
required_str_value=None,
required_str_none_value=None,
required_bytes_value=None,
required_bytes_none_value=None,
)
assert """\
{
"required_bytes_value": {
"error_msg": "None is not an allow value",
"error_type": "TypeError",
"index": null,
"track": "bytes"
},
"required_str_value": {
"error_msg": "None is not an allow value",
"error_type": "TypeError",
"index": null,
"track": "str"
}
}""" == json.dumps(pretty_errors(m.errors), indent=2, sort_keys=True)
class RecursiveModel(BaseModel):
grape: bool = ...
banana: UltraSimpleModel = ...
def test_recursion():
m = RecursiveModel(grape=1, banana={'a': 1})
assert m.grape is True
assert m.banana.a == 1.0
assert m.banana.b == 10
assert repr(m) == '<RecursiveModel grape=True banana=<UltraSimpleModel a=1.0 b=10>>'
def test_recursion_fails():
with pytest.raises(ValidationError):
RecursiveModel(grape=1, banana=123)
class PreventExtraModel(BaseModel):
foo = 'whatever'
class Config:
ignore_extra = False
def test_prevent_extra_success():
m = PreventExtraModel()
assert m.foo == 'whatever'
m = PreventExtraModel(foo=1)
assert m.foo == '1'
def test_prevent_extra_fails():
with pytest.raises(ValidationError) as exc_info:
PreventExtraModel(foo='ok', bar='wrong', spam='xx')
assert exc_info.value.message == '2 errors validating input'
assert """\
bar:
extra fields not permitted (error_type=Extra)
spam:
extra fields not permitted (error_type=Extra)""" == exc_info.value.display_errors
class InvalidValidator:
@classmethod
def get_validators(cls):
yield cls.has_wrong_arguments
@classmethod
def has_wrong_arguments(cls, value, bar):
pass
def test_invalid_validator():
with pytest.raises(ConfigError) as exc_info:
class InvalidValidatorModel(BaseModel):
x: InvalidValidator = ...
assert exc_info.value.args[0].startswith('Invalid signature for validator')
def test_no_validator():
with pytest.raises(ConfigError) as exc_info:
class NoValidatorModel(BaseModel):
x: object = ...
assert exc_info.value.args[0] == "no validator found for <class 'object'>"
def test_unable_to_infer():
with pytest.raises(ConfigError) as exc_info:
class InvalidDefinitionModel(BaseModel):
x = None
assert exc_info.value.args[0] == 'unable to infer type for attribute "x"'
def test_not_required():
class Model(BaseModel):
a: float = None
assert Model(a=12.2).a == 12.2
assert Model().a is None
assert Model(a=None).a is None
def test_infer_type():
class Model(BaseModel):
a = False
b = ''
c = 0
assert Model().a is False
assert Model().b == ''
assert Model().c == 0
def test_allow_extra():
class Model(BaseModel):
a: float = ...
class Config:
allow_extra = True
assert Model(a='10.2', b=12).values == {'a': 10.2, 'b': 12}
def test_set_attr():
m = UltraSimpleModel(a=10.2)
assert m.values == {'a': 10.2, 'b': 10}
m.setattr('b', 20)
assert m.values == {'a': 10.2, 'b': 20}
def test_set_attr_invalid():
m = UltraSimpleModel(a=10.2)
assert m.values == {'a': 10.2, 'b': 10}
with pytest.raises(ValueError) as exc_info:
m.setattr('c', 20)
assert '"UltraSimpleModel" object has no field "c"' in str(exc_info)
def test_any():
class AnyModel(BaseModel):
a: Any = 10
assert AnyModel().a == 10
assert AnyModel(a='foobar').a == 'foobar'
def test_alias():
class Model(BaseModel):
a = 'foobar'
class Config:
fields = {
'a': {'alias': '_a'}
}
assert Model().a == 'foobar'
assert Model().values == {'a': 'foobar'}
assert Model(_a='different').a == 'different'
assert Model(_a='different').values == {'a': 'different'}
|
python
|
from accountancy.helpers import sort_multiple
from nominals.models import NominalTransaction
from vat.models import VatTransaction
from cashbook.models import CashBookLine
def create_lines(line_cls, header, lines):
# DO WE NEED THIS?
tmp = []
for i, line in enumerate(lines):
line["line_no"] = i + 1
line["header"] = header
tmp.append(line_cls(**line))
return line_cls.objects.bulk_create(tmp)
def create_nom_trans(nom_tran_cls, line_cls, header, lines, bank_nominal, vat_nominal):
nom_trans = []
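    # Three NominalTransaction rows are built per line (goods, vat and total);
    # the 3 * i indexing after bulk_create relies on every line carrying both
    # a goods and a vat value.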
for line in lines:
if line.goods:
nom_trans.append(
nom_tran_cls(
module="CB",
header=header.pk,
line=line.pk,
nominal=line.nominal,
value=-1 * line.goods,
ref=header.ref,
period=header.period,
date=header.date,
field="g",
type=header.type
)
)
if line.vat:
nom_trans.append(
nom_tran_cls(
module="CB",
header=header.pk,
line=line.pk,
nominal=vat_nominal,
value=-1 * line.vat,
ref=header.ref,
period=header.period,
date=header.date,
field="v",
type=header.type
)
)
if line.goods or line.vat:
nom_trans.append(
nom_tran_cls(
module="CB",
header=header.pk,
line=line.pk,
nominal=bank_nominal,
value=line.goods + line.vat,
ref=header.ref,
period=header.period,
date=header.date,
field="t",
type=header.type
)
)
nom_trans = NominalTransaction.objects.bulk_create(nom_trans)
nom_trans = sort_multiple(nom_trans, *[(lambda n: n.line, False)])
goods_and_vat = nom_trans[:-1]
for i, line in enumerate(lines):
line.goods_nominal_transaction = nom_trans[3 * i]
line.vat_nominal_transaction = nom_trans[(3 * i) + 1]
line.total_nominal_transaction = nom_trans[(3 * i) + 2]
line_cls.objects.bulk_update(
lines,
["goods_nominal_transaction", "vat_nominal_transaction",
"total_nominal_transaction"]
)
def create_cash_book_trans(cash_book_tran_cls, header):
cash_book_tran_cls.objects.create(
module="CB",
header=header.pk,
line=1,
value=header.total,
ref=header.ref,
period=header.period,
date=header.date,
field="t",
cash_book=header.cash_book,
type=header.type
)
def create_vat_transactions(header, lines):
vat_trans = []
for line in lines:
vat_trans.append(
VatTransaction(
header=header.pk,
line=line.pk,
module="CB",
ref=header.ref,
period=header.period,
date=header.date,
field="v",
tran_type=header.type,
vat_type=header.vat_type,
vat_code=line.vat_code,
vat_rate=line.vat_code.rate,
goods=line.goods,
vat=line.vat
)
)
vat_trans = VatTransaction.objects.bulk_create(vat_trans)
vat_trans = sort_multiple(vat_trans, *[(lambda v: v.line, False)])
lines = sort_multiple(lines, *[(lambda l: l.pk, False)])
for i, line in enumerate(lines):
line.vat_transaction = vat_trans[i]
CashBookLine.objects.bulk_update(lines, ["vat_transaction"])
|
python
|
import random
from django.http import Http404, JsonResponse
from django.shortcuts import render
from .models import Tweet
def home_view(request, *args, **kwargs):
return render(request, "pages/home.html", context={}, status=200)
def tweet_list_view(request, *args, **kwargs):
qs = Tweet.objects.all()
tweets_list = [{"id": x.id, "content": x.content, "likes": random.randint(0, 100)} for x in qs]
data = {
"is_user": False,
"response": tweets_list
}
return JsonResponse(data)
def tweet_detail_view(request, tweet_id, *args, **kwargs):
data = {
"id": tweet_id,
}
status = 200
try:
obj = Tweet.objects.get(id=tweet_id)
data['content'] = obj.content
    except Tweet.DoesNotExist:
data['message'] = "Not found"
status = 404
    return JsonResponse(data, status=status)
|
python
|
from models.db import db
from models.post import Post
from flask_restful import Resource
from flask import request
from sqlalchemy.orm import joinedload
from resources.s3 import *
class Posts(Resource):
def get(self):
posts = Post.find_all()
return posts
def post(self):
data = request.get_json()
params = {}
for k in data.keys():
params[k] = data[k]
post = Post(**params)
post.create()
return post.json(), 201
class PostDetail(Resource):
def get(self, post_id):
post = Post.query.options(joinedload(
'user')).filter_by(id=post_id).first()
return {**post.json(), 'user': post.user.json()}
def delete(self, post_id):
post = Post.find_by_id(post_id)
if not post:
return {"msg": "Not found"}
db.session.delete(post)
db.session.commit()
return {"msg": "Post Deleted", "payload": post_id}
class PostActions(Resource):
def put(self, post_id):
post = Post.find_by_id(post_id)
if not post:
return {"msg": "Not found"}
post.claps += 1
db.session.commit()
return post.json()
class PostImage(Resource):
def post(self):
file = request.files['file']
bucket.Object(file.filename).put(Body=file)
return "uploaded"
|
python
|
from .base import BaseAttack
from .fgsm import FGSMAttack
|
python
|
"""
rg_utils: helper methods for working with Robustness Gym from Python
"""
import pandas as pd
import re
import robustnessgym as rg
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
def update_pred(dp, model, dp_only=False):
""" Updating data panel with model prediction"""
model.predict_batch(dp, ["sentence"])
dp = dp.update(
lambda x: model.predict_batch(x, ["sentence"]),
batch_size=4,
is_batched_fn=True,
pbar=True,
)
if dp_only:
return dp
labels = pd.Series(["Negative Sentiment", "Positive Sentiment"])
probs = pd.Series(dp.__dict__["_data"]["probs"][0])
pred = pd.concat([labels, probs], axis=1)
pred.columns = ["Label", "Probability"]
return (dp, pred)
def remove_slice(bench, slice_name="user_data"):
""" Remove a slice from the rg dev bench"""
# slices and identifiers are in the same order
slice_list = []
slice_identifier = []
for i in bench.__dict__["_slices"]:
# look-up the term
name = str(i.__dict__["_identifier"])
if not re.search("new_words", name):
slice_list = slice_list + [i]
slice_identifier = slice_identifier + [name]
    # metrics put data in a different order
metrics = {}
for key in bench.metrics["model"].keys():
if not re.search("new_words", key):
metrics[key] = bench.metrics["model"][key]
# slice table, repeat for sanity check
# slice_table = {}
# for key in bench.__dict__["_slice_table"].keys():
# key = str(key)
# if not re.search("new_words",key):
# slice_table[key] = bench.__dict__["_slice_table"][key]
bench.__dict__["_slices"] = set(slice_list)
bench.__dict__["_slice_identifiers"] = set(slice_identifier)
# bench.__dict__["_slice_table"] = set(slice_identifier)
bench.metrics["model"] = metrics
return bench
def add_slice(bench, table, model, slice_name="user_data"):
""" Adds a custom slice to RG """
# do it this way or it complains
dp = rg.DataPanel(
{
"sentence": table["sentence"].tolist(),
"label": table["label"].tolist(),
"pred": table["pred"].tolist(),
}
)
# dp._identifier = slice_name
# get prediction
# add to bench
# bench.add_slices([dp])
return dp
def new_bench():
""" Create new rg dev bench"""
bench = rg.DevBench()
bench.add_aggregators(
{
# Every model can be associated with custom metric calculation functions
#'distilbert-base-uncased-finetuned-sst-2-english': {
"model": {
# This function uses the predictions we stored earlier to calculate accuracy
#'accuracy': lambda dp: (dp['label'].round() == dp['pred'].numpy()).mean()
#'f1' : lambda dp: f1_score(dp['label'].round(),dp['pred'],average='macro',zero_division=1),
"recall": lambda dp: recall_score(
dp["label"].round(), dp["pred"], average="macro", zero_division=1
),
"precision": lambda dp: precision_score(
dp["label"].round(), dp["pred"], average="macro", zero_division=1
),
"accuracy": lambda dp: accuracy_score(dp["label"].round(), dp["pred"]),
}
}
)
return bench
def get_sliceid(slices):
""" Because RG stores data in a silly way"""
ids = []
for slice in list(slices):
ids = ids + [slice._identifier]
return ids
def get_sliceidx(slice_ids,name):
""" get the index from an rg slice"""
if name == "xyz_train":
idx = [i for i, elem in enumerate(slice_ids) if ("split=train" in str(elem)) ] #and len(str(elem).split("->")) == 1)]
elif name == "xyz_test":
idx = [i for i, elem in enumerate(slice_ids) if ("split=test" in str(elem)) ] #and len(str(elem).split("->")) == 1)]
else:
idx = [i for i, elem in enumerate(slice_ids) if name in str(elem)]
return idx[0]
def get_prob(x,i):
""" Helper to get probability"""
return(float(x[i]))
def slice_to_df(data):
""" Convert slice to dataframe"""
df = pd.DataFrame(
{
"sentence": list(data["sentence"]),
"model label": ["Positive Sentiment" if int(round(x)) == 1 else "Negative Sentiment" for x in data["label"]],
"model binary": [int(round(x)) for x in data["label"]],
}
)
prob = []
for i in range(0, len(data['probs'])):
prob.append(get_prob(data['probs'][i],df["model binary"][i]))
df["probability"] = prob
return df
def metrics_to_dict(metrics, slice_name):
""" Convert metrics to dataframe"""
all_metrics = {slice_name: {}}
all_metrics[slice_name]["metrics"] = metrics[slice_name]
all_metrics[slice_name]["source"] = "Custom Slice"
return all_metrics
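# --- Hedged usage sketch (added for illustration) ---
# Rough flow the helpers above are meant to support; `my_model` and `my_datapanel`
# are placeholders for an rg-compatible model and DataPanel, so this is left
# commented rather than executed on import.
#
# bench = new_bench()
# dp, pred = update_pred(my_datapanel, my_model)
# df = slice_to_df(dp)
# print(df.head())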
|
python
|
# Generated by Django 3.0.7 on 2020-07-29 17:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('permafrost', '0012_auto_20200729_1710'),
('permafrost', '0015_auto_20200606_0042'),
]
operations = [
]
|
python
|
from flask import Flask
app = Flask(__name__)
def wrap_html(message):
html = """
<html>
<body>
<div style='font-size:80px;'>
<center>
<image height="600" width="531" src="https://secure.meetupstatic.com/photos/event/2/a/a/3/600_452110915.jpeg">
<br>
{0}<br>
</center>
</div>
</body>
</html>""".format(message)
return html
@app.route('/')
def hello_world():
message = 'Hello Python, The application is working --Rizwan!'
html = wrap_html(message)
return html
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
|
python
|
"""
Every issue is reported as ``robocop.rules.Message`` object. It can be later printed or used by
post-run reports.
Output message format
---------------------
The output message format of rules can be defined with the ``-f`` / ``--format`` argument. Default value::
"{source}:{line}:{col} [{severity}] {rule_id} {desc} ({name})"
Available formats:
* ``source``: path to the file where the issue occurred
* ``source_rel``: path to the file where the issue occurred, relative to execution directory
* ``line``: line number where the issue starts
* ``end_line``: line number where the issue ends
* ``col``: column number where the issue starts
* ``end_col``: column number where the issue ends
* ``severity``: severity of the issue, value of ``robocop.rules.RuleSeverity`` enum
* ``rule_id``: rule id (e.g. 0501)
* ``name``: rule name (e.g. ``line-too-long``)
* ``desc``: description of the rule
"""
from enum import Enum
from functools import total_ordering
import robocop.exceptions
@total_ordering
class RuleSeverity(Enum):
"""
Rule severity.
It can be configured with ``--configure id_or_msg_name:severity:value``
where value can be first letter of severity value or whole name, case-insensitive.
For example ::
-c line-too-long:severity:e
will change `line-too-long` rule severity to error.
You can filter out all rules below given severity value by using following option::
-t/--threshold <severity value>
Example::
--threshold E
will only report rules with severity E and above.
"""
INFO = "I"
WARNING = "W"
ERROR = "E"
def __lt__(self, other):
look_up = [sev.value for sev in RuleSeverity]
return look_up.index(self.value) < look_up.index(other.value)
class Rule:
def __init__(self, rule_id, body):
self.rule_id = rule_id
self.name = ""
self.desc = ""
self.source = None
self.enabled = True
self.severity = RuleSeverity.INFO
self.configurable = []
self.parse_body(body)
def __str__(self):
return (
f"Rule - {self.rule_id} [{self.severity.value}]: {self.name}: {self.desc} "
f'({"enabled" if self.enabled else "disabled"})'
)
def change_severity(self, value):
severity = {
"error": "E",
"e": "E",
"warning": "W",
"w": "W",
"info": "I",
"i": "I",
}.get(str(value).lower(), None)
if severity is None:
raise robocop.exceptions.InvalidRuleSeverityError(self.name, value)
self.severity = RuleSeverity(severity)
def get_configurable(self, param):
for configurable in self.configurable:
if configurable[0] == param:
return configurable
return None
@staticmethod
def get_configurable_desc(conf, default=None):
desc = f"{conf[0]} = {default}\n" f" type: {conf[2].__name__}"
if len(conf) == 4:
desc += "\n" f" info: {conf[3]}"
return desc
@staticmethod
def get_default_value(param, checker):
return None if checker is None else checker.__dict__.get(param, None)
def available_configurables(self, include_severity=True, checker=None):
configurables = ["severity"] if include_severity else []
for conf in self.configurable:
default = self.get_default_value(conf[1], checker)
configurables.append(self.get_configurable_desc(conf, default))
if not configurables:
return ""
return "\n ".join(configurables)
def parse_body(self, body):
if isinstance(body, tuple) and len(body) >= 3:
self.name, self.desc, self.severity, *self.configurable = body
else:
raise robocop.exceptions.InvalidRuleBodyError(self.rule_id, body)
for configurable in self.configurable:
if not isinstance(configurable, tuple) or len(configurable) not in (3, 4):
raise robocop.exceptions.InvalidRuleConfigurableError(self.rule_id, body)
def prepare_message(self, *args, source, node, lineno, col, end_lineno, end_col, ext_disablers):
return Message(
*args,
rule=self,
source=source,
node=node,
lineno=lineno,
col=col,
end_col=end_col,
end_lineno=end_lineno,
ext_disablers=ext_disablers,
)
def matches_pattern(self, pattern):
"""check if this rule matches given pattern"""
if isinstance(pattern, str):
return pattern in (self.name, self.rule_id)
return pattern.match(self.name) or pattern.match(self.rule_id)
class Message:
def __init__(
self,
*args,
rule,
source,
node,
lineno,
col,
end_lineno,
end_col,
ext_disablers=None,
):
self.enabled = rule.enabled
self.rule_id = rule.rule_id
self.name = rule.name
self.severity = rule.severity
self.desc = rule.desc
try:
self.desc %= args
except TypeError as err:
raise robocop.exceptions.InvalidRuleUsageError(rule.rule_id, err)
self.source = source
self.line = 1
if node is not None and node.lineno > -1:
self.line = node.lineno
if lineno is not None:
self.line = lineno
self.col = 1 if col is None else col
self.end_line = self.line if end_lineno is None else end_lineno
self.end_col = self.col if end_col is None else end_col
self.ext_disablers = ext_disablers if ext_disablers else []
def __lt__(self, other):
return (self.line, self.col, self.rule_id) < (
other.line,
other.col,
other.rule_id,
)
def get_fullname(self):
return f"{self.severity.value}{self.rule_id} ({self.name})"
def to_json(self):
return {
"source": self.source,
"line": self.line,
"column": self.col,
"severity": self.severity.value,
"rule_id": self.rule_id,
"description": self.desc,
"rule_name": self.name,
}
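# --- Hedged example (added for illustration) ---
# Minimal sketch of the body tuple shape that Rule.parse_body() expects:
# (name, description, severity, *configurables), where each configurable is a
# 3- or 4-tuple of (parameter name, checker attribute, type[, info]). The rule
# id and values below are made up for the example.
if __name__ == "__main__":
    example_rule = Rule(
        "0101",
        (
            "example-rule",
            "An illustrative rule description",
            RuleSeverity.WARNING,
            ("max_len", "max_len", int, "maximum allowed length"),
        ),
    )
    print(example_rule)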
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# vim: fenc=utf-8
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
#
"""
File name: favorites.py
Version: 0.1
Author: dhilipsiva <[email protected]>
Date created: 2015-07-26
"""
__author__ = "dhilipsiva"
__status__ = "development"
"""
"""
fmt = """ "%i"
{
"name" "%i"
"gamedir" "cstrike"
"Players" "3"
"maxplayers" "32"
"map" "de_dust2"
"address" "%s"
"lastplayed" "0"
"secure" "1"
"type" "4"
}\n"""
f = open("ips.txt", "r")
w = open("w.txt", "w")
i = 5
for line in f:
w.write(fmt % (i, i, line.replace("\n", "")))
i += 1
|
python
|
__author__ = 'Pauli Salmenrinne'
from setuptools import setup
requires = [
]
setup( name='sarch2',
version="1.1.0",
description='Simple archiving solution',
scripts=['bin/sarch2'],
packages=['sarch2'],
long_description=open('README.rst').read(),
url='https://github.com/susundberg/python-sarch2',
author='Pauli Salmenrinne',
author_email='[email protected]',
license='MIT',
install_requires=requires,
test_suite="test",
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python :: 3.6',
'Topic :: System :: Archiving',
'Topic :: System :: Filesystems'
],
zip_safe=True )
|
python
|
#!/usr/bin/python
#
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recover duts.
This module runs at system startup on Chromium OS test images. It runs through
a set of hooks to keep a DUT from being bricked without manual intervention.
Example hook:
  Check to see if ethernet is connected. If it's not, unload and reload the
ethernet driver.
"""
import logging
import os
import subprocess
import time
from logging import handlers
LOGGING_SUBDIR = '/var/log/recover_duts'
LOG_FILENAME = 'recover_duts.log'
LOGGING_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
LONG_REBOOT_DELAY = 300
SLEEP_DELAY = 600
LOG_FILE_BACKUP_COUNT = 10
LOG_FILE_SIZE = 1024 * 5000 # 5000 KB
def _setup_logging(log_file):
"""Setup logging.
Args:
log_file: path to log file.
"""
log_formatter = logging.Formatter(LOGGING_FORMAT)
handler = handlers.RotatingFileHandler(
filename=log_file, maxBytes=LOG_FILE_SIZE,
backupCount=LOG_FILE_BACKUP_COUNT)
handler.setFormatter(log_formatter)
logger = logging.getLogger()
log_level = logging.DEBUG
logger.setLevel(log_level)
logger.addHandler(handler)
def main():
if not os.path.isdir(LOGGING_SUBDIR):
os.makedirs(LOGGING_SUBDIR)
log_file = os.path.join(LOGGING_SUBDIR, LOG_FILENAME)
_setup_logging(log_file)
hooks_dir = os.path.join(os.path.dirname(__file__), 'hooks')
  # Additional sleep as networking may not be up in the case of a long reboot.
time.sleep(LONG_REBOOT_DELAY)
try:
while True:
for script in os.listdir(hooks_dir):
script = os.path.join(hooks_dir, script)
if os.path.isfile(script) and script.endswith('.hook'):
logging.debug('Running hook: %s', script)
popen = subprocess.Popen([script], stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = popen.communicate()[0]
if popen.returncode == 0:
logging.debug('Running of %s succeeded with output:\n%s', script,
output)
else:
logging.warn('Running of %s failed with output:\n%s', script,
output)
time.sleep(SLEEP_DELAY)
except Exception as e:
# Since this is run from an upstart job we want to ensure we log this into
# our log file before dying.
logging.fatal(str(e))
raise
if __name__ == '__main__':
main()
|
python
|
#-*-coding: utf8-*-
import redis
def connection(ip, port):
r = redis.StrictRedis(host=ip, port=port, db=0)
return r
def add(r, query, suggestions):
    '''
    :param query: string (currently unused; all suggestions go into one sorted set)
    :param suggestions: member -> score mapping, e.g. {sug1: score1, sug2: score2}
    use a Redis SortedSet (ZADD) to store the suggestions
    '''
    r.zadd('suggestions', suggestions)
def search(r, query):
    # Note: query is currently unused; this returns the first 10 members of the set.
    return r.zrange('suggestions', start=0, end=10)
if __name__ == '__main__':
pass
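    # --- Hedged usage sketch (added for illustration) ---
    # Assumes a Redis server listening on localhost:6379; the suggestion
    # strings and scores are made up. Note that redis-py 3.x expects the
    # member -> score mapping form of ZADD that add() passes through.
    r = connection('127.0.0.1', 6379)
    add(r, 'py', {'python': 10, 'pypy': 5, 'pyramid': 3})
    print(search(r, 'py'))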
|
python
|
import sys
import click
from tabulate import tabulate
from . import admin
from ...session import Session
from ..pretty import print_error
@admin.command()
@click.option('--status', default='RUNNING',
type=click.Choice(['PREPARING', 'BUILDING', 'RUNNING', 'RESTARTING',
'RESIZING', 'SUSPENDED', 'TERMINATING',
'TERMINATED', 'ERROR', 'ALL']),
help='Filter by the given status')
@click.option('--access-key', type=str, default=None,
help='Get sessions for a specific access key '
'(only works if you are a super-admin)')
@click.option('--id-only', is_flag=True, help='Display session ids only.')
def sessions(status, access_key, id_only):
'''
List and manage compute sessions.
'''
fields = [
('Session ID', 'sess_id'),
]
if not id_only:
fields.extend([
('Lang/runtime', 'lang'),
('Tag', 'tag'),
('Created At', 'created_at',),
('Terminated At', 'terminated_at'),
('Status', 'status'),
('CPU Cores', 'cpu_slot'),
('CPU Used (ms)', 'cpu_used'),
('Total Memory (MiB)', 'mem_slot'),
('Used Memory (MiB)', 'mem_cur_bytes'),
('Max Used Memory (MiB)', 'mem_max_bytes'),
('GPU Cores', 'gpu_slot'),
])
if access_key is None:
q = 'query($status:String) {' \
' compute_sessions(status:$status) { $fields }' \
'}'
else:
q = 'query($ak:String, $status:String) {' \
' compute_sessions(access_key:$ak, status:$status) { $fields }' \
'}'
q = q.replace('$fields', ' '.join(item[1] for item in fields))
v = {
'status': status if status != 'ALL' else None,
'ak': access_key,
}
with Session() as session:
try:
resp = session.Admin.query(q, v)
except Exception as e:
print_error(e)
sys.exit(1)
if len(resp['compute_sessions']) == 0:
print('There are no compute sessions currently running.')
return
for item in resp['compute_sessions']:
if 'mem_cur_bytes' in item:
item['mem_cur_bytes'] = round(item['mem_cur_bytes'] / 2 ** 20, 1)
if 'mem_max_bytes' in item:
item['mem_max_bytes'] = round(item['mem_max_bytes'] / 2 ** 20, 1)
if id_only:
for item in resp['compute_sessions']:
print(item['sess_id'])
else:
print(tabulate((item.values() for item in resp['compute_sessions']),
headers=(item[0] for item in fields)))
@admin.command()
@click.argument('sess_id_or_alias', metavar='SESSID')
def session(sess_id_or_alias):
'''
Show detailed information for a running compute session.
SESSID: Session id or its alias.
'''
fields = [
('Session ID', 'sess_id'),
('Role', 'role'),
('Lang/runtime', 'lang'),
('Tag', 'tag'),
('Created At', 'created_at',),
('Terminated At', 'terminated_at'),
('Agent', 'agent'),
('Status', 'status',),
('Status Info', 'status_info',),
('CPU Cores', 'cpu_slot'),
('CPU Used (ms)', 'cpu_used'),
('Total Memory (MiB)', 'mem_slot'),
('Used Memory (MiB)', 'mem_cur_bytes'),
('Max Used Memory (MiB)', 'mem_max_bytes'),
('GPU Cores', 'gpu_slot'),
('Number of Queries', 'num_queries'),
('Network RX Bytes', 'net_rx_bytes'),
('Network TX Bytes', 'net_tx_bytes'),
('IO Read Bytes', 'io_read_bytes'),
('IO Write Bytes', 'io_write_bytes'),
('IO Max Scratch Size', 'io_max_scratch_size'),
('IO Current Scratch Size', 'io_cur_scratch_size'),
]
q = 'query($sess_id:String) {' \
' compute_session(sess_id:$sess_id) { $fields }' \
'}'
q = q.replace('$fields', ' '.join(item[1] for item in fields))
v = {'sess_id': sess_id_or_alias}
with Session() as session:
try:
resp = session.Admin.query(q, v)
except Exception as e:
print_error(e)
sys.exit(1)
if resp['compute_session']['sess_id'] is None:
print('There is no such running compute session.')
return
print('Session detail:\n---------------')
for i, value in enumerate(resp['compute_session'].values()):
if fields[i][1] in ['mem_cur_bytes', 'mem_max_bytes']:
value = round(value / 2 ** 20, 1)
print(fields[i][0] + ': ' + str(value))
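# --- Hedged usage note (added for illustration) ---
# These commands attach to the `admin` click group imported above, so with the
# usual Backend.AI client entrypoint they would be invoked roughly as:
#   backend.ai admin sessions --status RUNNING --id-only
#   backend.ai admin session <SESSID>
# The exact executable name depends on how the surrounding package installs its CLI.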
|
python
|
import io
import time
from typing import Optional, Tuple
from rich.console import Console
from rich.live import Live
from rich.panel import Panel
from rich.progress import (
BarColumn,
DownloadColumn,
FileSizeColumn,
MofNCompleteColumn,
Progress,
ProgressColumn,
SpinnerColumn,
Task,
TaskID,
TimeElapsedColumn,
TimeRemainingColumn,
)
from rich.table import Table
from rich.text import Text
class ImageDownloadUploadColumn(DownloadColumn):
def render(self, task: Task) -> Text:
if task.total is None or int(task.total) == 1:
return Text("")
else:
return super().render(task)
class TaskStatusColumn(ProgressColumn):
def __init__(self):
super().__init__()
self.dots = 0
self.max_dots = 4
self.update_interval = 1.0
self.last_updated = time.time()
def render(self, task: Task) -> Text:
total = max(0, task.total or 0)
completed = max(0, task.completed)
if completed < total:
now = time.time()
if now - self.last_updated > self.update_interval:
self.last_updated = now
self.dots += 1
if self.dots > self.max_dots:
self.dots = 0
return Text("waiting" + ("." * self.dots) + (" " * (self.max_dots - self.dots)))
else:
return Text("\N{check mark} finalized")
class BufferedReaderWithProgress(io.BufferedReader):
def __init__(self, buffered_reader: io.BufferedReader, progress: Progress, task_id: TaskID):
super().__init__(buffered_reader.raw)
self.buffered_reader = buffered_reader
self.progress = progress
self.task_id = task_id
self.total_read = 0
def peek(self, size: int = 0) -> bytes:
return self.buffered_reader.peek(size)
def read(self, size: Optional[int] = None) -> bytes:
out = self.buffered_reader.read(size)
self.progress.advance(self.task_id, len(out))
self.total_read += len(out)
return out
def read1(self, size: int = -1) -> bytes:
out = self.buffered_reader.read1(size)
self.progress.advance(self.task_id, len(out))
self.total_read += len(out)
return out
def get_experiments_progress(quiet: bool = False) -> Progress:
return Progress(
"[progress.description]{task.description}",
BarColumn(),
MofNCompleteColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
disable=quiet,
)
def get_jobs_progress(quiet: bool = False) -> Progress:
return Progress(
"[progress.description]{task.description}",
TaskStatusColumn(),
TimeElapsedColumn(),
disable=quiet,
)
def get_logs_progress(quiet: bool = False) -> Progress:
return Progress(
"[progress.description]{task.description}",
SpinnerColumn(),
FileSizeColumn(),
TimeElapsedColumn(),
disable=quiet,
)
def get_group_experiments_progress(quiet: bool = False) -> Progress:
return Progress(
"[progress.description]{task.description}",
SpinnerColumn(),
FileSizeColumn(),
TimeElapsedColumn(),
disable=quiet,
)
def get_exps_and_jobs_progress(quiet: bool = False) -> Tuple[Live, Progress, Progress]:
experiments_progress = get_experiments_progress(quiet)
jobs_progress = get_jobs_progress(quiet)
progress_table = Table.grid()
progress_table.add_row(
Panel.fit(experiments_progress, title="Overall progress", padding=(1, 2)),
Panel.fit(jobs_progress, title="Task progress", padding=(1, 2)),
)
return (
Live(progress_table, console=None if not quiet else Console(quiet=True)),
experiments_progress,
jobs_progress,
)
def get_dataset_sync_progress(quiet: bool = False) -> Progress:
return Progress(
"[progress.description]{task.description}",
BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
TimeElapsedColumn(),
TimeRemainingColumn(),
DownloadColumn(),
disable=quiet,
)
def get_sized_dataset_fetch_progress(quiet: bool = False) -> Progress:
return Progress(
"[progress.description]{task.description}",
BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
TimeElapsedColumn(),
TimeRemainingColumn(),
DownloadColumn(),
disable=quiet,
)
def get_unsized_dataset_fetch_progress(quiet: bool = False) -> Progress:
return Progress(
"[progress.description]{task.description}",
SpinnerColumn(),
TimeElapsedColumn(),
FileSizeColumn(),
disable=quiet,
)
def get_image_upload_progress(quiet: bool = False) -> Progress:
return Progress(
"[progress.description]{task.description}",
BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
TimeRemainingColumn(),
ImageDownloadUploadColumn(),
disable=quiet,
)
def get_image_download_progress(quiet: bool = False) -> Progress:
return get_image_upload_progress(quiet)
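# --- Hedged usage sketch (added for illustration) ---
# Drives one of the progress factories above with made-up byte counts, just to
# show the add_task / advance pattern.
if __name__ == "__main__":
    progress = get_dataset_sync_progress()
    with progress:
        task_id = progress.add_task("syncing dataset", total=1000)
        for _ in range(10):
            time.sleep(0.05)
            progress.advance(task_id, 100)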
|
python
|
from django.apps import AppConfig
class CapstoneConfig(AppConfig):
name = 'capstone'
|
python
|
"""
Iterative deepening Depth-first Search specialization of a generic search algorithm.
"""
from typing import Optional
from search.algorithms.search import Node, SearchAlgorithm
from search.space import Space
from search.algorithms.dfs import DFS
import time
from math import sqrt, pi
class IDDFS(DFS):
"""Iterative deepening Depth-first Search."""
def __init__(self, problem):
super().__init__(problem)
self.max_expansions = 2 ** 64
def __str__(self) -> str:
"""The string representation of this Node."""
return "{}[]".format(
self.__class__.__name__,
)
@classmethod
def name(cls) -> str:
"""Returns the name of the Algorithm."""
return "Iterative deepening Depth-first Search"
# pylint: no-self-argument
def create_starting_node(self, state: Space.State) -> Node:
"""Create an Starting Node."""
self.nodes_created += 1
return Node(state, action=None, parent=None)
def reach(self, state: Space.State, action: Space.Action, parent: Node):
"""Reaches a state and updates Open."""
if state in self.open:
# If the state was already in Open, then we discard this new path
# as we don't have a way of telling which one is better.
return
# depth = 0
# node = parent
# while node != None:
# depth += 1
# node = node.parent
# if depth > self.max_depth:
# print("ignored a reach")
# return
self.nodes_created += 1
self.open.insert(Node(state, action, parent))
def _actually_search(self, depth) -> Optional[Node]:
"""Finds a single goal Node."""
node = self.open.pop()
cost = 0
parent = node
        while parent is not None:
cost += 1
parent = parent.parent
if cost > depth:
for i in self.closed:
if i == node.state:
self.closed.remove(i)
return None
if self.problem.is_goal(node.state):
return node
self.expansions += 1
if self.expansions >= self.expansion_limit:
print(str(self), ": giving up...")
return None
# Expand the node and consider all its neighboring states.
self.closed.add(node.state)
for action, state in self.problem.space.neighbors(node.state):
self.states_generated += 1
if state in self.closed:
# Déjà vu, we reached an expanded state.
continue # Not falling for this (again?).
# print(self.states_reached, self.max_states)
self.states_reached += 1
self.reach(state, action, parent=node)
result = self._actually_search(depth)
if result != None:
return result
return None
def search(self) -> Optional[Node]:
"""Finds a single goal Node."""
self.time_ns = time.perf_counter_ns()
solution = None
depth = 0
        while self.expansions < self.expansion_limit and solution is None:
self.open = self.create_open()
self.closed = set()
for start in self.problem.starting_states:
self.open.insert(self.create_starting_node(start))
solution = self._actually_search(depth)
depth += 5
self.time_ns = time.perf_counter_ns() - self.time_ns
return solution
|
python
|
from __clrclasses__.System import Comparison as _n_0_t_0
from __clrclasses__.System import ValueType as _n_0_t_1
from __clrclasses__.System import Predicate as _n_0_t_2
from __clrclasses__.System import Array as _n_0_t_3
from __clrclasses__.System import IDisposable as _n_0_t_4
from __clrclasses__.System import SystemException as _n_0_t_5
from __clrclasses__.System import Exception as _n_0_t_6
from __clrclasses__.System import Converter as _n_0_t_7
from __clrclasses__.System import Action as _n_0_t_8
from __clrclasses__.System import Func as _n_0_t_9
from __clrclasses__.System.Collections import IComparer as _n_1_t_0
from __clrclasses__.System.Collections import IDictionary as _n_1_t_1
from __clrclasses__.System.Collections import IDictionaryEnumerator as _n_1_t_2
from __clrclasses__.System.Collections import ICollection as _n_1_t_3
from __clrclasses__.System.Collections import IEqualityComparer as _n_1_t_4
from __clrclasses__.System.Collections import IEnumerable as _n_1_t_5
from __clrclasses__.System.Collections import IEnumerator as _n_1_t_6
from __clrclasses__.System.Collections import IList as _n_1_t_7
from __clrclasses__.System.Collections.ObjectModel import ReadOnlyCollection as _n_2_t_0
from __clrclasses__.System.Linq import ParallelQuery as _n_3_t_0
from __clrclasses__.System.Linq import IQueryable as _n_3_t_1
from __clrclasses__.System.Linq import IGrouping as _n_3_t_2
from __clrclasses__.System.Linq import IOrderedEnumerable as _n_3_t_3
from __clrclasses__.System.Linq import ILookup as _n_3_t_4
from __clrclasses__.System.Runtime.InteropServices import _Exception as _n_4_t_0
from __clrclasses__.System.Runtime.Serialization import ISerializable as _n_5_t_0
from __clrclasses__.System.Runtime.Serialization import IDeserializationCallback as _n_5_t_1
import typing
T = typing.TypeVar('T')
TKey = typing.TypeVar('TKey')
TValue = typing.TypeVar('TValue')
class Comparer(_n_1_t_0, IComparer[T], typing.Generic[T]):
@property
def Default(self) -> Comparer[T]:"""Default { get; } -> Comparer"""
@staticmethod
def Create(comparison: _n_0_t_0[T]) -> Comparer[T]:...
class Dictionary(IDictionary[TKey, TValue], _n_1_t_1, IReadOnlyDictionary[TKey, TValue], _n_5_t_0, _n_5_t_1, typing.Generic[TKey, TValue], typing.Iterable[TValue]):
@property
def Comparer(self) -> IEqualityComparer[TKey]:"""Comparer { get; } -> IEqualityComparer"""
def __init__(self, dictionary: IDictionary[TKey, TValue]) -> Dictionary:...
def __init__(self, dictionary: IDictionary[TKey, TValue], comparer: IEqualityComparer[TKey]) -> Dictionary:...
def __init__(self, capacity: int, comparer: IEqualityComparer[TKey]) -> Dictionary:...
def __init__(self, comparer: IEqualityComparer[TKey]) -> Dictionary:...
def __init__(self, capacity: int) -> Dictionary:...
def __init__(self) -> Dictionary:...
def ContainsValue(self, value: TValue) -> bool:...
class Enumerator(_n_0_t_1, IEnumerator[KeyValuePair[TKey, TValue]], _n_1_t_2, typing.Generic[TKey, TValue]):
pass
class KeyCollection(ICollection[TKey], _n_1_t_3, IReadOnlyCollection[TKey], typing.Generic[TKey, TValue]):
def __init__(self, dictionary: Dictionary[TKey, TValue]) -> Dictionary.KeyCollection:...
class Enumerator(_n_0_t_1, IEnumerator[TKey], typing.Generic[TKey, TValue]):
pass
class ValueCollection(ICollection[TValue], _n_1_t_3, IReadOnlyCollection[TValue], typing.Generic[TKey, TValue]):
def __init__(self, dictionary: Dictionary[TKey, TValue]) -> Dictionary.ValueCollection:...
class Enumerator(_n_0_t_1, IEnumerator[TValue], typing.Generic[TKey, TValue]):
pass
class EqualityComparer(_n_1_t_4, IEqualityComparer[T], typing.Generic[T]):
@property
def Default(self) -> EqualityComparer[T]:"""Default { get; } -> EqualityComparer"""
class HashSet(ICollection[T], _n_5_t_0, _n_5_t_1, ISet[T], IReadOnlyCollection[T], typing.Generic[T]):
@property
def Comparer(self) -> IEqualityComparer[T]:"""Comparer { get; } -> IEqualityComparer"""
def __init__(self) -> HashSet:...
def __init__(self, comparer: IEqualityComparer[T]) -> HashSet:...
def __init__(self, collection: IEnumerable[T]) -> HashSet:...
def __init__(self, collection: IEnumerable[T], comparer: IEqualityComparer[T]) -> HashSet:...
def __init__(self, capacity: int) -> HashSet:...
def __init__(self, capacity: int, comparer: IEqualityComparer[T]) -> HashSet:...
@staticmethod
def CreateSetComparer() -> IEqualityComparer[HashSet[T]]:...
def RemoveWhere(self, match: _n_0_t_2[T]) -> int:...
def TrimExcess(self):...
def TryGetValue(self, equalValue: T, actualValue: object) -> bool:...
class Enumerator(_n_0_t_1, IEnumerator[T], typing.Generic[T]):
pass
class ICollection(IEnumerable[T], typing.Generic[T]):
@property
def Count(self) -> int:"""Count { get; } -> int"""
@property
def IsReadOnly(self) -> bool:"""IsReadOnly { get; } -> bool"""
def Add(self, item: T):...
def Clear(self):...
def Contains(self, item: T) -> bool:...
def CopyTo(self, array: _n_0_t_3[T], arrayIndex: int):...
def Remove(self, item: T) -> bool:...
class IComparer(typing.Generic[T]):
def Compare(self, x: T, y: T) -> int:...
class IDictionary(ICollection[KeyValuePair[TKey, TValue]], typing.Generic[TKey, TValue], typing.Iterable[TValue]):
@property
def Item(self) -> TValue:"""Item { get; set; } -> TValue"""
@property
def Keys(self) -> ICollection[TKey]:"""Keys { get; } -> ICollection"""
@property
def Values(self) -> ICollection[TValue]:"""Values { get; } -> ICollection"""
def ContainsKey(self, key: TKey) -> bool:...
def TryGetValue(self, key: TKey, value: object) -> bool:...
class IEnumerable(_n_1_t_5, typing.Generic[T]):
def Aggregate(self, func: _n_0_t_9[typing.Any, typing.Any, typing.Any]) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def Aggregate(self, seed: typing.Any, func: _n_0_t_9[typing.Any, typing.Any, typing.Any]) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def Aggregate(self, seed: typing.Any, func: _n_0_t_9[typing.Any, typing.Any, typing.Any], resultSelector: _n_0_t_9[typing.Any, typing.Any]) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def All(self, predicate: _n_0_t_9[typing.Any, bool]) -> bool:
"""Extension from: System.Linq.Enumerable"""
def Any(self) -> bool:
"""Extension from: System.Linq.Enumerable"""
def Any(self, predicate: _n_0_t_9[typing.Any, bool]) -> bool:
"""Extension from: System.Linq.Enumerable"""
def Append(self, element: typing.Any) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def AsEnumerable(self) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def AsParallel(self) -> _n_3_t_0[typing.Any]:
"""Extension from: System.Linq.ParallelEnumerable"""
def AsQueryable(self) -> _n_3_t_1[typing.Any]:
"""Extension from: System.Linq.Queryable"""
def Average(self) -> float:
"""Extension from: System.Linq.Enumerable"""
def Average(self, selector: _n_0_t_9[typing.Any, int]) -> float:
"""Extension from: System.Linq.Enumerable"""
def Concat(self, second: IEnumerable[typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Contains(self, value: typing.Any) -> bool:
"""Extension from: System.Linq.Enumerable"""
def Contains(self, value: typing.Any, comparer: IEqualityComparer[typing.Any]) -> bool:
"""Extension from: System.Linq.Enumerable"""
def Count(self) -> int:
"""Extension from: System.Linq.Enumerable"""
def Count(self, predicate: _n_0_t_9[typing.Any, bool]) -> int:
"""Extension from: System.Linq.Enumerable"""
def DefaultIfEmpty(self) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def DefaultIfEmpty(self, defaultValue: typing.Any) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Distinct(self) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Distinct(self, comparer: IEqualityComparer[typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ElementAt(self, index: int) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def ElementAtOrDefault(self, index: int) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def Except(self, second: IEnumerable[typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Except(self, second: IEnumerable[typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def First(self) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def First(self, predicate: _n_0_t_9[typing.Any, bool]) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def FirstOrDefault(self) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def FirstOrDefault(self, predicate: _n_0_t_9[typing.Any, bool]) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def GroupBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any]) -> IEnumerable[_n_3_t_2[typing.Any, typing.Any]]:
"""Extension from: System.Linq.Enumerable"""
def GroupBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[_n_3_t_2[typing.Any, typing.Any]]:
"""Extension from: System.Linq.Enumerable"""
def GroupBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any]) -> IEnumerable[_n_3_t_2[typing.Any, typing.Any]]:
"""Extension from: System.Linq.Enumerable"""
def GroupBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[_n_3_t_2[typing.Any, typing.Any]]:
"""Extension from: System.Linq.Enumerable"""
def GroupBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any], resultSelector: _n_0_t_9[typing.Any, IEnumerable[typing.Any], typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def GroupBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any], resultSelector: _n_0_t_9[typing.Any, IEnumerable[typing.Any], typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def GroupJoin(self, inner: IEnumerable[typing.Any], outerKeySelector: _n_0_t_9[typing.Any, typing.Any], innerKeySelector: _n_0_t_9[typing.Any, typing.Any], resultSelector: _n_0_t_9[typing.Any, IEnumerable[typing.Any], typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def GroupJoin(self, inner: IEnumerable[typing.Any], outerKeySelector: _n_0_t_9[typing.Any, typing.Any], innerKeySelector: _n_0_t_9[typing.Any, typing.Any], resultSelector: _n_0_t_9[typing.Any, IEnumerable[typing.Any], typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Intersect(self, second: IEnumerable[typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Intersect(self, second: IEnumerable[typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Join(self, inner: IEnumerable[typing.Any], outerKeySelector: _n_0_t_9[typing.Any, typing.Any], innerKeySelector: _n_0_t_9[typing.Any, typing.Any], resultSelector: _n_0_t_9[typing.Any, typing.Any, typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Join(self, inner: IEnumerable[typing.Any], outerKeySelector: _n_0_t_9[typing.Any, typing.Any], innerKeySelector: _n_0_t_9[typing.Any, typing.Any], resultSelector: _n_0_t_9[typing.Any, typing.Any, typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Last(self) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def Last(self, predicate: _n_0_t_9[typing.Any, bool]) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def LastOrDefault(self) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def LastOrDefault(self, predicate: _n_0_t_9[typing.Any, bool]) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def LongCount(self) -> int:
"""Extension from: System.Linq.Enumerable"""
def LongCount(self, predicate: _n_0_t_9[typing.Any, bool]) -> int:
"""Extension from: System.Linq.Enumerable"""
def Max(self) -> int:
"""Extension from: System.Linq.Enumerable"""
def Max(self, selector: _n_0_t_9[typing.Any, int]) -> int:
"""Extension from: System.Linq.Enumerable"""
def Min(self) -> int:
"""Extension from: System.Linq.Enumerable"""
def Min(self, selector: _n_0_t_9[typing.Any, int]) -> int:
"""Extension from: System.Linq.Enumerable"""
def OrderBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any]) -> _n_3_t_3[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def OrderBy(self, keySelector: _n_0_t_9[typing.Any, typing.Any], comparer: IComparer[typing.Any]) -> _n_3_t_3[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def OrderByDescending(self, keySelector: _n_0_t_9[typing.Any, typing.Any]) -> _n_3_t_3[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def OrderByDescending(self, keySelector: _n_0_t_9[typing.Any, typing.Any], comparer: IComparer[typing.Any]) -> _n_3_t_3[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Prepend(self, element: typing.Any) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Reverse(self) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Select(self, selector: _n_0_t_9[typing.Any, typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def SelectMany(self, selector: _n_0_t_9[typing.Any, IEnumerable[typing.Any]]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def SelectMany(self, collectionSelector: _n_0_t_9[typing.Any, int, IEnumerable[typing.Any]], resultSelector: _n_0_t_9[typing.Any, typing.Any, typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def SequenceEqual(self, second: IEnumerable[typing.Any]) -> bool:
"""Extension from: System.Linq.Enumerable"""
def SequenceEqual(self, second: IEnumerable[typing.Any], comparer: IEqualityComparer[typing.Any]) -> bool:
"""Extension from: System.Linq.Enumerable"""
def Single(self) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def Single(self, predicate: _n_0_t_9[typing.Any, bool]) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def SingleOrDefault(self) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def SingleOrDefault(self, predicate: _n_0_t_9[typing.Any, bool]) -> typing.Any:
"""Extension from: System.Linq.Enumerable"""
def Skip(self, count: int) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def SkipWhile(self, predicate: _n_0_t_9[typing.Any, bool]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Sum(self) -> int:
"""Extension from: System.Linq.Enumerable"""
def Sum(self, selector: _n_0_t_9[typing.Any, int]) -> int:
"""Extension from: System.Linq.Enumerable"""
def Take(self, count: int) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def TakeWhile(self, predicate: _n_0_t_9[typing.Any, bool]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ToArray(self) -> _n_0_t_3[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ToDictionary(self, keySelector: _n_0_t_9[typing.Any, typing.Any]) -> Dictionary[typing.Any, typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ToDictionary(self, keySelector: _n_0_t_9[typing.Any, typing.Any], comparer: IEqualityComparer[typing.Any]) -> Dictionary[typing.Any, typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ToDictionary(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any]) -> Dictionary[typing.Any, typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ToDictionary(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any], comparer: IEqualityComparer[typing.Any]) -> Dictionary[typing.Any, typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ToHashSet(self) -> HashSet[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ToHashSet(self, comparer: IEqualityComparer[typing.Any]) -> HashSet[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ToList(self) -> List[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ToLookup(self, keySelector: _n_0_t_9[typing.Any, typing.Any]) -> _n_3_t_4[typing.Any, typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ToLookup(self, keySelector: _n_0_t_9[typing.Any, typing.Any], comparer: IEqualityComparer[typing.Any]) -> _n_3_t_4[typing.Any, typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ToLookup(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any]) -> _n_3_t_4[typing.Any, typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def ToLookup(self, keySelector: _n_0_t_9[typing.Any, typing.Any], elementSelector: _n_0_t_9[typing.Any, typing.Any], comparer: IEqualityComparer[typing.Any]) -> _n_3_t_4[typing.Any, typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Union(self, second: IEnumerable[typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Union(self, second: IEnumerable[typing.Any], comparer: IEqualityComparer[typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Where(self, predicate: _n_0_t_9[typing.Any, bool]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
def Zip(self, second: IEnumerable[typing.Any], resultSelector: _n_0_t_9[typing.Any, typing.Any, typing.Any]) -> IEnumerable[typing.Any]:
"""Extension from: System.Linq.Enumerable"""
class IEnumerator(_n_0_t_4, _n_1_t_6, typing.Generic[T]):
pass
class IEqualityComparer(typing.Generic[T]):
def Equals(self, x: T, y: T) -> bool:...
def GetHashCode(self, obj: T) -> int:...
class IList(ICollection[T], typing.Generic[T], typing.Iterable[T]):
@property
def Item(self) -> T:"""Item { get; set; } -> T"""
def IndexOf(self, item: T) -> int:...
def Insert(self, index: int, item: T):...
def RemoveAt(self, index: int):...
class IReadOnlyCollection(IEnumerable[T], typing.Generic[T]):
@property
def Count(self) -> int:"""Count { get; } -> int"""
class IReadOnlyDictionary(IReadOnlyCollection[KeyValuePair[TKey, TValue]], typing.Generic[TKey, TValue], typing.Iterable[TValue]):
@property
def Item(self) -> TValue:"""Item { get; } -> TValue"""
@property
def Keys(self) -> IEnumerable[TKey]:"""Keys { get; } -> IEnumerable"""
@property
def Values(self) -> IEnumerable[TValue]:"""Values { get; } -> IEnumerable"""
def ContainsKey(self, key: TKey) -> bool:...
def TryGetValue(self, key: TKey, value: object) -> bool:...
class IReadOnlyList(IReadOnlyCollection[T], typing.Generic[T], typing.Iterable[T]):
@property
def Item(self) -> T:"""Item { get; } -> T"""
class ISet(ICollection[T], typing.Generic[T]):
def ExceptWith(self, other: IEnumerable[T]):...
def IntersectWith(self, other: IEnumerable[T]):...
def IsProperSubsetOf(self, other: IEnumerable[T]) -> bool:...
def IsProperSupersetOf(self, other: IEnumerable[T]) -> bool:...
def IsSubsetOf(self, other: IEnumerable[T]) -> bool:...
def IsSupersetOf(self, other: IEnumerable[T]) -> bool:...
def Overlaps(self, other: IEnumerable[T]) -> bool:...
def SetEquals(self, other: IEnumerable[T]) -> bool:...
def SymmetricExceptWith(self, other: IEnumerable[T]):...
def UnionWith(self, other: IEnumerable[T]):...
class KeyNotFoundException(_n_0_t_5, _n_5_t_0, _n_4_t_0):
def __init__(self, message: str, innerException: _n_0_t_6) -> KeyNotFoundException:...
def __init__(self, message: str) -> KeyNotFoundException:...
def __init__(self) -> KeyNotFoundException:...
class KeyValuePair(_n_0_t_1, typing.Generic[TKey, TValue]):
@property
def Key(self) -> TKey:"""Key { get; } -> TKey"""
@property
def Value(self) -> TValue:"""Value { get; } -> TValue"""
def __init__(self, key: TKey, value: TValue) -> KeyValuePair:...
class LinkedList(ICollection[T], _n_1_t_3, IReadOnlyCollection[T], _n_5_t_0, _n_5_t_1, typing.Generic[T]):
@property
def First(self) -> LinkedListNode[T]:"""First { get; } -> LinkedListNode"""
@property
def Last(self) -> LinkedListNode[T]:"""Last { get; } -> LinkedListNode"""
def __init__(self, collection: IEnumerable[T]) -> LinkedList:...
def __init__(self) -> LinkedList:...
def AddAfter(self, node: LinkedListNode[T], value: T) -> LinkedListNode[T]:...
def AddAfter(self, node: LinkedListNode[T], newNode: LinkedListNode[T]):...
def AddBefore(self, node: LinkedListNode[T], newNode: LinkedListNode[T]):...
def AddBefore(self, node: LinkedListNode[T], value: T) -> LinkedListNode[T]:...
def AddFirst(self, node: LinkedListNode[T]):...
def AddFirst(self, value: T) -> LinkedListNode[T]:...
def AddLast(self, node: LinkedListNode[T]):...
def AddLast(self, value: T) -> LinkedListNode[T]:...
def Find(self, value: T) -> LinkedListNode[T]:...
def FindLast(self, value: T) -> LinkedListNode[T]:...
def RemoveFirst(self):...
def RemoveLast(self):...
class Enumerator(_n_0_t_1, IEnumerator[T], _n_5_t_0, _n_5_t_1, typing.Generic[T]):
pass
class LinkedListNode(typing.Generic[T]):
@property
def List(self) -> LinkedList[T]:"""List { get; } -> LinkedList"""
@property
def Next(self) -> LinkedListNode[T]:"""Next { get; } -> LinkedListNode"""
@property
def Previous(self) -> LinkedListNode[T]:"""Previous { get; } -> LinkedListNode"""
@property
def Value(self) -> T:"""Value { get; set; } -> T"""
def __init__(self, value: T) -> LinkedListNode:...
class List(IList[T], _n_1_t_7, IReadOnlyList[T], typing.Generic[T], typing.Iterable[T]):
@property
def Capacity(self) -> int:"""Capacity { get; set; } -> int"""
def __init__(self, collection: IEnumerable[T]) -> List:...
def __init__(self, capacity: int) -> List:...
def __init__(self) -> List:...
def AddRange(self, collection: IEnumerable[T]):...
def AsReadOnly(self) -> _n_2_t_0[T]:...
def BinarySearch(self, item: T, comparer: IComparer[T]) -> int:...
def BinarySearch(self, item: T) -> int:...
def BinarySearch(self, index: int, count: int, item: T, comparer: IComparer[T]) -> int:...
def ConvertAll(self, converter: _n_0_t_7[T, typing.Any]) -> List[typing.Any]:...
def Exists(self, match: _n_0_t_2[T]) -> bool:...
def Find(self, match: _n_0_t_2[T]) -> T:...
def FindAll(self, match: _n_0_t_2[T]) -> List[T]:...
def FindIndex(self, startIndex: int, count: int, match: _n_0_t_2[T]) -> int:...
def FindIndex(self, startIndex: int, match: _n_0_t_2[T]) -> int:...
def FindIndex(self, match: _n_0_t_2[T]) -> int:...
def FindLast(self, match: _n_0_t_2[T]) -> T:...
def FindLastIndex(self, startIndex: int, count: int, match: _n_0_t_2[T]) -> int:...
def FindLastIndex(self, startIndex: int, match: _n_0_t_2[T]) -> int:...
def FindLastIndex(self, match: _n_0_t_2[T]) -> int:...
def ForEach(self, action: _n_0_t_8[T]):...
def GetRange(self, index: int, count: int) -> List[T]:...
def InsertRange(self, index: int, collection: IEnumerable[T]):...
def LastIndexOf(self, item: T, index: int, count: int) -> int:...
def LastIndexOf(self, item: T, index: int) -> int:...
def LastIndexOf(self, item: T) -> int:...
def RemoveAll(self, match: _n_0_t_2[T]) -> int:...
def RemoveRange(self, index: int, count: int):...
def Reverse(self):...
def Reverse(self, index: int, count: int):...
def Sort(self, comparison: _n_0_t_0[T]):...
def Sort(self, index: int, count: int, comparer: IComparer[T]):...
def Sort(self, comparer: IComparer[T]):...
def Sort(self):...
def ToArray(self) -> _n_0_t_3[T]:...
def TrimExcess(self):...
def TrueForAll(self, match: _n_0_t_2[T]) -> bool:...
class Enumerator(_n_0_t_1, IEnumerator[T], typing.Generic[T]):
pass
class Queue(IEnumerable[T], _n_1_t_3, IReadOnlyCollection[T], typing.Generic[T]):
def __init__(self, collection: IEnumerable[T]) -> Queue:...
def __init__(self, capacity: int) -> Queue:...
def __init__(self) -> Queue:...
def Clear(self):...
def Contains(self, item: T) -> bool:...
def Dequeue(self) -> T:...
def Enqueue(self, item: T):...
def Peek(self) -> T:...
def ToArray(self) -> _n_0_t_3[T]:...
def TrimExcess(self):...
class Enumerator(_n_0_t_1, IEnumerator[T], typing.Generic[T]):
pass
class SortedDictionary(IDictionary[TKey, TValue], _n_1_t_1, IReadOnlyDictionary[TKey, TValue], typing.Generic[TKey, TValue], typing.Iterable[TValue]):
@property
def Comparer(self) -> IComparer[TKey]:"""Comparer { get; } -> IComparer"""
def __init__(self, dictionary: IDictionary[TKey, TValue], comparer: IComparer[TKey]) -> SortedDictionary:...
def __init__(self, dictionary: IDictionary[TKey, TValue]) -> SortedDictionary:...
def __init__(self, comparer: IComparer[TKey]) -> SortedDictionary:...
def __init__(self) -> SortedDictionary:...
def ContainsValue(self, value: TValue) -> bool:...
class Enumerator(_n_0_t_1, IEnumerator[KeyValuePair[TKey, TValue]], _n_1_t_2, typing.Generic[TKey, TValue]):
pass
class KeyCollection(ICollection[TKey], _n_1_t_3, IReadOnlyCollection[TKey], typing.Generic[TKey, TValue]):
def __init__(self, dictionary: SortedDictionary[TKey, TValue]) -> SortedDictionary.KeyCollection:...
class Enumerator(_n_0_t_1, IEnumerator[TKey], typing.Generic[TKey, TValue]):
pass
class ValueCollection(ICollection[TValue], _n_1_t_3, IReadOnlyCollection[TValue], typing.Generic[TKey, TValue]):
def __init__(self, dictionary: SortedDictionary[TKey, TValue]) -> SortedDictionary.ValueCollection:...
class Enumerator(_n_0_t_1, IEnumerator[TValue], typing.Generic[TKey, TValue]):
pass
class SortedList(IDictionary[TKey, TValue], _n_1_t_1, IReadOnlyDictionary[TKey, TValue], typing.Generic[TKey, TValue], typing.Iterable[TValue]):
@property
def Capacity(self) -> int:"""Capacity { get; set; } -> int"""
@property
def Comparer(self) -> IComparer[TKey]:"""Comparer { get; } -> IComparer"""
def __init__(self, dictionary: IDictionary[TKey, TValue], comparer: IComparer[TKey]) -> SortedList:...
def __init__(self, dictionary: IDictionary[TKey, TValue]) -> SortedList:...
def __init__(self, capacity: int, comparer: IComparer[TKey]) -> SortedList:...
def __init__(self, comparer: IComparer[TKey]) -> SortedList:...
def __init__(self, capacity: int) -> SortedList:...
def __init__(self) -> SortedList:...
def ContainsValue(self, value: TValue) -> bool:...
def IndexOfKey(self, key: TKey) -> int:...
def IndexOfValue(self, value: TValue) -> int:...
def RemoveAt(self, index: int):...
def TrimExcess(self):...
class SortedSet(ISet[T], _n_1_t_3, _n_5_t_0, _n_5_t_1, IReadOnlyCollection[T], typing.Generic[T]):
@property
def Comparer(self) -> IComparer[T]:"""Comparer { get; } -> IComparer"""
@property
def Max(self) -> T:"""Max { get; } -> T"""
@property
def Min(self) -> T:"""Min { get; } -> T"""
def __init__(self, comparer: IComparer[T]) -> SortedSet:...
def __init__(self, collection: IEnumerable[T], comparer: IComparer[T]) -> SortedSet:...
def __init__(self, collection: IEnumerable[T]) -> SortedSet:...
def __init__(self) -> SortedSet:...
@staticmethod
def CreateSetComparer(memberEqualityComparer: IEqualityComparer[T]) -> IEqualityComparer[SortedSet[T]]:...
@staticmethod
def CreateSetComparer() -> IEqualityComparer[SortedSet[T]]:...
def GetViewBetween(self, lowerValue: T, upperValue: T) -> SortedSet[T]:...
def RemoveWhere(self, match: _n_0_t_2[T]) -> int:...
def Reverse(self) -> IEnumerable[T]:...
def TryGetValue(self, equalValue: T, actualValue: object) -> bool:...
class Enumerator(_n_0_t_1, IEnumerator[T], _n_5_t_0, _n_5_t_1, typing.Generic[T]):
pass
class Stack(IEnumerable[T], _n_1_t_3, IReadOnlyCollection[T], typing.Generic[T]):
def __init__(self, collection: IEnumerable[T]) -> Stack:...
def __init__(self, capacity: int) -> Stack:...
def __init__(self) -> Stack:...
def Clear(self):...
def Contains(self, item: T) -> bool:...
def Peek(self) -> T:...
def Pop(self) -> T:...
def Push(self, item: T):...
def ToArray(self) -> _n_0_t_3[T]:...
def TrimExcess(self):...
class Enumerator(_n_0_t_1, IEnumerator[T], typing.Generic[T]):
pass
|
python
|
from pytorch.schedulers.imports import *
from system.imports import *
@accepts(dict, post_trace=True)
@TraceFunction(trace_args=False, trace_rv=False)
def load_scheduler(system_dict):
learning_rate_scheduler = system_dict["local"]["learning_rate_scheduler"];
optimizer = system_dict["local"]["optimizer"];
if(learning_rate_scheduler == "steplr"):
system_dict["local"]["learning_rate_scheduler"] = torch.optim.lr_scheduler.StepLR(
optimizer,
system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["step_size"],
gamma=system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["gamma"],
last_epoch=system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["last_epoch"]);
elif(learning_rate_scheduler == "multisteplr"):
system_dict["local"]["learning_rate_scheduler"] = torch.optim.lr_scheduler.MultiStepLR(
optimizer,
system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["milestones"],
gamma=system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["gamma"],
last_epoch=system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["last_epoch"]);
elif(learning_rate_scheduler == "exponentiallr"):
system_dict["local"]["learning_rate_scheduler"] = torch.optim.lr_scheduler.ExponentialLR(
optimizer,
system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["gamma"],
last_epoch=system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["last_epoch"]);
elif(learning_rate_scheduler == "reduceonplateaulr"):
system_dict["local"]["learning_rate_scheduler"] = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer,
mode=system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["mode"],
factor=system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["factor"],
patience=system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["patience"],
verbose=system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["verbose"],
threshold=system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["threshold"],
threshold_mode=system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["threshold_mode"],
cooldown=system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["cooldown"],
min_lr=system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["min_lr"],
eps=system_dict["hyper-parameters"]["learning_rate_scheduler"]["params"]["epsilon"]);
return system_dict;
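# --- Hedged example (added for illustration) ---
# Minimal sketch of the system_dict layout load_scheduler() reads for the
# "steplr" branch; the optimizer and model below are placeholders, so this
# stays commented rather than executed on import.
#
# system_dict = {
#     "local": {
#         "learning_rate_scheduler": "steplr",
#         "optimizer": torch.optim.SGD(model.parameters(), lr=0.01),
#     },
#     "hyper-parameters": {
#         "learning_rate_scheduler": {
#             "params": {"step_size": 10, "gamma": 0.1, "last_epoch": -1}
#         }
#     },
# }
# system_dict = load_scheduler(system_dict)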
|
python
|
#!/usr/local/bin/python3
# A wrapper to test query v2 API
# Advantage: directly replace the `query` variable with any SQL string
# to run the test. On the command line, the SQL has to be on one line
# to ensure nothing goes wrong, which is cumbersome.
import subprocess
import sys
import os
MY_ENV = os.environ.copy()
COMMAND_TEMPLATE = """
aws timestream-query query-v2 --query-string "{}" --endpoint-url "https://gamma-query-cell2.timestream.us-west-2.amazonaws.com" --region us-west-2
"""
query = """
SELECT region
FROM
(VALUES
('abc')
)
AS testtb(region)
"""
def main(argv=sys.argv[1:]):
"""main program
"""
try:
cmd = COMMAND_TEMPLATE.format(query)
print(f'test query: {query}')
popen = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
universal_newlines=True, env=MY_ENV)
for stdout_line in iter(popen.stdout.readline, ""):
print(f'got line from subprocess: {stdout_line}')
popen.stdout.close()
return_code = popen.wait()
except(KeyboardInterrupt, EOFError):
print()
print('[Interrupted.]')
return_code = 130
return return_code
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
python
|
from sys import stdin
def main():
    # Read a number and print its digits in descending order (no trailing newline).
    digits = sorted(map(int, stdin.readline().strip()), reverse=True)
    print("".join(str(d) for d in digits), end="")
if __name__ == "__main__":
main()
|
python
|
"""
Description:
Defines the QAOACustom and CircuitSamplerCustom classes that replace the
qiskit QAOA and CircuitSampler classes respectively.
It is more easily customised than qiskit's built in ones and includes a variety of helper methods.
Author: Gary Mooney
Adapted from Qiskit 0.26.2 documentation
Example 1: Full usage example.
from QAOAEx import (QAOACustom, convert_to_fourier_point, print_qaoa_solutions,
get_quadratic_program_from_ising_hamiltonian_terms,
output_ising_graph, get_ising_graph_from_ising_hamiltonian_terms,
convert_from_fourier_point)
backend = Aer.get_backend('aer_simulator_matrix_product_state')
quantum_instance = QuantumInstance(backend, shots=8192)
optimizer = NELDER_MEAD()
couplings = [(0, 1, -1.0), (0, 2, 1.0), (1, 2, 1.0), (2, 3, -1.0), (0, 3, 0.5)] # formatted as List[Tuple[int, int, float]]
local_fields = {0: 0.2, 1: -0.3, 2: 0.0, 3: 0.5} # formatted as Mapping[int, float]
constant_term = 1.0 # formatted as float
quadratic_program = get_quadratic_program_from_ising_hamiltonian_terms(couplings = couplings,
local_fields = local_fields,
constant_term = constant_term,
output_ising_graph_filename = "example-ising_graph")
qaoa_instance = QAOACustom(quantum_instance = quantum_instance,
reps = 2,
force_shots = False,
optimizer = optimizer,
qaoa_name = "example_qaoa")
operator, offset = quadratic_program.to_ising()
initial_point = [0.40784, 0.73974, -0.53411, -0.28296]
print()
print("Solving QAOA...")
qaoa_results = qaoa_instance.solve(operator, initial_point)
qaoa_results_eigenstate = qaoa_results.eigenstate
print("optimal_value:", qaoa_results.optimal_value)
print("optimal_parameters:", qaoa_results.optimal_parameters)
print("optimal_point:", qaoa_results.optimal_point)
print("optimizer_evals:", qaoa_results.optimizer_evals)
solutions = qaoa_instance.get_optimal_solutions_from_statevector(qaoa_results_eigenstate, quadratic_program)
    print_qaoa_solutions(solutions)
Example 2: Solve QAOA using an initial point specified in Fourier space.
# initial Fourier space point, will be converted to a typical point using
# 'convert_from_fourier_point' as per previous line
initial_fourier_point = [0.5, 0.7]
# bounds used for the optimiser
bounds = [(-1, 1)] * len(initial_fourier_point)
qaoa_results = qaoa_instance.solve(operator, initial_fourier_point, bounds)
optimal_parameterised_point = qaoa_instance.latest_parameterised_point
Example 3: Post process raw data. This is how QREM could be applied.
Before the line 'qaoa_results = qaoa_instance.solve(operator, initial_point)'
in Example 1, add the following.
# Define a method to process the counts dict. In this case it simply calculates and prints the shot counts.
def print_shot_count(raw_counts_data):
shot_count = None
if len(raw_counts_data) > 0:
if isinstance(raw_counts_data[0], dict):
shot_count = sum(raw_counts_data[0].values())
elif isinstance(raw_counts_data[0], list) and len(raw_counts_data[0]) > 0:
shot_count = sum(raw_counts_data[0][0].values())
else:
            raise Exception("Error: Wrong format 'raw_counts_data', expecting List[Dict] or List[List[Dict]]")
print("Raw data shot count:", shot_count)
return raw_counts_data
# set the raw data processing method. If using a statevector simulator
# with force_shot = False, (in qaoa_instance) then raw processing will not be used.
qaoa_instance.set_post_process_raw_data(print_shot_count)
"""
import logging
import time as time
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple, Union
import matplotlib.pyplot as plt
import networkx as nx # tool to handle general Graphs
import numpy as np
import math as math
from qiskit import QiskitError
from qiskit.algorithms import QAOA
from qiskit.algorithms.exceptions import AlgorithmError
from qiskit.algorithms.minimum_eigen_solvers.minimum_eigen_solver import (
MinimumEigensolverResult,
)
from qiskit.algorithms.minimum_eigen_solvers.vqe import VQE
from qiskit.algorithms.optimizers import Optimizer
from qiskit.algorithms.variational_algorithm import (
VariationalAlgorithm,
VariationalResult,
)
from qiskit.circuit import ClassicalRegister, Parameter, QuantumCircuit
from qiskit.circuit.library.n_local.qaoa_ansatz import QAOAAnsatz
# from qiskit.algorithms.minimum_eigen_solvers
from qiskit.opflow import (
CircuitSampler,
CircuitStateFn,
DictStateFn,
ExpectationBase,
I,
OperatorBase,
StateFn,
)
from qiskit.opflow.exceptions import OpflowError
from qiskit.opflow.gradients import GradientBase
from qiskit.providers import Backend, BaseBackend
from qiskit.quantum_info import Statevector
from qiskit.tools.visualization import circuit_drawer
from qiskit.utils import algorithm_globals
from qiskit.utils.backend_utils import is_aer_provider
from qiskit.utils.quantum_instance import QuantumInstance
from qiskit.utils.validation import validate_min
from qiskit.visualization import plot_histogram
from qiskit_optimization import QuadraticProgram
logger = logging.getLogger(__name__)
###############
### Classes
###############
class CircuitSamplerCustom(CircuitSampler):
# a function pointer that processes returned results from execution when sample_circuits is called.
# post_process_raw_data(Result) -> Result
_post_process_raw_data: Optional[
Callable[
[Union[List[Dict[str, int]], List[List[Dict[str, int]]]]],
Union[List[Dict[str, int]], List[List[Dict[str, int]]]],
]
] = None
_shots = None
_log_text = print
_force_shots = False
_sampler_name = ""
_output_circuit_when_sample = False
def __init__(
self,
backend: Union[Backend, BaseBackend, QuantumInstance],
statevector: Optional[bool] = None,
param_qobj: bool = False,
attach_results: bool = False,
caching: str = 'last',
sampler_name: str = "",
force_shots: bool = False,
output_circuit_when_sample: bool = False,
log_text: Optional[Callable[..., Any]] = print,
) -> None:
"""
Args:
backend: The quantum backend or QuantumInstance to use to sample the circuits.
statevector: If backend is a statevector backend, whether to replace the
CircuitStateFns with DictStateFns (from the counts) or VectorStateFns (from the
statevector). ``None`` will set this argument automatically based on the backend.
param_qobj: Whether to use Aer's parameterized Qobj capability to avoid re-assembling
the circuits.
attach_results: Whether to attach the data from the backend ``Results`` object for
                a given ``CircuitStateFn`` to an ``execution_results`` field added to the converted
``DictStateFn`` or ``VectorStateFn``.
caching: The caching strategy. Can be `'last'` (default) to store the last operator
that was converted, set to `'all'` to cache all processed operators.
sampler_name: Name used when outputting text or files to help identify CircuitSamplerCustom instance.
force_shots: If quantum instance returns a statevector, then convert into shots instead.
output_circuit_when_sample: Whether to output circuit using circuit_drawer whenever circuit is sampled.
log_text: Used for text output, replacement to the default print method to make logging easy.
If None, no text output can occur.
Raises:
ValueError: Set statevector or param_qobj True when not supported by backend.
"""
super().__init__(
backend=backend,
statevector=statevector,
param_qobj=param_qobj,
attach_results=attach_results,
caching=caching,
)
self._sampler_name = sampler_name
self._log_text = log_text
        # determines whether to use the statevector directly from the simulation when available.
        # If force_shots is True, counts are sampled from the statevector instead (default 8192 shots).
self._force_shots = force_shots
self._output_circuit_when_sample = output_circuit_when_sample
def set_post_process_raw_data(
self,
post_process_raw_data_method: Optional[
Callable[
[Union[List[Dict[str, int]], List[List[Dict[str, int]]]]],
Union[List[Dict[str, int]], List[List[Dict[str, int]]]],
]
],
    ) -> None:
        """Uses the specified method to process the raw sampled data executed on the backend whenever circuits are sampled.
Args:
post_process_raw_data_method: The method to process the data.
                Inputs a list of counts dicts List[Dict[str, int]] and outputs the processed list of count dicts List[Dict[str, int]].
The data could potentially be formatted as a list of a list of dictionaries List[List[Dict[str, int]]]. However, this
                will likely not happen without modifying QAOA to do so.
Each dictionary has the counts for each qubit with the keys containing a string in binary format and separated
according to the registers in circuit (e.g. ``0100 1110``). The string is little-endian (cr[0] on the right hand side).
However there will likely only be a single register without modifying QAOA, so the state bitstring should have no spaces.
"""
self._post_process_raw_data = post_process_raw_data_method
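    # Illustrative sketch of a post-processing hook (assumed helper name, not part of this
    # class): drop rare measurement outcomes from each counts dict before they are turned
    # into state functions, using the List[Dict[str, int]] format described above.
    #
    #   def drop_rare_states(raw_counts_data, min_count=2):
    #       return [
    #           {state: n for state, n in counts.items() if n >= min_count}
    #           for counts in raw_counts_data
    #       ]
    #
    #   sampler.set_post_process_raw_data(drop_rare_states)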
def sample_circuits(
self,
circuit_sfns: Optional[List[CircuitStateFn]] = None,
param_bindings: Optional[List[Dict[Parameter, float]]] = None,
) -> Dict[int, List[StateFn]]:
r"""
Samples the CircuitStateFns and returns a dict associating their ``id()`` values to their
replacement DictStateFn or VectorStateFn. If param_bindings is provided,
the CircuitStateFns are broken into their parameterizations, and a list of StateFns is
returned in the dict for each circuit ``id()``. Note that param_bindings is provided here
in a different format than in ``convert``, and lists of parameters within the dict is not
supported, and only binding dicts which are valid to be passed into Terra can be included
        in this list. (Overrides method)
Args:
circuit_sfns: The list of CircuitStateFns to sample.
param_bindings: The parameterizations to bind to each CircuitStateFn.
Returns:
The dictionary mapping ids of the CircuitStateFns to their replacement StateFns.
Raises:
OpflowError: if extracted circuits are empty.
"""
if not circuit_sfns and not self._transpiled_circ_cache:
raise OpflowError('CircuitStateFn is empty and there is no cache.')
#############
# NOTE:
# Can modify circuits before execution here.
# can even manually transpile to specific qubit layout.
#############
if circuit_sfns:
self._transpiled_circ_templates = None
if self._statevector:
circuits = [op_c.to_circuit(meas=False) for op_c in circuit_sfns]
else:
circuits = [op_c.to_circuit(meas=True) for op_c in circuit_sfns]
####### Saving circuit
if self._output_circuit_when_sample == True:
filename = "quantum-circuit-" + self._sampler_name + "-params"
for _, value in param_bindings[0].items():
filename += "-" + str(int(1000 * value))
if self._log_text != None:
self._log_text("Saving circuit '" + filename + "'...")
fig = circuit_drawer(circuits[0], filename=filename, output='mpl')
plt.close(fig)
#######
try:
self._transpiled_circ_cache = self.quantum_instance.transpile(circuits)
except QiskitError:
logger.debug(
r'CircuitSampler failed to transpile circuits with unbound '
r'parameters. Attempting to transpile only when circuits are bound '
r'now, but this can hurt performance due to repeated transpilation.'
)
self._transpile_before_bind = False
self._transpiled_circ_cache = circuits
else:
circuit_sfns = list(self._circuit_ops_cache.values())
if param_bindings is not None:
# if fourier method, then convert param_bindings to another param_bindings, usually larger.
if self._param_qobj:
start_time = time.time()
ready_circs = self._prepare_parameterized_run_config(param_bindings)
end_time = time.time()
logger.debug(
'Parameter conversion %.5f (ms)', (end_time - start_time) * 1000
)
else:
start_time = time.time()
ready_circs = [
circ.assign_parameters(
CircuitSamplerCustom._filter_params(circ, binding)
)
for circ in self._transpiled_circ_cache
for binding in param_bindings
]
end_time = time.time()
logger.debug(
'Parameter binding %.5f (ms)', (end_time - start_time) * 1000
)
else:
ready_circs = self._transpiled_circ_cache
results = self.quantum_instance.execute(
ready_circs, had_transpiled=self._transpile_before_bind
)
if param_bindings is not None and self._param_qobj:
self._clean_parameterized_run_config()
# Wipe parameterizations, if any
# self.quantum_instance._run_config.parameterizations = None
#############
# NOTE:
# Can apply QREM here. But we need to know which qubits were used in order to apply...
# results.get_counts(circ_index)
# will need to convert results in case it's a statevector.
#############
counts_dicts = []
for i, op_c in enumerate(circuit_sfns):
# Taking square root because we're replacing a statevector
# representation of probabilities.
reps = len(param_bindings) if param_bindings is not None else 1
c_statefns = []
for j in range(reps):
circ_index = (i * reps) + j
# counts_dicts[circ_index] = results.get_counts(circ_index)
circ_results = results.data(circ_index)
# statevector = results.get_statevector(circ_index)
if 'expval_measurement' in circ_results.get('snapshots', {}).get(
'expectation_value', {}
):
if self.quantum_instance.run_config.shots != None:
shots = self.quantum_instance.run_config.shots
else:
shots = 8192
counts_dicts.append(
Statevector(results.get_statevector(circ_index)).sample_counts(
shots
)
)
# print("DEBUG: From statevector (1): " + str(shots) + " shots")
elif self._statevector:
if self.quantum_instance.run_config.shots != None:
shots = self.quantum_instance.run_config.shots
else:
shots = 8192
counts_dicts.append(
Statevector(results.get_statevector(circ_index)).sample_counts(
shots
)
)
# print("counts_dicts[circ_index]", counts_dicts[circ_index])
# if self._force_shots == True:
# print("DEBUG: From statevector (2): " + str(shots) + " shots")
# else:
# print("DEBUG: From statevector (2) - using statevector")
else:
counts_dicts.append(results.get_counts(circ_index))
# print("counts_dicts[circ_index]", counts_dicts[circ_index])
shots = 0
for count in counts_dicts[circ_index].values():
shots += count
# print("DEBUG: From counts: " + str(shots) + " shots")
# print("counts_dicts:", counts_dicts)
#############
### Post process raw counts
### NOTE: counts_dicts could be formatted as
### List[Dict[str, int]] or List[List[Dict[str, int]]]: a list of dictionaries or a list of
### a list of dictionaries. A dictionary has the counts for each qubit with
### the keys containing a string in binary format and separated
### according to the registers in circuit (e.g. ``0100 1110``).
### The string is little-endian (cr[0] on the right hand side).
###
### However the format will most likely always be List[Dict[str, int]]
### with a single register, so the state bitstring will have no spaces.
#############
counts_dicts_new = None
if self._post_process_raw_data != None:
if (
self._force_shots == False
and self._statevector
and self._log_text != None
):
                self._log_text(
                    "WARNING: post_process_raw_data method cannot execute on a statevector, set force_shots to True or don't use the statevector simulator."
)
counts_dicts_new = self._post_process_raw_data(counts_dicts)
else:
counts_dicts_new = counts_dicts
#############
sampled_statefn_dicts = {}
for i, op_c in enumerate(circuit_sfns):
# Taking square root because we're replacing a statevector
# representation of probabilities.
reps = len(param_bindings) if param_bindings is not None else 1
c_statefns = []
for j in range(reps):
circ_index = (i * reps) + j
circ_results = results.data(circ_index)
if self._force_shots == False:
if 'expval_measurement' in circ_results.get('snapshots', {}).get(
'expectation_value', {}
):
snapshot_data = results.data(circ_index)['snapshots']
avg = snapshot_data['expectation_value']['expval_measurement'][
0
]['value']
if isinstance(avg, (list, tuple)):
# Aer versions before 0.4 use a list snapshot format
# which must be converted to a complex value.
avg = avg[0] + 1j * avg[1]
# Will be replaced with just avg when eval is called later
num_qubits = circuit_sfns[0].num_qubits
result_sfn = (
DictStateFn(
'0' * num_qubits, is_measurement=op_c.is_measurement
)
* avg
)
elif self._statevector:
result_sfn = StateFn(
op_c.coeff * results.get_statevector(circ_index),
is_measurement=op_c.is_measurement,
)
else:
shots = self.quantum_instance._run_config.shots
result_sfn = StateFn(
{
b: (v / shots) ** 0.5 * op_c.coeff
for (b, v) in counts_dicts_new[circ_index].items()
},
is_measurement=op_c.is_measurement,
)
else:
# result_sfn = ConvertCountsToStateFunction(counts_dicts_new[circ_index], shots=None, op_c=op_c)
shots = 0
for _, count in counts_dicts_new[circ_index].items():
shots += count
result_sfn = StateFn(
{
b: (v / shots) ** 0.5 * op_c.coeff
for (b, v) in counts_dicts_new[circ_index].items()
},
is_measurement=op_c.is_measurement,
)
# use statefn instead of dictstatefn
if self._statevector:
result_sfn = result_sfn.to_matrix_op(massive=True)
if self._attach_results:
result_sfn.execution_results = circ_results
c_statefns.append(result_sfn)
sampled_statefn_dicts[id(op_c)] = c_statefns
return sampled_statefn_dicts
class QAOACustom(QAOA):
# a function pointer that processes returned results from execution when sample_circuits is called.
# post_process_raw_data(Result) -> Result
_post_process_raw_data: Optional[
Callable[
[Union[List[Dict[str, int]], List[List[Dict[str, int]]]]],
Union[List[Dict[str, int]], List[List[Dict[str, int]]]],
]
] = None
_qaoa_name = ""
_force_shots = False
_log_text = print
_output_circuit_when_sample = False
_reps = 1
_mixer = None
_initial_state = None
_optimiser_parameter_bounds = None
_parameterise_point_for_energy_evaluation: Callable[
[Union[List[float], np.ndarray], int], List[float]
] = None
# After solving/optimising using a custom parameterisation, the member 'latest_parameterised_point' should
# contain the solution parameterised point returned by the optimiser.
latest_parameterised_point = None
def __init__(
self,
optimizer: Optimizer = None,
reps: int = 1,
initial_state: Optional[QuantumCircuit] = None,
mixer: Union[QuantumCircuit, OperatorBase] = None,
initial_point: Union[List[float], np.ndarray, None] = None,
gradient: Optional[
Union[GradientBase, Callable[[Union[np.ndarray, List]], List]]
] = None,
expectation: Optional[ExpectationBase] = None,
include_custom: bool = False,
max_evals_grouped: int = 1,
callback: Optional[Callable[[int, np.ndarray, float, float], None]] = None,
quantum_instance: Optional[Union[QuantumInstance, BaseBackend, Backend]] = None,
qaoa_name: str = "",
force_shots: bool = False,
output_circuit_when_sample: bool = False,
log_text: Optional[Callable[..., Any]] = print,
) -> None:
"""
Args:
optimizer: A classical optimizer.
reps: the integer parameter :math:`p` as specified in https://arxiv.org/abs/1411.4028,
Has a minimum valid value of 1.
initial_state: An optional initial state to prepend the QAOA circuit with
mixer: the mixer Hamiltonian to evolve with or a custom quantum circuit. Allows support
of optimizations in constrained subspaces as per https://arxiv.org/abs/1709.03489
as well as warm-starting the optimization as introduced
in http://arxiv.org/abs/2009.10095.
initial_point: An optional initial point (i.e. initial parameter values)
for the optimizer. If ``None`` then it will simply compute a random one.
QAOA parameters (a list ordered as: [all_ZZ_gamma_values] + [all_X_beta_values]).
gradient: An optional gradient operator respectively a gradient function used for
optimization.
expectation: The Expectation converter for taking the average value of the
Observable over the ansatz state function. When None (the default) an
:class:`~qiskit.opflow.expectations.ExpectationFactory` is used to select
an appropriate expectation based on the operator and backend. When using Aer
qasm_simulator backend, with paulis, it is however much faster to leverage custom
Aer function for the computation but, although VQE performs much faster
with it, the outcome is ideal, with no shot noise, like using a state vector
simulator. If you are just looking for the quickest performance when choosing Aer
qasm_simulator and the lack of shot noise is not an issue then set `include_custom`
parameter here to True (defaults to False).
include_custom: When `expectation` parameter here is None setting this to True will
allow the factory to include the custom Aer pauli expectation.
max_evals_grouped: Max number of evaluations performed simultaneously. Signals the
given optimizer that more than one set of parameters can be supplied so that
potentially the expectation values can be computed in parallel. Typically this is
possible when a finite difference gradient is used by the optimizer such that
multiple points to compute the gradient can be passed and if computed in parallel
improve overall execution time. Ignored if a gradient operator or function is
given.
callback: a callback that can access the intermediate data during the optimization.
Four parameter values are passed to the callback as follows during each evaluation
by the optimizer for its current set of parameters as it works towards the minimum.
These are: the evaluation count, the optimizer parameters for the
ansatz, the evaluated mean and the evaluated standard deviation.
quantum_instance: Quantum Instance or Backend
qaoa_name: Name to identify this QAOAEx instance when logging or outputting files.
force_shots: If quantum instance returns a statevector, then convert into shots instead.
output_circuit_when_sample: Whether to output circuit using circuit_drawer whenever circuit is sampled.
log_text: Used for text output, replacement to the default print method to make logging easy.
If None, no text output can occur.
"""
validate_min('reps', reps, 1)
self._qaoa_name = qaoa_name
self._reps = reps
self._mixer = mixer
self._initial_state = initial_state
self._force_shots = force_shots
self._log_text = log_text
self._output_circuit_when_sample = output_circuit_when_sample
# VQE will use the operator setter, during its constructor, which is overridden below and
# will cause the var form to be built
super(QAOA, self).__init__(
ansatz=None,
optimizer=optimizer,
initial_point=initial_point,
gradient=gradient,
expectation=expectation,
include_custom=include_custom,
max_evals_grouped=max_evals_grouped,
callback=callback,
quantum_instance=quantum_instance,
)
@VariationalAlgorithm.quantum_instance.setter
def quantum_instance(
self, quantum_instance: Union[QuantumInstance, BaseBackend, Backend]
    ) -> None:
        """Set quantum_instance. (Overrides method)"""
super(VQE, self.__class__).quantum_instance.__set__(self, quantum_instance)
self._circuit_sampler = CircuitSamplerCustom(
self._quantum_instance,
param_qobj=is_aer_provider(self._quantum_instance.backend),
sampler_name=self._qaoa_name,
force_shots=self._force_shots,
output_circuit_when_sample=self._output_circuit_when_sample,
log_text=self._log_text,
)
self._circuit_sampler.set_post_process_raw_data(self._post_process_raw_data)
def find_minimum(
self,
initial_point: Optional[np.ndarray] = None,
ansatz: Optional[QuantumCircuit] = None,
cost_fn: Optional[Callable] = None,
optimizer: Optional[Optimizer] = None,
gradient_fn: Optional[Callable] = None,
) -> 'VariationalResult':
"""Optimize to find the minimum cost value.
Args:
initial_point: If not `None` will be used instead of any initial point supplied via
constructor. If `None` and `None` was supplied to constructor then a random
point will be used if the optimizer requires an initial point.
ansatz: If not `None` will be used instead of any ansatz supplied via constructor.
cost_fn: If not `None` will be used instead of any cost_fn supplied via
constructor.
optimizer: If not `None` will be used instead of any optimizer supplied via
constructor.
gradient_fn: Optional gradient function for optimizer
Returns:
dict: Optimized variational parameters, and corresponding minimum cost value.
Raises:
ValueError: invalid input
"""
initial_point = (
initial_point if initial_point is not None else self.initial_point
)
ansatz = ansatz if ansatz is not None else self.ansatz
cost_fn = cost_fn if cost_fn is not None else self._cost_fn
optimizer = optimizer if optimizer is not None else self.optimizer
if ansatz is None:
raise ValueError('Ansatz neither supplied to constructor nor find minimum.')
if cost_fn is None:
raise ValueError(
'Cost function neither supplied to constructor nor find minimum.'
)
if optimizer is None:
raise ValueError(
'Optimizer neither supplied to constructor nor find minimum.'
)
nparms = ansatz.num_parameters
if self._optimiser_parameter_bounds == None:
if (
hasattr(ansatz, 'parameter_bounds')
and ansatz.parameter_bounds is not None
):
bounds = ansatz.parameter_bounds
else:
bounds = [(None, None)] * len(self.initial_point)
else:
bounds = self._optimiser_parameter_bounds
# if initial_point is not None and len(initial_point) != nparms:
# raise ValueError(
# 'Initial point size {} and parameter size {} mismatch'.format(
# len(initial_point), nparms))
if len(bounds) != len(self.initial_point):
bounds = [(None, None)] * len(self.initial_point)
print(
"WARNING: Ansatz bounds size does not match parameter size (len(self.initial_point)), setting bounds to (None, None)"
)
# raise ValueError('Ansatz bounds size does not match parameter size (len(self.initial_point))')
# If *any* value is *equal* in bounds array to None then the problem does *not* have bounds
problem_has_bounds = not np.any(np.equal(bounds, None))
# Check capabilities of the optimizer
if problem_has_bounds:
if not optimizer.is_bounds_supported:
raise ValueError(
'Problem has bounds but optimizer does not support bounds'
)
else:
if optimizer.is_bounds_required:
raise ValueError(
'Problem does not have bounds but optimizer requires bounds'
)
if initial_point is not None:
if not optimizer.is_initial_point_supported:
raise ValueError('Optimizer does not support initial point')
else:
if optimizer.is_initial_point_required:
if hasattr(ansatz, 'preferred_init_points'):
# Note: default implementation returns None, hence check again after below
initial_point = ansatz.preferred_init_points
if initial_point is None: # If still None use a random generated point
low = [(l if l is not None else -2 * np.pi) for (l, u) in bounds]
high = [(u if u is not None else 2 * np.pi) for (l, u) in bounds]
initial_point = algorithm_globals.random.uniform(low, high)
start = time.time()
if not optimizer.is_gradient_supported: # ignore the passed gradient function
gradient_fn = None
else:
if not gradient_fn:
gradient_fn = self._gradient
logger.info(
'Starting optimizer.\nbounds=%s\ninitial point=%s', bounds, initial_point
)
opt_params, opt_val, num_optimizer_evals = optimizer.optimize(
len(self.initial_point),
cost_fn,
variable_bounds=bounds,
initial_point=initial_point,
gradient_function=gradient_fn,
)
if self._parameterise_point_for_energy_evaluation != None:
self.latest_parameterised_point = (
self._parameterise_point_for_energy_evaluation(opt_params, nparms)
)
eval_time = time.time() - start
result = VariationalResult()
result.optimizer_evals = num_optimizer_evals
result.optimizer_time = eval_time
result.optimal_value = opt_val
result.optimal_point = opt_params
result.optimal_parameters = dict(zip(self._ansatz_params, opt_params))
return result
def eigenvector_to_solutions(
self,
eigenvector: Union[dict, np.ndarray, StateFn],
quadratic_program: QuadraticProgram,
min_probability: float = 1e-6,
    ) -> List[Tuple[str, float, float]]:
        """Convert the eigenvector to a list of solution 3-tuples (bitstring, quadratic_function_objective_value, probability). (Overrides method)
Args:
eigenvector: The eigenvector from which the solution states are extracted.
            quadratic_program: The quadratic program to evaluate at the bitstring.
min_probability: Only consider states where the amplitude exceeds this threshold.
Returns:
A list with elements for each computational basis state contained in the eigenvector.
Each element is a 3-tuple:
(state as bitstring (str),
            quadratic program evaluated at that bitstring (float),
probability of sampling this bitstring from the eigenvector (float)
).
Raises:
TypeError: If the type of eigenvector is not supported.
"""
if isinstance(eigenvector, DictStateFn):
eigenvector = {
bitstr: val ** 2 for (bitstr, val) in eigenvector.primitive.items()
}
elif isinstance(eigenvector, StateFn):
eigenvector = eigenvector.to_matrix()
solutions = []
if isinstance(eigenvector, dict):
# iterate over all samples
for bitstr, amplitude in eigenvector.items():
sampling_probability = amplitude * amplitude
# add the bitstring, if the sampling probability exceeds the threshold
if sampling_probability > 0:
if sampling_probability >= min_probability:
# I've reversed the qubits here, I think they were the wrong order.
value = quadratic_program.objective.evaluate(
[int(bit) for bit in bitstr[::-1]]
)
solutions += [(bitstr[::-1], value, sampling_probability)]
elif isinstance(eigenvector, np.ndarray):
num_qubits = int(np.log2(eigenvector.size))
probabilities = np.abs(eigenvector * eigenvector.conj())
# iterate over all states and their sampling probabilities
for i, sampling_probability in enumerate(probabilities):
# add the i-th state if the sampling probability exceeds the threshold
if sampling_probability > 0:
if sampling_probability >= min_probability:
bitstr = '{:b}'.format(i).rjust(num_qubits, '0')[::-1]
value = quadratic_program.objective.evaluate(
[int(bit) for bit in bitstr]
)
solutions += [(bitstr, value, sampling_probability)]
else:
raise TypeError(
'Unsupported format of eigenvector. Provide a dict or numpy.ndarray.'
)
return solutions
def _energy_evaluation(
self, parameters: Union[List[float], np.ndarray]
) -> Union[float, List[float]]:
"""Evaluate energy at given parameters for the ansatz. This is the objective function
        to be passed to the optimizer that is used for evaluation. (Overrides method)
Args:
parameters: The parameters for the ansatz.
Returns:
Energy of the hamiltonian of each parameter.
Raises:
RuntimeError: If the ansatz has no parameters.
"""
num_parameters = self.ansatz.num_parameters
if self._parameterise_point_for_energy_evaluation != None:
self.latest_parameterised_point = parameters
parameters = self._parameterise_point_for_energy_evaluation(
parameters, num_parameters
)
if self._ansatz.num_parameters == 0:
raise RuntimeError('The ansatz cannot have 0 parameters.')
parameter_sets = np.reshape(parameters, (-1, num_parameters))
# Create dict associating each parameter with the lists of parameterization values for it
param_bindings = dict(
zip(self._ansatz_params, parameter_sets.transpose().tolist())
) # type: Dict
start_time = time.time()
# self._log_text("self._expect_op:", self._expect_op)
sampled_expect_op = self._circuit_sampler.convert(
self._expect_op, params=param_bindings
)
means = np.real(sampled_expect_op.eval())
if self._callback is not None:
variance = np.real(self._expectation.compute_variance(sampled_expect_op))
estimator_error = np.sqrt(variance / self.quantum_instance.run_config.shots)
for i, param_set in enumerate(parameter_sets):
self._eval_count += 1
self._callback(
self._eval_count, param_set, means[i], estimator_error[i]
)
else:
self._eval_count += len(means)
end_time = time.time()
logger.info(
'Energy evaluation returned %s - %.5f (ms), eval count: %s',
means,
(end_time - start_time) * 1000,
self._eval_count,
)
return means if len(means) > 1 else means[0]
def _prepare_for_optisation(
self,
operator: OperatorBase,
aux_operators: Optional[List[Optional[OperatorBase]]] = None,
) -> None:
"""Prepares the QAOA instance to perform simulation without needing to run the optimisation loop. (New method)
Args:
operator: The operator (usually obtained from QuadraticProgram.to_ising()).
"""
# super(VQE, self).compute_minimum_eigenvalue(operator, aux_operators)
if self.quantum_instance is None:
raise AlgorithmError(
"A QuantumInstance or Backend "
"must be supplied to run the quantum algorithm."
)
if operator is None:
raise AlgorithmError("The operator was never provided.")
# operator = self._check_operator(operator)
# The following code "operator = self._check_operator(operator)" was not working correctly here since it is meant to replace the operator.
# So instead, using below code to manually update the ansatz.
self.ansatz = QAOAAnsatz(
operator,
self._reps,
initial_state=self._initial_state,
mixer_operator=self._mixer,
)
# We need to handle the array entries being Optional i.e. having value None
if aux_operators:
zero_op = I.tensorpower(operator.num_qubits) * 0.0
converted = []
for op in aux_operators:
if op is None:
converted.append(zero_op)
else:
converted.append(op)
# For some reason Chemistry passes aux_ops with 0 qubits and paulis sometimes.
aux_operators = [zero_op if op == 0 else op for op in converted]
else:
aux_operators = None
self._quantum_instance.circuit_summary = True
self._eval_count = 0
# Convert the gradient operator into a callable function that is compatible with the
# optimization routine.
if self._gradient:
if isinstance(self._gradient, GradientBase):
self._gradient = self._gradient.gradient_wrapper(
~StateFn(operator) @ StateFn(self._ansatz),
bind_params=self._ansatz_params,
backend=self._quantum_instance,
)
# if not self._expect_op:
self._expect_op = self.construct_expectation(self._ansatz_params, operator)
def calculate_statevector_at_point(
self,
operator: OperatorBase,
point: Union[List[float], np.ndarray],
force_shots: bool = False,
sample_shots: int = 8192,
) -> Union[Dict[str, float], List[float], np.ndarray]:
"""Prepares for QAOA simulation and calculates the statevector for the given point. (New method)
Args:
operator: The operator (usually obtained from QuadraticProgram.to_ising()).
point: The QAOA parameters (a list ordered as: [all_ZZ_gamma_values] + [all_X_beta_values]).
force_shots: If simulating using a statevector, should a new statevector be formed by sampling from it?
sample_shots: If force_shots is True, how many shots to sample?
Returns:
The resulting statevector. Might be a dict or an ndarray, depending on which
simulator is used and whether the statevector is being sampled or not.
When statevector sim is used, returns an ndarray, otherwise returns a dict.
"""
from qiskit.utils.run_circuits import find_regs_by_name
self._prepare_for_optisation(operator)
qc = self.ansatz.assign_parameters(point)
statevector = {}
if self._quantum_instance.is_statevector:
ret = self._quantum_instance.execute(qc)
statevector = ret.get_statevector(qc)
if force_shots == True:
counts = Statevector(ret.get_statevector(qc)).sample_counts(
sample_shots
)
statevector = {}
for state in counts.keys():
statevector[state] = (counts[state] / sample_shots) ** 0.5
else:
c = ClassicalRegister(qc.width(), name='c')
q = find_regs_by_name(qc, 'q')
qc.add_register(c)
qc.barrier(q)
qc.measure(q, c)
ret = self._quantum_instance.execute(qc)
counts = ret.get_counts(qc)
shots = self._quantum_instance._run_config.shots
statevector = {b: (v / shots) ** 0.5 for (b, v) in counts.items()}
return statevector
def execute_at_point(
self,
point: Union[List[float], np.ndarray],
quadratic_program: QuadraticProgram,
optimal_function_value: float = None,
log_text: Optional[Callable[..., Any]] = print,
) -> Dict[str, Any]:
"""Runs QAOA without the optimization loop. Evaluates a single set of qaoa parameters. (New method)
Args:
point: The QAOA parameters (a list ordered as: [all_ZZ_gamma_values] + [all_X_beta_values]).
quadratic_program: The quadratic program to obtain the operator from and to evaluate the solution state bitstrings with.
optimal_function_value: The optimal value for which the solution states return in the quadratic_program.
Useful in rare cases where the solutions have zero probability.
If None, the best function_value among solutions will be used.
log_text: Used for text output, replacement to the default print method to make logging easy.
If None, no text output can occur.
Returns:
A dict containing the results. Keys are: 'energy', 'point', 'solutions', 'solution_probability', 'eigenstate', 'function_value'.
"""
op_custom, offset = quadratic_program.to_ising()
results_dict = {}
# no need to call "self.prepare_for_optisation(op_custom)" because the
# methods "self.calculate_statevector_at_point(op_custom, point)" and
# "self.evaluate_energy(op_custom, point)" already do.
eigenstate = self.calculate_statevector_at_point(op_custom, point)
energy = self.evaluate_energy_at_point(op_custom, point)
solutions = self.get_optimal_solutions_from_statevector(
eigenstate,
quadratic_program,
min_probability=10 ** -6,
optimal_function_value=optimal_function_value,
)
solution_probability = 0
for sol in solutions:
solution_probability += sol["probability"]
results_dict["energy"] = energy
results_dict["point"] = point
results_dict["solutions"] = solutions
results_dict["solution_probability"] = solution_probability
results_dict["eigenstate"] = eigenstate
if len(solutions) > 0:
results_dict["function_value"] = solutions[0]["function_value"]
else:
if log_text != None:
log_text("WARNING: No solutions were found.")
return results_dict
def evaluate_energy_at_point(
self, operator: OperatorBase, point: Union[List[float], np.ndarray]
) -> Union[float, List[float]]:
"""Evaluate energy at given parameters for the operator ansatz. (New method)
Args:
operator: The operator (usually obtained from QuadraticProgram.to_ising()).
point: The QAOA parameters (a list ordered as: [all_ZZ_gamma_values] + [all_X_beta_values]).
Returns:
Energy of the hamiltonian of each parameter.
Raises:
RuntimeError: If the ansatz has no parameters.
"""
self._prepare_for_optisation(operator)
return self._energy_evaluation(point)
def get_optimal_solutions_from_statevector(
self,
eigenvector: Union[dict, np.ndarray, StateFn],
quadratic_program: QuadraticProgram,
min_probability: float = 1e-6,
optimal_function_value: float = None,
) -> List[Tuple[str, float, float]]:
"""Extract the solution state information from the eigenvector. (New method)
Args:
eigenvector: The eigenvector from which the solution states are extracted.
quadratic_program: The QUBO to evaluate at the bitstring.
min_probability: Only consider states where the amplitude exceeds this threshold.
optimal_function_value: The optimal value for which the solution states return in the quadratic_program. Useful in rare cases where the solutions have zero probability.
Returns:
A list of all solutions. Each solution is a dict of length 3: "state": the state bitstring, "function_value": the function value, and "probability": the state probability.
Raises:
TypeError: If the type of eigenvector is not supported.
"""
samples = self.eigenvector_to_solutions(
eigenvector, quadratic_program, min_probability
)
samples.sort(key=lambda x: quadratic_program.objective.sense.value * x[1])
fval = samples[0][1]
if optimal_function_value != None:
fval = optimal_function_value
solution_samples = []
for i in range(len(samples)):
if samples[i][1] == fval:
solution = {}
solution["state"] = samples[i][0]
solution["function_value"] = samples[i][1]
solution["probability"] = samples[i][2]
solution_samples.append(solution)
return solution_samples
def reset_reps(self, reps: int) -> None:
"""Reset the number of reps when performing QAOA.
Args:
reps: The number of layers in QAOA (the 'p' value)
"""
validate_min('reps', reps, 1)
self._reps = reps
def set_optimiser_parameter_bounds(
self,
optimiser_parameter_bounds: Optional[
List[Tuple[Optional[float], Optional[float]]]
],
) -> None:
self._optimiser_parameter_bounds = optimiser_parameter_bounds
def set_parameterise_point_for_energy_evaluation(
self,
parameterise_point_for_optimisation: Callable[
[Union[List[float], np.ndarray], int], List[float]
],
) -> None:
self._parameterise_point_for_energy_evaluation = (
parameterise_point_for_optimisation
)
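    # Illustrative usage sketch (names borrowed from the module docstring examples): the
    # setter above is typically pointed at 'convert_from_fourier_point', so the optimiser
    # works on Fourier coefficients while the circuit still receives ordinary QAOA angles.
    #
    #   qaoa_instance.set_parameterise_point_for_energy_evaluation(convert_from_fourier_point)
    #   qaoa_results = qaoa_instance.solve(operator, initial_fourier_point, bounds)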
def set_post_process_raw_data(
self,
post_process_raw_data_method: Optional[
Callable[
[Union[List[Dict[str, int]], List[List[Dict[str, int]]]]],
Union[List[Dict[str, int]], List[List[Dict[str, int]]]],
]
],
    ) -> None:
        """Uses the specified method to process the raw sampled data executed on the backend whenever circuits are sampled.
Args:
post_process_raw_data_method: The method to process the data.
                Inputs a list of counts dicts List[Dict[str, int]] and outputs the processed list of count dicts List[Dict[str, int]].
The data could potentially be formatted as a list of a list of dictionaries List[List[Dict[str, int]]]. However, this
                will likely not happen without modifying QAOA to do so.
Each dictionary has the counts for each qubit with the keys containing a string in binary format and separated
according to the registers in circuit (e.g. ``0100 1110``). The string is little-endian (cr[0] on the right hand side).
However there will likely only be a single register without modifying QAOA, so the state bitstring should have no spaces.
"""
self._post_process_raw_data = post_process_raw_data_method
if self._circuit_sampler != None:
self._circuit_sampler.set_post_process_raw_data(self._post_process_raw_data)
def solve(
self,
ising_hamiltonian_operator: Union[OperatorBase, nx.Graph],
initial_point: Union[List[float], np.ndarray],
bounds: Optional[List[Tuple[Optional[float], Optional[float]]]] = None,
) -> MinimumEigensolverResult:
if isinstance(ising_hamiltonian_operator, nx.Graph):
couplings, local_fields = get_ising_hamiltonian_terms_from_ising_graph(
ising_hamiltonian_operator
)
quadratic_program = get_quadratic_program_from_ising_hamiltonian_terms(
couplings, local_fields, 0, None, None
)
ising_hamiltonian_operator, _ = quadratic_program.to_ising()
self.initial_point = initial_point
self.set_optimiser_parameter_bounds(bounds)
return self.compute_minimum_eigenvalue(ising_hamiltonian_operator)
def solve_from_ising_hamiltonian_terms(
self,
couplings: List[Tuple[int, int, float]],
local_fields: Mapping[int, float],
constant_term: float,
initial_point: Union[List[float], np.ndarray],
bounds: Optional[List[Tuple[Optional[float], Optional[float]]]] = None,
) -> MinimumEigensolverResult:
quadratic_program = get_quadratic_program_from_ising_hamiltonian_terms(
couplings, local_fields, constant_term, None, None
)
ising_hamiltonian_operator, _ = quadratic_program.to_ising()
self.initial_point = initial_point
self.set_optimiser_parameter_bounds(bounds)
return self.compute_minimum_eigenvalue(ising_hamiltonian_operator)
###############
### Helper Methods
###############
def convert_from_fourier_point(
fourier_point: List[float], num_params_in_point: int
) -> List[float]:
"""Converts a point in Fourier space back to QAOA angles.
Args:
fourier_point: The point in Fourier space to convert.
num_params_in_point: The length of the resulting point. Must be even.
Returns:
The converted point in the form of QAOA rotation angles.
"""
new_point = [0] * num_params_in_point
    reps = int(num_params_in_point / 2)  # num_params_in_point should always be even
max_frequency = int(len(fourier_point) / 2) # fourier_point should always be even
for i in range(reps):
new_point[i] = 0
for k in range(max_frequency):
new_point[i] += fourier_point[k] * math.sin(
(k + 0.5) * (i + 0.5) * math.pi / reps
)
new_point[i + reps] = 0
for k in range(max_frequency):
new_point[i + reps] += fourier_point[k + max_frequency] * math.cos(
(k + 0.5) * (i + 0.5) * math.pi / reps
)
return new_point
def convert_to_fourier_point(
point: List[float], num_params_in_fourier_point: int
) -> List[float]:
"""Converts a point to fourier space.
Args:
point: The point to convert.
num_params_in_fourier_point: The length of the resulting fourier point. Must be even.
Returns:
The converted point in fourier space.
"""
fourier_point = [0] * num_params_in_fourier_point
reps = int(len(point) / 2) # point should always be even
max_frequency = int(
num_params_in_fourier_point / 2
) # num_params_in_fourier_point should always be even
for i in range(max_frequency):
fourier_point[i] = 0
for k in range(reps):
fourier_point[i] += point[k] * math.sin(
(k + 0.5) * (i + 0.5) * math.pi / max_frequency
)
fourier_point[i] = 2 * fourier_point[i] / reps
fourier_point[i + max_frequency] = 0
for k in range(reps):
fourier_point[i + max_frequency] += point[k + reps] * math.cos(
(k + 0.5) * (i + 0.5) * math.pi / max_frequency
)
fourier_point[i + max_frequency] = 2 * fourier_point[i + max_frequency] / reps
return fourier_point
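# Illustrative sketch (not part of the original module): a small self-check showing how the
# two conversion helpers above fit together. The sample point is assumed, not taken from any
# particular experiment.
def _example_fourier_round_trip() -> None:
    """Convert a reps=2 QAOA point to Fourier space and back, printing each form."""
    point = [0.40784, 0.73974, -0.53411, -0.28296]  # [gamma_1, gamma_2] + [beta_1, beta_2]
    fourier_point = convert_to_fourier_point(point, num_params_in_fourier_point=4)
    recovered = convert_from_fourier_point(fourier_point, num_params_in_point=4)
    print("original point: ", point)
    print("fourier point:  ", fourier_point)
    print("recovered point:", recovered)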
def get_ising_graph_from_ising_hamiltonian_terms(
couplings: List[Tuple[int, int, float]], local_fields: Mapping[int, float]
) -> nx.Graph:
"""Constructs a networkx graph with node and edge weights corresponding to the coefficients
of the local field and coupling strengths of the Ising Hamiltonian respectively.
Args:
couplings: A list of couplings for the Ising graph (or Hamiltonian).
Couplings are in the form of a 3-tuple e.g.
(spin_1, spin_2, coupling_strength).
local_fields: The local field strengths for the Ising graph (or Hamiltonian)
A Dict with keys: spin numbers and values: field strengths.
Returns:
The Ising graph as an instance of a networkx Graph object with node and edge weights.
"""
G = nx.Graph()
for local_field in local_fields.keys():
G.add_node(local_field, weight=local_fields[local_field])
G.add_weighted_edges_from(couplings)
return G
def get_ising_hamiltonian_terms_from_ising_graph(
ising_graph: nx.Graph,
) -> Tuple[List[Tuple[int, int, float]], Dict[int, float]]:
    """Extracts the coupling and local field terms of an Ising Hamiltonian from a networkx
    graph whose node and edge weights encode the local field and coupling strengths respectively.
    Args:
        ising_graph: A networkx graph where each node has a 'weight' attribute (local field
            strength) and each edge has a 'weight' attribute (coupling strength).
    Returns:
        A 2-tuple (couplings, local_fields): couplings is a list of 3-tuples
        (spin_1, spin_2, coupling_strength) and local_fields is a dict mapping spin numbers
        to field strengths.
    """
local_fields = {}
for i in range(len(ising_graph.nodes)):
local_fields[i] = ising_graph.nodes[i]['weight']
couplings = []
edge_data = ising_graph.edges(data=True)
for edge in edge_data:
        couplings.append((edge[0], edge[1], edge[2]['weight']))
return couplings, local_fields
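# Illustrative sketch (not part of the original module): round-trip a few assumed Ising terms
# through the two graph helpers above.
def _example_ising_graph_round_trip() -> None:
    """Build an Ising graph from couplings/local fields and convert it back to terms."""
    couplings = [(0, 1, -1.0), (1, 2, 1.0)]
    local_fields = {0: 0.2, 1: -0.3, 2: 0.0}
    graph = get_ising_graph_from_ising_hamiltonian_terms(couplings, local_fields)
    recovered_couplings, recovered_fields = get_ising_hamiltonian_terms_from_ising_graph(graph)
    print("couplings:   ", recovered_couplings)
    print("local fields:", recovered_fields)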
def get_quadratic_program_from_ising_hamiltonian_terms(
couplings: List[Tuple[int, int, float]],
local_fields: Mapping[int, float],
constant_term: float,
output_ising_graph_filename: Optional[str] = None,
log_text: Optional[Callable[..., Any]] = print,
) -> QuadraticProgram:
"""Constructs and returns the quadratic program corresponding to the input Hamiltonian terms.
Applies the transformation -> Z = 2b - 1, since Ising Hamiltonian spins have {+-1} values
while the quadratic program is binary.
Args:
couplings: A list of couplings for the Ising graph (or Hamiltonian).
Couplings are in the form of a 3-tuple e.g.
(spin_1, spin_2, coupling_strength).
Negative coupling strengths are Ferromagnetic (spin states want to be the same).
local_fields: The local field strengths for the Ising graph (or Hamiltonian)
A Dict with keys: spin numbers and values: field strengths.
Using convention with negative sign on local fields. So a negative local field makes the spin want to be +1.
constant_term: the constant for the Ising Hamiltonian.
output_ising_graph_filename: Filename to save ising graph file with.
If None, will not output ising graph to file.
log_text: Used for text output, replacement to the default print method to make logging easy.
If None, no text output will occur.
Returns:
The binary quadratic program corresponding to the Hamiltonian
"""
if output_ising_graph_filename != None:
ising_graph = get_ising_graph_from_ising_hamiltonian_terms(
couplings, local_fields
)
output_ising_graph(
ising_graph,
custom_filename_no_ext=output_ising_graph_filename,
log_text=log_text,
)
quadratic_program = QuadraticProgram()
for local_field in local_fields.keys():
quadratic_program.binary_var('c' + str(local_field))
new_constant_term = 0
new_linear_terms = {}
for car_number in local_fields.keys():
new_linear_terms[car_number] = 0.0
new_quadratic_terms = {}
# transform constant term
new_constant_term = constant_term
# transform local fields
for car_number in local_fields.keys():
new_linear_terms[car_number] = 2 * local_fields[car_number]
new_constant_term -= local_fields[car_number]
# transform couplings
for coupling in couplings:
if ('c' + str(coupling[0]), 'c' + str(coupling[1])) in new_quadratic_terms:
new_quadratic_terms[('c' + str(coupling[0]), 'c' + str(coupling[1]))] += (
4 * coupling[2]
)
else:
new_quadratic_terms[('c' + str(coupling[0]), 'c' + str(coupling[1]))] = (
4 * coupling[2]
)
new_linear_terms[coupling[0]] -= 2 * coupling[2]
new_linear_terms[coupling[1]] -= 2 * coupling[2]
new_constant_term += coupling[2]
quadratic_program.minimize(
constant=new_constant_term,
linear=[new_linear_terms[lf] for lf in new_linear_terms.keys()],
quadratic=new_quadratic_terms,
)
return quadratic_program
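# Illustrative sketch (not part of the original module): the substitution Z = 2b - 1 used above
# maps a single coupling J*Z0*Z1 to 4J*b0*b1 - 2J*b0 - 2J*b1 + J. The tiny assumed problem below
# lets you inspect the resulting binary program directly.
def _example_ising_to_quadratic_program() -> None:
    """Print the QUBO produced from one coupling term with J = 1 and no local fields."""
    quadratic_program = get_quadratic_program_from_ising_hamiltonian_terms(
        couplings=[(0, 1, 1.0)],
        local_fields={0: 0.0, 1: 0.0},
        constant_term=0.0,
        output_ising_graph_filename=None,
        log_text=None,
    )
    print(quadratic_program.export_as_lp_string())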
def output_ising_graph(
ising_graph: nx.Graph,
custom_filename_no_ext: Optional[str] = None,
log_text: Optional[Callable[..., Any]] = print,
) -> None:
"""Outputs the networkx graph to file in PNG format
Args:
ising_graph: A networkx graph with node and edge weights specified.
Nodes have attribute 'weight' that corresponds to a local field strength.
Edges have attribute 'weight' corresponding to the coupling strength.
custom_filename_no_ext: The filename to save the figure to.
Defaults to "Ising_graph" if None.
log_text: Used for text output, replacement to the default print method to make logging easy.
If None, no text output will occur.
"""
# Generate plot of the Graph
colors = ['r' for node in ising_graph.nodes()]
default_axes = plt.axes(frameon=False)
default_axes.set_axis_off()
default_axes.margins(0.1)
pos = nx.circular_layout(ising_graph)
labels = {
n: str(n) + '; ' + str(ising_graph.nodes[n]['weight'])
for n in ising_graph.nodes
}
nx.draw_networkx(
ising_graph,
node_color=colors,
node_size=600,
alpha=1,
ax=default_axes,
pos=pos,
labels=labels,
)
edge_labels = nx.get_edge_attributes(ising_graph, 'weight')
nx.draw_networkx_edge_labels(ising_graph, pos=pos, edge_labels=edge_labels)
if custom_filename_no_ext == None:
filename = "Ising_graph.png"
else:
filename = custom_filename_no_ext + '.png'
if log_text != None:
log_text("Saving Ising graph '" + filename + "'...")
plt.savefig(filename, format="PNG", bbox_inches=0)
plt.close()
def print_qaoa_solutions(
solutions: List[Mapping[str, Any]], log_text: Callable[..., Any] = print
) -> None:
"""Pretty prints (pprint) a list of solutions followed by their summed probability.
Args:
solutions: List of solutions, they are each formatted as a dict with (key, value):
'state', state bitstring (str)
'function_value', binary quadratic program objective value (float)
'probability', probability (float)
log_text: Used for text output, replacement to the default print method to make logging easy.
"""
import pprint
if len(solutions) > 0:
log_text(
"function value (quadratic program):", str(solutions[0]["function_value"])
)
solutions_string = pprint.pformat(
[
[solutions[x]["state"], solutions[x]["probability"]]
for x in range(len(solutions))
],
indent=2,
)
log_text(solutions_string)
initial_solution_probability = 0
for x in range(len(solutions)):
initial_solution_probability += solutions[x]["probability"]
log_text("total probability:", initial_solution_probability)
else:
log_text("total probability: 0")
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017~2999 - cologler <[email protected]>
# ----------
#
# ----------
import os
import importlib
from .common import TypeMatcher
for name in os.listdir(os.path.dirname(__file__)):
if name.startswith('_') or not name.endswith('.py'):
continue
importlib.import_module('.' + name[:-3], __name__)
|
python
|
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Code related to the TensorFlow V1 model registry for easily creating models.
"""
import re
from typing import Any, Callable, Dict, List, Optional, Union
from sparseml.tensorflow_v1.models.estimator import EstimatorModelFn
from sparseml.tensorflow_v1.utils import tf_compat
from sparseml.utils import TENSORFLOW_V1_FRAMEWORK, parse_optimization_str
from sparsezoo import Zoo
from sparsezoo.objects import Model
__all__ = ["ModelRegistry"]
class _ModelAttributes(object):
def __init__(
self,
input_shape: Any,
domain: str,
sub_domain: str,
architecture: str,
sub_architecture: str,
default_dataset: str,
default_desc: str,
default_model_fn_creator: EstimatorModelFn,
base_name_scope: str,
tl_ignore_tens: List[str],
repo_source: str,
):
self.input_shape = input_shape
self.domain = domain
self.sub_domain = sub_domain
self.architecture = architecture
self.sub_architecture = sub_architecture
self.default_dataset = default_dataset
self.default_desc = default_desc
self.default_model_fn_creator = default_model_fn_creator
self.base_name_scope = base_name_scope
self.tl_ignore_tens = tl_ignore_tens
self.repo_source = repo_source
class ModelRegistry(object):
"""
Registry class for creating models
"""
_CONSTRUCTORS = {} # type: Dict[str, Callable]
_ATTRIBUTES = {} # type: Dict[str, _ModelAttributes]
@staticmethod
def available_keys() -> List[str]:
"""
:return: the keys (models) currently available in the registry
"""
return list(ModelRegistry._CONSTRUCTORS.keys())
@staticmethod
def create(key: str, *args, **kwargs) -> Any:
"""
Create a new model for the given key
:param key: the model key (name) to create
:param args: any args to supply to the graph constructor
:param kwargs: any keyword args to supply to the graph constructor
:return: the outputs from the created graph
"""
if key not in ModelRegistry._CONSTRUCTORS:
raise ValueError(
"key {} is not in the model registry; available: {}".format(
key, ModelRegistry._CONSTRUCTORS
)
)
return ModelRegistry._CONSTRUCTORS[key](*args, **kwargs)
@staticmethod
def create_estimator(
key: str,
model_dir: str,
model_fn_params: Optional[Dict[str, Any]],
run_config: tf_compat.estimator.RunConfig,
*args,
**kwargs,
) -> tf_compat.estimator.Estimator:
"""
Create Estimator for a model given the key and extra parameters
:param key: the key that the model was registered with
:param model_dir: directory to save results
:param model_fn_params: parameters for model function
:param run_config: RunConfig used by the estimator during training
:param args: additional positional arguments to pass into model constructor
:param kwargs: additional keyword arguments to pass into model constructor
:return: an Estimator instance
"""
model_const = ModelRegistry._CONSTRUCTORS[key]
attributes = ModelRegistry._ATTRIBUTES[key]
model_fn_creator = attributes.default_model_fn_creator()
model_fn = model_fn_creator.create(model_const, *args, **kwargs)
model_fn_params = {} if model_fn_params is None else model_fn_params
classifier = tf_compat.estimator.Estimator(
config=run_config,
model_dir=model_dir,
model_fn=model_fn,
params=model_fn_params,
)
return classifier
@staticmethod
def create_zoo_model(
key: str,
pretrained: Union[bool, str] = True,
pretrained_dataset: str = None,
) -> Model:
"""
Create a sparsezoo Model for the desired model in the zoo
:param key: the model key (name) to retrieve
:param pretrained: True to load pretrained weights; to load a specific version
give a string with the name of the version (pruned-moderate, base),
default True
:param pretrained_dataset: The dataset to load for the model
:return: the sparsezoo Model reference for the given model
"""
if key not in ModelRegistry._CONSTRUCTORS:
raise ValueError(
"key {} is not in the model registry; available: {}".format(
key, ModelRegistry._CONSTRUCTORS
)
)
attributes = ModelRegistry._ATTRIBUTES[key]
optim_name, optim_category, optim_target = parse_optimization_str(
pretrained if isinstance(pretrained, str) else attributes.default_desc
)
return Zoo.load_model(
attributes.domain,
attributes.sub_domain,
attributes.architecture,
attributes.sub_architecture,
TENSORFLOW_V1_FRAMEWORK,
attributes.repo_source,
attributes.default_dataset
if pretrained_dataset is None
else pretrained_dataset,
None,
optim_name,
optim_category,
optim_target,
)
@staticmethod
def load_pretrained(
key: str,
pretrained: Union[bool, str] = True,
pretrained_dataset: str = None,
pretrained_path: str = None,
remove_dynamic_tl_vars: bool = False,
sess: tf_compat.Session = None,
saver: tf_compat.train.Saver = None,
):
"""
Load pre-trained variables for a given model into a session.
Uses a Saver object from TensorFlow to restore the variables
from an index and data file.
:param key: the model key (name) to create
:param pretrained: True to load the default pretrained variables,
a string to load a specific pretrained graph
(ex: base, optim, optim-perf),
or False to not load any pretrained weights
:param pretrained_dataset: The dataset to load pretrained weights for
(ex: imagenet, mnist, etc).
If not supplied will default to the one preconfigured for the model.
:param pretrained_path: A path to the pretrained variables to load,
if provided will override the pretrained param
:param remove_dynamic_tl_vars: True to remove the vars that are used for
transfer learning (have a different shape and should not be restored),
False to keep all vars in the Saver.
Only used if saver is None
:param sess: The session to load the model variables into
if pretrained_path or pretrained is supplied.
If not supplied and required, then will use the default session
:param saver: The Saver instance to use to restore the variables
for the graph if pretrained_path or pretrained is supplied.
If not supplied and required, then will create one using the
ModelRegistry.saver function
"""
if key not in ModelRegistry._CONSTRUCTORS:
raise ValueError(
"key {} is not in the model registry; available: {}".format(
key, ModelRegistry._CONSTRUCTORS
)
)
if not sess and (pretrained_path or pretrained):
sess = tf_compat.get_default_session()
if not saver and (pretrained_path or pretrained):
saver = ModelRegistry.saver(key, remove_dynamic_tl_vars)
if isinstance(pretrained, str):
if pretrained.lower() == "true":
pretrained = True
elif pretrained.lower() in ["false", "none"]:
pretrained = False
if pretrained_path:
saver.restore(sess, pretrained_path)
elif pretrained:
zoo_model = ModelRegistry.create_zoo_model(
key, pretrained, pretrained_dataset
)
try:
paths = zoo_model.download_framework_files()
index_path = [path for path in paths if path.endswith(".index")]
index_path = index_path[0]
model_path = index_path[:-6]
saver.restore(sess, model_path)
except Exception:
# try one more time with overwrite on in case files were corrupted
paths = zoo_model.download_framework_files(overwrite=True)
index_path = [path for path in paths if path.endswith(".index")]
if len(index_path) != 1:
raise FileNotFoundError(
"could not find .index file for {}".format(zoo_model.root_path)
)
index_path = index_path[0]
model_path = index_path[:-6]
saver.restore(sess, model_path)
@staticmethod
def input_shape(key: str):
"""
:param key: the model key (name) to create
:return: the specified input shape for the model
"""
if key not in ModelRegistry._CONSTRUCTORS:
raise ValueError(
"key {} is not in the model registry; available: {}".format(
key, ModelRegistry._CONSTRUCTORS
)
)
return ModelRegistry._ATTRIBUTES[key].input_shape
@staticmethod
def saver(key: str, remove_dynamic_tl_vars: bool = False) -> tf_compat.train.Saver:
"""
Get a tf compat saver that contains only the variables for the desired
architecture specified by key.
Note, the architecture must have been created in the current graph already
to work.
:param key: the model key (name) to get a saver instance for
:param remove_dynamic_tl_vars: True to remove the vars that are used for
transfer learning (have a different shape and should not be restored),
False to keep all vars in the Saver
:return: a Saver object with the appropriate vars for the model to restore
"""
if key not in ModelRegistry._CONSTRUCTORS:
raise ValueError(
"key {} is not in the model registry; available: {}".format(
key, ModelRegistry._CONSTRUCTORS
)
)
base_name = ModelRegistry._ATTRIBUTES[key].base_name_scope
saver_vars = [
var
for var in tf_compat.get_collection(tf_compat.GraphKeys.TRAINABLE_VARIABLES)
if base_name in var.name
]
saver_vars.extend(
[
var
for var in tf_compat.global_variables()
if ("moving_mean" in var.name or "moving_variance" in var.name)
and base_name in var.name
]
)
if remove_dynamic_tl_vars:
tl_ignore_tens = ModelRegistry._ATTRIBUTES[key].tl_ignore_tens
def _check_ignore(var: tf_compat.Variable) -> bool:
for ignore in tl_ignore_tens:
if re.match(ignore, var.name):
return True
return False
saver_vars = [var for var in saver_vars if not _check_ignore(var)]
saver = tf_compat.train.Saver(saver_vars)
return saver
@staticmethod
def register(
key: Union[str, List[str]],
input_shape: Any,
domain: str,
sub_domain: str,
architecture: str,
sub_architecture: str,
default_dataset: str,
default_desc: str,
default_model_fn_creator: EstimatorModelFn,
base_name_scope: str,
tl_ignore_tens: List[str],
repo_source: str = "sparseml",
):
"""
Register a model with the registry. Should be used as a decorator
:param key: the model key (name) to create
:param input_shape: the specified input shape for the model
:param domain: the domain the model belongs to; ex: cv, nlp, etc
:param sub_domain: the sub domain the model belongs to;
ex: classification, detection, etc
:param architecture: the architecture the model belongs to;
ex: resnet, mobilenet, etc
:param sub_architecture: the sub architecture the model belongs to;
ex: 50, 101, etc
:param default_dataset: the dataset to use by default for loading
pretrained if not supplied
:param default_desc: the description to use by default for loading
pretrained if not supplied
:param default_model_fn_creator: default model creator to use when creating
estimator instance
:param base_name_scope: the base string used to create the graph under
:param tl_ignore_tens: a list of tensors to ignore restoring for
if transfer learning
:param repo_source: the source repo for the model, default is sparseml
:return: the decorator
"""
if not isinstance(key, List):
key = [key]
def decorator(const_func):
for r_key in key:
if r_key in ModelRegistry._CONSTRUCTORS:
                    raise ValueError("key {} is already registered".format(r_key))
ModelRegistry._CONSTRUCTORS[r_key] = const_func
ModelRegistry._ATTRIBUTES[r_key] = _ModelAttributes(
input_shape,
domain,
sub_domain,
architecture,
sub_architecture,
default_dataset,
default_desc,
default_model_fn_creator,
base_name_scope,
tl_ignore_tens,
repo_source,
)
return const_func
return decorator
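# --------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original API
# surface shown here). It assumes a constructor was registered under the key
# "resnet50" and that a ModelRegistry.create helper exists earlier in this
# module -- both names are assumptions.
#
#     inputs = tf_compat.placeholder(tf_compat.float32, [None, 224, 224, 3])
#     logits = ModelRegistry.create("resnet50", inputs, training=False)
#     with tf_compat.Session() as sess:
#         sess.run(tf_compat.global_variables_initializer())
#         ModelRegistry.load_pretrained(
#             "resnet50", pretrained="base", pretrained_dataset="imagenet", sess=sess
#         )
# --------------------------------------------------------------------------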
|
python
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import collections
import numpy as np
import pytest
import megengine
from megengine.core._imperative_rt.core2 import apply
from megengine.core._trace_option import use_symbolic_shape
from megengine.core.ops import builtin
from megengine.tensor import Tensor
def cvt_to_shape_desc(val, inpvar, config=None):
def as_tensor(val, device):
assert device is not None, "can not infer device"
# TODO: should copy to appropriate device
val = Tensor(val, device=device)
return val
device = None
if inpvar is not None:
assert isinstance(inpvar, Tensor)
device = device or inpvar.device
if config is not None:
device = device or config.device
if isinstance(val, Tensor):
return as_tensor(val, device)
if not isinstance(val, collections.abc.Iterable):
val = [val]
components = []
on_host = True
for i in val:
if isinstance(i, Tensor):
on_host = False
device = device or i.device
else:
assert isinstance(i, int), (
"shape desc could contain either int or Tensor, got {}"
" actually".format(repr(i))
)
components.append(i)
assert components, "shape desc could not be empty"
if on_host:
shape = np.ascontiguousarray(components, dtype=np.int32)
assert np.all(shape == components), "failed to convert to shape: {}".format(
components
)
return as_tensor(shape, device)
for idx, v in enumerate(components):
if not isinstance(v, Tensor):
vi = int(v)
assert vi == v, "could not convert {} to int".format(v)
v = vi
components[idx] = as_tensor(v, device)
    # NOTE: the original code referenced an undefined ``all_oprs``; ``builtin.Concat``
    # from megengine.core.ops (imported above) is assumed to be the intended op here.
    return invoke_op(builtin.Concat(axis=0, comp_node=device), components)
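# Hedged example (added for illustration): with a plain tuple of ints the shape
# descriptor is materialized as an int32 tensor on the reference tensor's device,
# as inferred from the on_host branch above.
def test_cvt_to_shape_desc_static_shape():
    ref = Tensor(np.zeros((2, 3), dtype="float32"))
    desc = cvt_to_shape_desc((4, 5), ref)
    np.testing.assert_equal(desc.numpy(), np.array([4, 5], dtype=np.int32))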
def canonize_reshape(inputs, *, config):
src, tshape = inputs
tshape = cvt_to_shape_desc(tshape, src, config)
return src, tshape
def canonize_inputs(inputs, *, config):
"""convert immediate numbers and SharedND to SymbolVar in inputs; at least
one of the inputs must be SymbolVar, so comp node and comp graph can
beinferred
:return: list of converted vars
"""
if (
isinstance(inputs, (list, tuple))
and len(inputs) == 1
and isinstance(inputs[0], (list, tuple))
):
# handle the case when a list is passed to a function with
# variable-length argument (e.g. concat has signature concat(*inputs)
# and is called with concat([a, b]))
inputs = inputs[0]
if isinstance(inputs, Tensor):
return [inputs]
old_inputs = inputs
inputs = []
get_comp_node = None
need_cvt = False
for i in old_inputs:
if isinstance(i, Tensor):
get_comp_node = lambda cn=i.device: cn
else:
need_cvt = True
inputs.append(i)
if not need_cvt:
return inputs
if get_comp_node is None:
def get_comp_node():
return config.comp_node
for idx, var in enumerate(inputs):
if not isinstance(var, Tensor):
var = Tensor(var)
inputs[idx] = var
return inputs
def invoke_op(op, inputs_, cvt_inputs=canonize_inputs):
inputs = cvt_inputs(
inputs_, config=megengine.core._imperative_rt.OperatorNodeConfig()
)
return apply(op, *inputs)
def unpack_getitem(inp, tuple_val, *, allow_newaxis=True):
assert isinstance(inp, Tensor)
if not isinstance(tuple_val, tuple):
tuple_val = (tuple_val,)
def as_tensor(v):
if not isinstance(v, Tensor):
vi = np.ascontiguousarray(v, dtype=np.int32)
assert np.abs(vi - v).max() == 0, "bad index: {!r}".format(v)
v = Tensor(vi)
return v
new_axes = []
tensors = []
items = []
cur_axis = -1
for i_idx, i in enumerate(tuple_val):
cur_axis += 1
if i is np.newaxis:
if cur_axis >= 0:
new_axes.append(cur_axis)
continue
if i is Ellipsis:
cur_axis = -1
for j in tuple_val[:i_idx:-1]:
if j is Ellipsis:
raise IndexError("only one ellipsis is allowed")
if j is np.newaxis:
new_axes.append(cur_axis)
cur_axis -= 1
continue
item = [
cur_axis,
]
def push(v, item, tensors):
if v is None:
item.append(False)
else:
item.append(True)
tensors.append(as_tensor(v))
if isinstance(i, slice):
if i.start is None and i.stop is None and i.step is None:
continue
push(i.start, item, tensors)
push(i.stop, item, tensors)
push(i.step, item, tensors)
item.append(False) # idx
else:
            item += [False,] * 3  # begin, end, step
push(i, item, tensors)
assert len(item) == 5
items.append(item)
if new_axes:
raise IndexError("newaxis is not allowed here")
return inp, tensors, items
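# Hedged example (added for illustration): documents the layout produced by
# unpack_getitem as inferred from the code above -- each entry in ``items`` is
# [axis, has_begin, has_end, has_step, has_idx] and ``tensors`` carries the
# corresponding index values in the same order.
def test_unpack_getitem_items_format():
    x = Tensor(np.arange(25).reshape(5, 5).astype("int32"))
    inp, tensors, items = unpack_getitem(x, (slice(0, 4, 2), 3))
    assert items == [[0, True, True, True, False], [1, False, False, False, True]]
    assert [int(t.numpy()) for t in tensors] == [0, 4, 2, 3]
    assert inp is x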
def transpose(*args, **kwargs):
op = builtin.Dimshuffle(**kwargs)
return invoke_op(op, args)
def broadcast(input, tshape):
op = builtin.Broadcast()
return invoke_op(op, (input, tshape), canonize_reshape)
def subtensor(input, tuple_val):
input, tensors, items = unpack_getitem(input, tuple_val)
op = builtin.Subtensor(items)
return invoke_op(op, (input, *tensors))
def set_subtensor(input, value, tuple_val):
input, tensors, items = unpack_getitem(input, tuple_val)
op = builtin.SetSubtensor(items)
return invoke_op(op, (input, value, *tensors))
def incr_subtensor(input, value, tuple_val):
input, tensors, items = unpack_getitem(input, tuple_val)
op = builtin.IncrSubtensor(items)
return invoke_op(op, (input, value, *tensors))
def advance_indexing(input, tuple_val):
input, tensors, items = unpack_getitem(input, tuple_val)
op = builtin.IndexingMultiAxisVec(items)
return invoke_op(op, (input, *tensors))
def set_advance_indexing(input, value, tuple_val):
input, tensors, items = unpack_getitem(input, tuple_val)
op = builtin.IndexingSetMultiAxisVec(items)
return invoke_op(op, (input, value, *tensors))
def incr_advance_indexing(input, value, tuple_val):
input, tensors, items = unpack_getitem(input, tuple_val)
op = builtin.IndexingIncrMultiAxisVec(items)
return invoke_op(op, (input, value, *tensors))
def mesh_indexing(input, tuple_val):
input, tensors, items = unpack_getitem(input, tuple_val)
op = builtin.MeshIndexing(items)
return invoke_op(op, (input, *tensors))
def set_mesh_indexing(input, value, tuple_val):
input, tensors, items = unpack_getitem(input, tuple_val)
op = builtin.SetMeshIndexing(items)
return invoke_op(op, (input, value, *tensors))
def incr_mesh_indexing(input, value, tuple_val):
input, tensors, items = unpack_getitem(input, tuple_val)
op = builtin.IncrMeshIndexing(items)
return invoke_op(op, (input, value, *tensors))
def batched_mesh_indexing(input, tuple_val):
input, tensors, items = unpack_getitem(input, tuple_val)
op = builtin.BatchedMeshIndexing(items)
return invoke_op(op, (input, *tensors))
def batched_set_mesh_indexing(input, value, tuple_val):
input, tensors, items = unpack_getitem(input, tuple_val)
op = builtin.BatchedSetMeshIndexing(items)
return invoke_op(op, (input, value, *tensors))
def batched_incr_mesh_indexing(input, value, tuple_val):
input, tensors, items = unpack_getitem(input, tuple_val)
op = builtin.BatchedIncrMeshIndexing(items)
return invoke_op(op, (input, value, *tensors))
def test_transpose():
x = np.arange(10).reshape(2, 5).astype("int32")
xx = Tensor(x)
(yy,) = transpose(xx, pattern=[1, -1, 0])
np.testing.assert_equal(np.expand_dims(x.transpose(), axis=1), yy.numpy())
def test_broadcast():
x = np.arange(10).reshape(1, 10).astype("int32")
xx = Tensor(x)
(yy,) = broadcast(xx, (10, 10))
np.testing.assert_equal(np.repeat(x, 10, 0), yy.numpy())
def test_subtensor():
x = np.arange(25).reshape(5, 5).astype("int32")
d = np.arange(2).astype("int32")
xx = Tensor(x)
(yy0,) = subtensor(xx, (slice(0, 4, 2), 3))
(yy1,) = set_subtensor(xx, d, (slice(0, 4, 2), 3))
(yy2,) = incr_subtensor(xx, d, (slice(0, 4, 2), 3))
np.testing.assert_equal(x[0:4:2, 3], yy0.numpy())
x_ = x.copy()
x_[0:4:2, 3] = d
np.testing.assert_equal(x_, yy1.numpy())
x_ = x.copy()
x_[0:4:2, 3] += d
np.testing.assert_equal(x_, yy2.numpy())
def test_advance_indexing():
x = np.arange(25).reshape(5, 5).astype("int32")
d = np.arange(15).reshape(3, 5).astype("int32")
xx = Tensor(x)
(yy0,) = advance_indexing(xx, ((0, 4, 2), slice(None, None, None)))
(yy1,) = set_advance_indexing(xx, d, ((0, 4, 2), slice(None, None, None)))
(yy2,) = incr_advance_indexing(xx, d, ((0, 4, 2), slice(None, None, None)))
np.testing.assert_equal(x[(0, 4, 2), :], yy0.numpy())
x_ = x.copy()
x_[(0, 4, 2), :] = d
np.testing.assert_equal(x_, yy1.numpy())
x_ = x.copy()
x_[(0, 4, 2), :] += d
np.testing.assert_equal(x_, yy2.numpy())
def test_mesh_indexing():
x = np.arange(25).reshape(5, 5).astype("int32")
d = np.arange(6).reshape(3, 2).astype("int32")
xx = Tensor(x)
(yy0,) = mesh_indexing(xx, (slice(0, 5, 2), (1, 3)))
(yy1,) = set_mesh_indexing(xx, d, (slice(0, 5, 2), (1, 3)))
(yy2,) = incr_mesh_indexing(xx, d, (slice(0, 5, 2), (1, 3)))
r = np.ndarray(shape=(3, 2), dtype="int32")
for i0, i1 in enumerate(range(0, 5, 2)):
for j0, j1 in enumerate((1, 3)):
r[i0, j0] = x[i1, j1]
np.testing.assert_equal(r, yy0.numpy())
r = x.copy()
for i0, i1 in enumerate(range(0, 5, 2)):
for j0, j1 in enumerate((1, 3)):
r[i1, j1] = d[i0, j0]
np.testing.assert_equal(r, yy1.numpy())
r = x.copy()
for i0, i1 in enumerate(range(0, 5, 2)):
for j0, j1 in enumerate((1, 3)):
r[i1, j1] += d[i0, j0]
np.testing.assert_equal(r, yy2.numpy())
def test_batched_mesh_indexing():
x = np.arange(24).reshape(2, 3, 4).astype("int32")
d = np.arange(12).reshape(2, 2, 3).astype("int32")
xx = Tensor(x)
s = [(0, 1, 2), (1, 2, 3)]
(yy0,) = batched_mesh_indexing(xx, (slice(None, None, None), [(0, 2)] * 2, s))
(yy1,) = batched_set_mesh_indexing(
xx, d, (slice(None, None, None), [(0, 2)] * 2, s)
)
(yy2,) = batched_incr_mesh_indexing(
xx, d, (slice(None, None, None), [(0, 2)] * 2, s)
)
r = np.ndarray(shape=(2, 2, 3), dtype="int32")
for i in range(2):
for j0, j1 in enumerate((0, 2)):
for k0, k1 in enumerate(s[i]):
r[i, j0, k0] = x[i, j1, k1]
np.testing.assert_equal(r, yy0.numpy())
r = x.copy()
for i in range(2):
for j0, j1 in enumerate((0, 2)):
for k0, k1 in enumerate(s[i]):
r[i, j1, k1] = d[i, j0, k0]
np.testing.assert_equal(r, yy1.numpy())
r = x.copy()
for i in range(2):
for j0, j1 in enumerate((0, 2)):
for k0, k1 in enumerate(s[i]):
r[i, j1, k1] += d[i, j0, k0]
np.testing.assert_equal(r, yy2.numpy())
# high level
def test_advance_indexing_high_level():
x = np.arange(25).reshape(5, 5).astype("int32")
d = np.arange(15).reshape(3, 5).astype("int32")
xx = Tensor(x)
np.testing.assert_equal(x[1, :], xx[1, :].numpy())
np.testing.assert_equal(x[:, 1], xx[:, 1].numpy())
np.testing.assert_equal(x[1:3, :], xx[1:3, :].numpy())
np.testing.assert_equal(x[:, :], xx[:, :].numpy())
np.testing.assert_equal(x[1, 1], xx[1, 1].numpy())
yy = xx[(0, 4, 2), :]
np.testing.assert_equal(x[(0, 4, 2), :], yy.numpy())
x_ = x.copy()
x_[(0, 4, 2), :] = d
xx_ = Tensor(xx)
xx_[(0, 4, 2), :] = d
np.testing.assert_equal(x_, xx_.numpy())
x = np.arange(27).reshape(3, 3, 3).astype("int32")
xx = Tensor(x)
np.testing.assert_equal(x[1, :, :], xx[1, :, :].numpy())
np.testing.assert_equal(x[1, :, 1], xx[1, :, 1].numpy())
np.testing.assert_equal(x[1, 0:1, :], xx[1, 0:1, :].numpy())
np.testing.assert_equal(x[0:1, 1, 1], xx[0:1, 1, 1].numpy())
np.testing.assert_equal(x[:, 1, 1], xx[:, 1, 1].numpy())
np.testing.assert_equal(x[:, 1], xx[:, 1].numpy())
np.testing.assert_equal(x[1, 1:2], xx[1, 1:2].numpy())
x_ = x.copy()
x_[1, 1, 1] = -1
xx[1, 1, 1] = -1
np.testing.assert_equal(x_, xx.numpy())
x_[:, 1, 1] = -2
xx[:, 1, 1] = x_[:, 1, 1]
np.testing.assert_equal(x_, xx.numpy())
x_[0:1, :, 1] = -3
xx[0:1, :, 1] = x_[0:1, :, 1]
np.testing.assert_equal(x_, xx.numpy())
x_[0:1, :, 1] = -4
y = Tensor(x_)
xx[0:1, :, 1] = y[0:1, :, 1]
np.testing.assert_equal(y.numpy(), xx.numpy())
x[:] = 1
xx[:] = 1
np.testing.assert_equal(x, xx.numpy())
x = np.arange(9).reshape(3, 3).astype("int32")
xx = Tensor(x)
y = np.array([1, 2])
yy = Tensor(y)
np.testing.assert_equal(x[:, y[0]], xx[:, y[0]].numpy())
np.testing.assert_equal(x[:, y[0]], xx[:, yy[0]].numpy())
np.testing.assert_equal(x[:, y], xx[:, y].numpy())
np.testing.assert_equal(x[:, y], xx[:, yy].numpy())
x_ = x.copy()
x_[:, y[0]] = -1
xx_ = Tensor(x_)
xx[:, yy[0]] = xx_[:, yy[0]]
np.testing.assert_equal(x_, xx.numpy())
x_[:, y] = -1
xx_ = Tensor(x_)
xx[:, yy] = xx_[:, yy]
np.testing.assert_equal(x_, xx.numpy())
x = np.arange(9).reshape(3, 3).astype("int32")
xx = Tensor(x)
y = np.array([1])
yy = Tensor(y)
np.testing.assert_equal(x[:, y[0]], xx[:, y[0]].numpy())
np.testing.assert_equal(x[:, y[0]], xx[:, yy[0]].numpy())
np.testing.assert_equal(x[:, y], xx[:, y].numpy())
np.testing.assert_equal(x[:, y], xx[:, yy].numpy())
x = np.arange(9).reshape(3, 3).astype("int32")
xx = Tensor(x)
np.testing.assert_equal(x[[0, 1], 0], xx[[0, 1], 0].numpy())
np.testing.assert_equal(x[0:2, 0], xx[0:2, 0].numpy())
def test_advance_indexing_with_bool():
a = np.arange(9).reshape(3, 3).astype(np.float32)
b = np.array([1, 2, 3])
c = np.array([1, 2, 3])
aa = Tensor(a)
bb = Tensor(b)
cc = Tensor(c)
np.testing.assert_equal(a[b == 1, c == 2], aa[bb == 1, cc == 2].numpy())
a[b == 1, c == 2] = -1.0
aa[bb == 1, cc == 2] = -1.0
np.testing.assert_equal(a, aa.numpy())
a = np.arange(9).reshape(3, 3).astype(np.float32)
b = np.array([False, True, True])
c = np.array([2, 0]).astype(np.int32)
aa = Tensor(a)
bb = Tensor(b)
cc = Tensor(c)
np.testing.assert_equal(a[b, c], aa[bb, cc].numpy())
a[b, c] = -1.0
aa[bb, cc] = -1.0
np.testing.assert_equal(a, aa.numpy())
d = np.array([-1, -2], dtype=np.float32)
dd = Tensor(d)
a[b, c] = d
aa[bb, cc] = dd
np.testing.assert_equal(a, aa.numpy())
a = np.ones((2, 2))
b = np.array([[True, False], [False, True]])
aa = Tensor(a)
bb = Tensor(b)
np.testing.assert_equal(a[b], aa[bb].numpy())
b[:] = True
bb[:] = True
np.testing.assert_equal(a[b], aa[bb].numpy())
np.testing.assert_equal(a[:, [True, False]], aa[:, [True, False]].numpy())
a = np.array([[True, False], [False, True]])
b = np.array([1])
aa = Tensor(a)
bb = Tensor(b)
np.testing.assert_equal(a[b], aa[bb].numpy())
b = np.array([[True, True], [False, True]])
bb = Tensor(b)
np.testing.assert_equal(a[b], aa[bb].numpy())
a[b] = False
aa[bb] = False
np.testing.assert_equal(a, aa.numpy())
# XXX: trace does not expect empty condtake tensor
if not use_symbolic_shape():
a = np.ones((2, 2), dtype=np.int32)
b = np.array([[False, False], [False, False]])
aa = Tensor(a)
bb = Tensor(b)
np.testing.assert_equal(a[b], aa[b].numpy())
np.testing.assert_equal(a[b], aa[bb].numpy())
b = np.array([False, False])
bb = Tensor(b)
np.testing.assert_equal(a[b], aa[bb].numpy().reshape(a[b].shape)) # FIXME
a = np.arange(576).reshape(2, 3, 4, 3, 4, 2).astype("int32")
aa = Tensor(a)
b = (np.random.sample((2, 3, 4)) > 0.5).astype("bool")
bb = Tensor(b)
np.testing.assert_equal(a[b, :, 0:4:2], aa[bb, :, 0:4:2].numpy())
b = (np.random.sample((4, 3, 4)) > 0.5).astype("bool")
bb = Tensor(b)
np.testing.assert_equal(a[..., b, 0:2], aa[..., bb, 0:2].numpy())
b = (np.random.sample((3, 4, 3)) > 0.5).astype("bool")
bb = Tensor(b)
np.testing.assert_equal(
a[:, b, 0:2, [True, False]], aa[:, bb, 0:2, [True, False]].numpy()
)
|
python
|
import json
import pathlib
import os
print("Please enter the input path to the filepath you want to use for Mistos")
print("We will create a folder called 'Mistos' there. It contains your input and output directory")
path = input()
is_dir = False
while not is_dir:
path = pathlib.Path(path)
if path.is_dir():
is_dir = True
else:
print("Path is not valid. Make sure to enter a correct filepath (e.g. 'C:/Users/tlux1/Desktop')")
path = input()
mistos_path = path.joinpath("Mistos")
export_path = mistos_path.joinpath("export")
fileserver_path = mistos_path.joinpath("fileserver")
os.mkdir(mistos_path)
os.mkdir(export_path)
os.mkdir(fileserver_path)
config = {
"EXPORT_DIRECTORY": export_path.as_posix(),
"WORKING_DIRECTORY": fileserver_path.as_posix()
}
with open("config.json", "w") as _file:
json.dump(config, _file)
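# Hedged addition (not part of the original script): a small read-only sanity
# check that the config file written above round-trips as expected.
with open("config.json") as _file:
    _written = json.load(_file)
assert _written["EXPORT_DIRECTORY"] == export_path.as_posix()
assert _written["WORKING_DIRECTORY"] == fileserver_path.as_posix()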
print("Success! Start Mistos by running the 'mistos_start.bat' script.")
|
python
|
"Tests for presto.map"
import unittest as ut
from presto.map import System, Constellation, Region
class TestMap(ut.TestCase):
def test_map(self):
"Basic map data functionality test"
stacmon = System.by_name("Stacmon")
self.assertTrue(stacmon)
self.assertEqual(len(list(stacmon.neighbors())), 5)
self.assertTrue("Ostingele" in {n.name for n in stacmon.neighbors()})
self.assertEqual(stacmon.region.name, "Placid")
self.assertEqual(stacmon.constellation.name, "Fislipesnes")
fislipesnes = Constellation.by_name("Fislipesnes")
placid = Region.by_name("Placid")
self.assertEqual(fislipesnes, stacmon.constellation)
self.assertEqual(placid, stacmon.region)
self.assertEqual(len(stacmon.region.systems), 71)
if __name__ == '__main__':
    ut.main()
|
python
|
import logging
import smores.medkit as medkit
failures = []
def rxnorm_ingredient(rxcui, expect):
_return_check=False
_med_key_check=None
_ing_key_check=None
_overall=False
_failures=[]
ingredients = medkit.get_ingredients(rxcui, 'RXNORM')
if ingredients is not None:
if len(ingredients) > 0:
if type(ingredients[0]) is dict:
_return_check = True
else:
_failures.append('Return Check: Bad Return Type')
if _return_check:
default_keys = ['rxcui','tty','name','ingredients']
med_1_keys = list(ingredients[0].keys())
i=0
while True:
if i == len(med_1_keys)-1:
break
elif default_keys[i] not in med_1_keys:
_failures.append('Med Check Failure: {0}'.format(default_keys[i]))
if _med_key_check:
_med_key_check = False
i += 1
continue
else:
                    # likely intended to mirror the ingredient-key loop below:
                    # only flip to True if no failure has been recorded yet
                    _med_key_check = True if _med_key_check is None or _med_key_check else _med_key_check
i += 1
continue
ing_1_keys = list(ingredients[0]['ingredients'][0].keys())
ing_default_keys = ['rxcui', 'tty', 'name']
j=0
while True:
if j == len(ing_1_keys)-1:
break
elif ing_default_keys[j] not in ing_1_keys:
_failures.append('Med ING Check Failure: {0}'.format(ing_default_keys[j]))
if _ing_key_check:
_ing_key_check = False
j += 1
continue
else:
_ing_key_check = True if _ing_key_check is None or _ing_key_check else _ing_key_check
j += 1
continue
else:
_failures.append('RxNav Check of {0} Failed All Checks'.format(rxcui))
if _return_check and _med_key_check and _ing_key_check:
_overall = True
    if _failures:
        failures.append('{0}: {1} '.format(rxcui, _failures))
if _overall == expect:
print('RxNav Check of {0} Produced Expected Result of {1}'.format(rxcui, expect))
else:
print('RxNav Check of {0} Produced Unexpected Result of {1}'.format(rxcui, _overall))
if __name__ == "__main__":
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(funcName)-20s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
rxnormLog = logging.getLogger('rxnorm')
smoresLog = logging.getLogger('smores')
rxnormLog.addHandler(console)
smoresLog.addHandler(console)
rxnorm_ingredient('209387', True)
rxnorm_ingredient('206410', False)
rxnorm_ingredient('161', True)
print(failures)
|