# Copyright 2016 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Config options available all across the project."""
from oslo_config import cfg
from cloudbaseinit.conf import base as conf_base
from cloudbaseinit import constant
class GlobalOptions(conf_base.Options):
"""Config options available all across the project."""
def __init__(self, config):
super(GlobalOptions, self).__init__(config, group="DEFAULT")
self._options = [
cfg.BoolOpt(
'allow_reboot', default=True,
help='Allows OS reboots requested by plugins'),
cfg.BoolOpt(
'stop_service_on_exit', default=True,
help='In case of execution as a service, specifies if the '
'service must be gracefully stopped before exiting'),
cfg.BoolOpt(
'check_latest_version', default=False,
help='Check if there is a newer version of cloudbase-init '
'available. If this option is activated, a log '
'message will be emitted if there is a newer version '
'available.'),
cfg.IntOpt(
'retry_count', default=5,
help='Max. number of attempts for fetching metadata in '
'case of transient errors'),
cfg.FloatOpt(
'retry_count_interval', default=4,
help='Interval between attempts in case of transient errors, '
'expressed in seconds'),
cfg.StrOpt(
'mtools_path', default=None,
help='Path to "mtools" program suite, used for interacting '
'with VFAT filesystems'),
cfg.StrOpt(
'bsdtar_path', default='bsdtar.exe',
help='Path to "bsdtar", used to extract ISO ConfigDrive '
'files'),
cfg.BoolOpt(
'netbios_host_name_compatibility', default=True,
help='Truncates the hostname to 15 characters for Netbios '
'compatibility'),
cfg.StrOpt(
'logging_serial_port_settings', default=None,
help='Serial port logging settings. Format: '
'"port,baudrate,parity,bytesize", e.g.: '
'"COM1,115200,N,8". Set to None (default) to disable.'),
cfg.BoolOpt(
'activate_windows', default=False,
help='Activates Windows automatically'),
cfg.BoolOpt(
'set_kms_product_key', default=False,
help='Sets the KMS product key for this operating system'),
cfg.BoolOpt(
'set_avma_product_key', default=False,
help='Sets the AVMA product key for this operating system'),
cfg.StrOpt(
'kms_host', default=None,
help='The KMS host address in form <host>[:<port>], '
'e.g: "kmshost:1688"'),
cfg.BoolOpt(
'log_licensing_info', default=True,
help='Logs the operating system licensing information'),
cfg.BoolOpt(
'winrm_enable_basic_auth', default=True,
help='Enables basic authentication for the WinRM '
'HTTPS listener'),
cfg.BoolOpt(
'winrm_configure_http_listener', default=False,
help='Configures the WinRM HTTP listener'),
cfg.BoolOpt(
'winrm_configure_https_listener', default=True,
help='Configures the WinRM HTTPS listener'),
cfg.ListOpt(
'volumes_to_extend', default=None,
help='List of volumes that need to be extended '
'if contiguous space is available on the disk. '
'By default all the available volumes can be extended. '
'Volumes must be specified using a comma separated list '
'of volume indexes, e.g.: "1,2"'),
cfg.StrOpt(
'san_policy', default=None,
choices=[constant.SAN_POLICY_ONLINE_STR,
constant.SAN_POLICY_OFFLINE_STR,
constant.SAN_POLICY_OFFLINE_SHARED_STR],
help='If not None, the SAN policy is set to the given value'),
cfg.StrOpt(
'local_scripts_path', default=None,
help='Path location containing scripts to be executed when '
'the plugin runs'),
cfg.BoolOpt(
'mtu_use_dhcp_config', default=True,
help='Configures the network interfaces MTU based on the '
'values provided via DHCP'),
cfg.StrOpt(
'username', default='Admin', help='User to be added to the '
'system or updated if already existing'),
cfg.ListOpt(
'groups', default=['Administrators'],
help='List of local groups to which the user specified in '
'"username" will be added'),
cfg.BoolOpt(
'rename_admin_user', default=False,
help='Renames the builtin admin user instead of creating a '
'new user'),
cfg.StrOpt(
'heat_config_dir', default='C:\\cfn',
help='The directory where the Heat configuration files must '
'be saved'),
cfg.BoolOpt(
'ntp_enable_service', default=True,
help='Enables the NTP client service'),
cfg.BoolOpt(
'ntp_use_dhcp_config', default=False,
help='Configures NTP client time synchronization using '
'the NTP servers provided via DHCP'),
cfg.BoolOpt(
'real_time_clock_utc', default=False,
help='Sets the real time clock to use universal time (True) '
'or local time (False)'),
cfg.BoolOpt(
'inject_user_password', default=True,
help='Set the password provided in the configuration. '
'If False or no password is provided, a random one '
'will be set'),
cfg.StrOpt(
'first_logon_behaviour',
default=constant.CLEAR_TEXT_INJECTED_ONLY,
choices=constant.LOGON_PASSWORD_CHANGE_OPTIONS,
help='Control the behaviour of what happens at '
'next logon. If this option is set to `always`, '
'then the user will be forced to change the password '
'at next logon. If it is set to '
'`clear_text_injected_only`, '
'then the user will have to change the password only if '
'the password is a clear text password, coming from the '
'metadata. The last option is `no`, when the user is '
'never forced to change the password.'),
cfg.ListOpt(
'metadata_services',
default=[
'cloudbaseinit.metadata.services.httpservice.HttpService',
'cloudbaseinit.metadata.services'
'.configdrive.ConfigDriveService',
'cloudbaseinit.metadata.services.ec2service.EC2Service',
'cloudbaseinit.metadata.services'
'.maasservice.MaaSHttpService',
'cloudbaseinit.metadata.services.cloudstack.CloudStack',
'cloudbaseinit.metadata.services'
'.opennebulaservice.OpenNebulaService',
],
help='List of enabled metadata service classes, '
'to be tested for availability in the provided order. '
'The first available service will be used to retrieve '
'metadata'),
cfg.ListOpt(
'plugins',
default=[
'cloudbaseinit.plugins.common.mtu.MTUPlugin',
'cloudbaseinit.plugins.windows.ntpclient'
'.NTPClientPlugin',
'cloudbaseinit.plugins.common.sethostname'
'.SetHostNamePlugin',
'cloudbaseinit.plugins.windows.createuser'
'.CreateUserPlugin',
'cloudbaseinit.plugins.common.networkconfig'
'.NetworkConfigPlugin',
'cloudbaseinit.plugins.windows.licensing'
'.WindowsLicensingPlugin',
'cloudbaseinit.plugins.common.sshpublickeys'
'.SetUserSSHPublicKeysPlugin',
'cloudbaseinit.plugins.windows.extendvolumes'
'.ExtendVolumesPlugin',
'cloudbaseinit.plugins.common.userdata.UserDataPlugin',
'cloudbaseinit.plugins.common.setuserpassword.'
'SetUserPasswordPlugin',
'cloudbaseinit.plugins.windows.winrmlistener.'
'ConfigWinRMListenerPlugin',
'cloudbaseinit.plugins.windows.winrmcertificateauth.'
'ConfigWinRMCertificateAuthPlugin',
'cloudbaseinit.plugins.common.localscripts'
'.LocalScriptsPlugin',
],
help='List of enabled plugin classes, '
'to be executed in the provided order'),
cfg.ListOpt(
'user_data_plugins',
default=[
'cloudbaseinit.plugins.common.userdataplugins.parthandler.'
'PartHandlerPlugin',
'cloudbaseinit.plugins.common.userdataplugins.cloudconfig.'
'CloudConfigPlugin',
'cloudbaseinit.plugins.common.userdataplugins'
'.cloudboothook.CloudBootHookPlugin',
'cloudbaseinit.plugins.common.userdataplugins.shellscript.'
'ShellScriptPlugin',
'cloudbaseinit.plugins.common.userdataplugins'
'.multipartmixed.MultipartMixedPlugin',
'cloudbaseinit.plugins.common.userdataplugins.heat.'
'HeatPlugin',
],
help='List of enabled userdata content plugins'),
cfg.ListOpt(
'cloud_config_plugins', default=[],
help='List which contains the name of the cloud config '
'plugins ordered by priority.'),
cfg.BoolOpt(
'rdp_set_keepalive', default=True,
help='Sets the RDP KeepAlive policy'),
cfg.StrOpt(
'bcd_boot_status_policy',
default=None,
choices=[constant.POLICY_IGNORE_ALL_FAILURES],
help='Sets the Windows BCD boot status policy'),
cfg.BoolOpt(
'bcd_enable_auto_recovery', default=False,
help='Enables or disables the BCD auto recovery'),
cfg.BoolOpt(
'set_unique_boot_disk_id', default=True,
help='Sets a new random unique id on the boot disk to avoid '
'collisions'),
cfg.IntOpt(
'display_idle_timeout', default=0,
help='The idle timeout, in seconds, before powering off '
'the display. Set 0 to leave the display always on'),
cfg.ListOpt(
'page_file_volume_labels', default=[],
help='Labels of volumes on which a Windows page file needs to '
'be created. E.g.: "Temporary Storage"'),
cfg.ListOpt(
'page_file_volume_mount_points', default=[],
help='Volume mount points on which a Windows page file needs '
'to be created. E.g.: '
'"\\\\?\\GLOBALROOT\\device\\Harddisk1\\Partition1\\"'),
cfg.BoolOpt(
'trim_enabled', default=False,
help='Enables or disables TRIM delete notifications for '
'the underlying storage device.'),
cfg.BoolOpt(
'process_userdata', default=True,
help='Processes the userdata content based on the type, e.g. '
'executing a PowerShell script'),
cfg.StrOpt(
'userdata_save_path',
default=None,
help='Copies the userdata to the given file path. The path '
'can include environment variables that will be expanded,'
' e.g. "%%SYSTEMDRIVE%%\\CloudbaseInit\\UserData.bin"'),
cfg.BoolOpt(
'enable_automatic_updates', default=None,
help='If set, enables or disables automatic operating '
'system updates.'),
cfg.BoolOpt(
'metadata_report_provisioning_started', default=False,
help='Reports to the metadata service that provisioning has '
'started'),
cfg.BoolOpt(
'metadata_report_provisioning_completed', default=False,
help='Reports to the metadata service that provisioning '
'completed successfully or failed'),
cfg.StrOpt(
'ephemeral_disk_volume_label', default=None,
help='Ephemeral disk volume label, e.g.: "Temporary Storage"'),
cfg.StrOpt(
'ephemeral_disk_volume_mount_point', default=None,
                help='Ephemeral disk volume mount point, e.g.: '
'"\\\\?\\GLOBALROOT\\device\\Harddisk1\\Partition1\\"'),
cfg.StrOpt(
'ephemeral_disk_data_loss_warning_path', default=None,
help='Ephemeral disk data loss warning path, relative to the '
'ephemeral disk volume path. E.g.: '
'DATALOSS_WARNING_README.txt'),
cfg.IntOpt(
'user_password_length', default=20,
help='The length of the generated password for the user '
'defined by the `username` config option.'),
]
self._cli_options = [
cfg.BoolOpt(
'reset_service_password', default=True,
help='If set to True, the service user password will be '
'reset at each execution with a new random value of '
'appropriate length and complexity, unless the user is '
                     'a built-in or domain account. '
'This is needed to avoid "pass the hash" attacks on '
'Windows cloned instances.'),
]
def register(self):
"""Register the current options to the global ConfigOpts object."""
self._config.register_cli_opts(self._cli_options)
        self._config.register_opts(self._options)
def list(self):
"""Return a list which contains all the available options."""
        return self._options + self._cli_options
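# Illustrative usage sketch (not part of the original module): cloudbase-init
# normally does this wiring through its own package-level configuration
# helper. The block below only shows, under the assumption that
# conf_base.Options stores the given ConfigOpts instance as self._config (as
# register() above implies), how the options can be registered and read
# directly via oslo.config.
if __name__ == "__main__":
    example_conf = cfg.ConfigOpts()
    GlobalOptions(example_conf).register()
    example_conf(args=[])  # parse an empty command line; defaults apply
    print(example_conf.allow_reboot)   # True
    print(example_conf.retry_count)    # 5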
|
|
from sys import exit
from click import BOOL, argument, option, prompt
from flask.cli import AppGroup
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.exc import IntegrityError
from redash import models
from redash.handlers.users import invite_user
manager = AppGroup(help="Users management commands.")
def build_groups(org, groups, is_admin):
if isinstance(groups, basestring):
groups = groups.split(',')
if "" in groups:
groups.remove('') # in case it was empty string
groups = [int(g) for g in groups]
if groups is None:
groups = [org.default_group.id]
if is_admin:
groups += [org.admin_group.id]
return groups
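# Illustrative examples of the parsing above (hypothetical ids): for an org
# whose default_group.id is 1 and admin_group.id is 2,
#   build_groups(org, "3,4,", is_admin=True)  -> [3, 4, 2]
#   build_groups(org, None, is_admin=False)   -> [1]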
@manager.command()
@argument('email')
@option('--org', 'organization', default='default',
help="the organization the user belongs to, (leave blank for "
"'default').")
def grant_admin(email, organization='default'):
"""
Grant admin access to user EMAIL.
"""
try:
org = models.Organization.get_by_slug(organization)
admin_group = org.admin_group
user = models.User.get_by_email_and_org(email, org)
if admin_group.id in user.group_ids:
print "User is already an admin."
else:
user.group_ids = user.group_ids + [org.admin_group.id]
models.db.session.add(user)
models.db.session.commit()
print "User updated."
except NoResultFound:
print "User [%s] not found." % email
def create_user_logic(email, name, groups, is_admin=False, google_auth=False,
                      password=None, organization='default', dashgroups=None):
    """
    Create user EMAIL with display name NAME. The dashgroups argument is a
    comma-separated list of dashgroup names.
    """
print "Creating user (%s, %s) in organization %s..." % (email, name,
organization)
print "Admin: %r" % is_admin
print "Login with Google Auth: %r\n" % google_auth
org = models.Organization.get_by_slug(organization)
groups = build_groups(org, groups, is_admin)
user = models.User(org=org, email=email, name=name, group_ids=groups)
if not password and not google_auth:
password = prompt("Password", hide_input=True,
confirmation_prompt=True)
if not google_auth:
user.hash_password(password)
try:
models.db.session.add(user)
models.db.session.commit()
# Creates a UserDashgroup for every passed dashgroup.
if dashgroups is not None:
for dg in dashgroups.split(','):
dashgroup = models.Dashgroup.get_by_name(dg)
user_dg = models.UserDashgroup(dashgroup_id=dashgroup.id, user_id=user.id)
models.db.session.add(user_dg)
models.db.session.commit()
except Exception, e:
print "Failed creating user: %s" % e.message
exit(1)
@manager.command("create")
@argument('email')
@argument('name')
@option('--org', 'organization', default='default',
help="The organization the user belongs to (leave blank for "
"'default').")
@option('--admin', 'is_admin', is_flag=True, default=False,
help="set user as admin")
@option('--google', 'google_auth', is_flag=True,
default=False, help="user uses Google Auth to login")
@option('--password', 'password', default=None,
help="Password for users who don't use Google Auth "
"(leave blank for prompt).")
@option('--groups', 'groups', default=None,
help="Comma separated list of groups (leave blank for "
"default).")
@option('--dashgroups', 'dashgroups', default=None, help="Comma separated list of dashgroups.")
def create(email, name, groups, is_admin=False, google_auth=False,
           password=None, organization='default', dashgroups=None):
    create_user_logic(email, name, groups, is_admin, google_auth, password,
                      organization, dashgroups)
@manager.command()
@argument('email')
@option('--org', 'organization', default=None,
help="The organization the user belongs to (leave blank for all"
" organizations).")
def delete(email, organization=None):
"""
Delete user EMAIL.
"""
if organization:
org = models.Organization.get_by_slug(organization)
deleted_count = models.User.query.filter(
models.User.email == email,
models.User.org == org.id,
).delete()
else:
deleted_count = models.User.query.filter(models.User.email == email).delete()
models.db.session.commit()
print "Deleted %d users." % deleted_count
@manager.command()
@argument('email')
@argument('password')
@option('--org', 'organization', default=None,
help="The organization the user belongs to (leave blank for all "
"organizations).")
def password(email, password, organization=None):
"""
Resets password for EMAIL to PASSWORD.
"""
if organization:
org = models.Organization.get_by_slug(organization)
user = models.User.query.filter(
models.User.email == email,
models.User.org == org,
).first()
else:
user = models.User.query.filter(models.User.email == email).first()
if user is not None:
user.hash_password(password)
models.db.session.add(user)
models.db.session.commit()
print "User updated."
else:
print "User [%s] not found." % email
exit(1)
@manager.command()
@argument('email')
@argument('name')
@argument('inviter_email')
@option('--org', 'organization', default='default',
help="The organization the user belongs to (leave blank for 'default')")
@option('--admin', 'is_admin', type=BOOL, default=False,
help="set user as admin")
@option('--groups', 'groups', default=None,
help="Comma seperated list of groups (leave blank for default).")
def invite(email, name, inviter_email, groups, is_admin=False,
organization='default'):
"""
Sends an invitation to the given NAME and EMAIL from INVITER_EMAIL.
"""
org = models.Organization.get_by_slug(organization)
groups = build_groups(org, groups, is_admin)
try:
user_from = models.User.get_by_email_and_org(inviter_email, org)
user = models.User(org=org, name=name, email=email, group_ids=groups)
models.db.session.add(user)
try:
models.db.session.commit()
invite_user(org, user_from, user)
print "An invitation was sent to [%s] at [%s]." % (name, email)
except IntegrityError as e:
if "email" in e.message:
print "Cannot invite. User already exists [%s]" % email
else:
print e
except NoResultFound:
print "The inviter [%s] was not found." % inviter_email
@manager.command()
@option('--org', 'organization', default=None,
help="The organization the user belongs to (leave blank for all"
" organizations)")
def list(organization=None):
"""List all users"""
if organization:
org = models.Organization.get_by_slug(organization)
users = models.User.query.filter(models.User.org == org)
else:
users = models.User.query
for i, user in enumerate(users):
if i > 0:
print "-" * 20
print "Id: {}\nName: {}\nEmail: {}\nOrganization: {}".format(
user.id, user.name.encode('utf-8'), user.email, user.org.name)
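# Illustrative CLI usage (assuming this AppGroup is registered on the Flask
# application's CLI under the name "users", which Redash's CLI setup does
# elsewhere and is not shown above):
#
#   flask users create admin@example.org "Admin User" --admin --groups "1,2"
#   flask users grant_admin someone@example.org
#   flask users password someone@example.org "new-secret" --org default
#   flask users list --org default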
|
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import base64
import binascii
from contextlib import closing
import copy
import functools
import sys
import threading
import datetime
from io import BytesIO
from tornado.escape import utf8, native_str
from tornado import gen
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest, skipOnTravis
from tornado.web import Application, RequestHandler, url
from tornado.httputil import format_timestamp, HTTPHeaders
class HelloWorldHandler(RequestHandler):
def get(self):
name = self.get_argument("name", "world")
self.set_header("Content-Type", "text/plain")
self.finish("Hello %s!" % name)
class PostHandler(RequestHandler):
def post(self):
self.finish("Post arg1: %s, arg2: %s" % (
self.get_argument("arg1"), self.get_argument("arg2")))
class PutHandler(RequestHandler):
def put(self):
self.write("Put body: ")
self.write(self.request.body)
class RedirectHandler(RequestHandler):
def prepare(self):
self.write('redirects can have bodies too')
self.redirect(self.get_argument("url"),
status=int(self.get_argument("status", "302")))
class ChunkHandler(RequestHandler):
@gen.coroutine
def get(self):
self.write("asdf")
self.flush()
# Wait a bit to ensure the chunks are sent and received separately.
yield gen.sleep(0.01)
self.write("qwer")
class AuthHandler(RequestHandler):
def get(self):
self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
def get(self, count):
count = int(count)
if count > 0:
self.redirect(self.reverse_url("countdown", count - 1))
else:
self.write("Zero")
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
class UserAgentHandler(RequestHandler):
def get(self):
self.write(self.request.headers.get('User-Agent', 'User agent not set'))
class ContentLength304Handler(RequestHandler):
def get(self):
self.set_status(304)
self.set_header('Content-Length', 42)
def _clear_headers_for_304(self):
# Tornado strips content-length from 304 responses, but here we
# want to simulate servers that include the headers anyway.
pass
class PatchHandler(RequestHandler):
def patch(self):
"Return the request payload - so we can check it is being kept"
self.write(self.request.body)
class AllMethodsHandler(RequestHandler):
SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
def method(self):
self.write(self.request.method)
get = post = put = delete = options = patch = other = method
class SetHeaderHandler(RequestHandler):
def get(self):
# Use get_arguments for keys to get strings, but
# request.arguments for values to get bytes.
for k, v in zip(self.get_arguments('k'),
self.request.arguments['v']):
self.set_header(k, v)
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([
url("/hello", HelloWorldHandler),
url("/post", PostHandler),
url("/put", PutHandler),
url("/redirect", RedirectHandler),
url("/chunk", ChunkHandler),
url("/auth", AuthHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/echopost", EchoPostHandler),
url("/user_agent", UserAgentHandler),
url("/304_with_content_length", ContentLength304Handler),
url("/all_methods", AllMethodsHandler),
url('/patch', PatchHandler),
url('/set_header', SetHeaderHandler),
], gzip=True)
def test_patch_receives_payload(self):
body = b"some patch data"
response = self.fetch("/patch", method='PATCH', body=body)
self.assertEqual(response.code, 200)
self.assertEqual(response.body, body)
@skipOnTravis
def test_hello_world(self):
response = self.fetch("/hello")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["Content-Type"], "text/plain")
self.assertEqual(response.body, b"Hello world!")
self.assertEqual(int(response.request_time), 0)
response = self.fetch("/hello?name=Ben")
self.assertEqual(response.body, b"Hello Ben!")
def test_streaming_callback(self):
# streaming_callback is also tested in test_chunked
chunks = []
response = self.fetch("/hello",
streaming_callback=chunks.append)
# with streaming_callback, data goes to the callback and not response.body
self.assertEqual(chunks, [b"Hello world!"])
self.assertFalse(response.body)
def test_post(self):
response = self.fetch("/post", method="POST",
body="arg1=foo&arg2=bar")
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_chunked(self):
response = self.fetch("/chunk")
self.assertEqual(response.body, b"asdfqwer")
chunks = []
response = self.fetch("/chunk",
streaming_callback=chunks.append)
self.assertEqual(chunks, [b"asdf", b"qwer"])
self.assertFalse(response.body)
def test_chunked_close(self):
# test case in which chunks spread read-callback processing
# over several ioloop iterations, but the connection is already closed.
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
if b"HTTP/1." not in request_data:
self.skipTest("requires HTTP/1.x")
stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked
1
1
1
2
0
""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
# fake an HTTP server using chunked encoding where the final chunks
# and connection close all happen at once
stream = IOStream(conn)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.body, b"12")
self.io_loop.remove_handler(sock.fileno())
def test_streaming_stack_context(self):
chunks = []
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def streaming_cb(chunk):
chunks.append(chunk)
if chunk == b'qwer':
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', streaming_callback=streaming_cb)
self.assertEqual(chunks, [b'asdf', b'qwer'])
self.assertEqual(1, len(exc_info))
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_basic_auth(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_basic_auth_explicit_mode(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="basic").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_unsupported_auth_mode(self):
# curl and simple clients handle errors a bit differently; the
# important thing is that they don't fall back to basic auth
# on an unknown mode.
with ExpectLog(gen_log, "uncaught exception", required=False):
with self.assertRaises((ValueError, HTTPError)):
response = self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="asdf")
response.rethrow()
def test_follow_redirect(self):
response = self.fetch("/countdown/2", follow_redirects=False)
self.assertEqual(302, response.code)
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
response = self.fetch("/countdown/2")
self.assertEqual(200, response.code)
self.assertTrue(response.effective_url.endswith("/countdown/0"))
self.assertEqual(b"Zero", response.body)
def test_credentials_in_url(self):
url = self.get_url("/auth").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
response.body)
def test_body_encoding(self):
unicode_body = u"\xe9"
byte_body = binascii.a2b_hex(b"e9")
# unicode string in body gets converted to utf8
response = self.fetch("/echopost", method="POST", body=unicode_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "2")
self.assertEqual(response.body, utf8(unicode_body))
# byte strings pass through directly
response = self.fetch("/echopost", method="POST",
body=byte_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
# Mixing unicode in headers and byte string bodies shouldn't
# break anything
response = self.fetch("/echopost", method="POST", body=byte_body,
headers={"Content-Type": "application/blah"},
user_agent=u"foo")
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
def test_types(self):
response = self.fetch("/hello")
self.assertEqual(type(response.body), bytes)
self.assertEqual(type(response.headers["Content-Type"]), str)
self.assertEqual(type(response.code), int)
self.assertEqual(type(response.effective_url), str)
def test_header_callback(self):
first_line = []
headers = {}
chunks = []
def header_callback(header_line):
if header_line.startswith('HTTP/1.1 101'):
# Upgrading to HTTP/2
pass
elif header_line.startswith('HTTP/'):
first_line.append(header_line)
elif header_line != '\r\n':
k, v = header_line.split(':', 1)
headers[k.lower()] = v.strip()
def streaming_callback(chunk):
# All header callbacks are run before any streaming callbacks,
# so the header data is available to process the data as it
# comes in.
self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
chunks.append(chunk)
self.fetch('/chunk', header_callback=header_callback,
streaming_callback=streaming_callback)
self.assertEqual(len(first_line), 1, first_line)
self.assertRegexpMatches(first_line[0], 'HTTP/[0-9]\\.[0-9] 200.*\r\n')
self.assertEqual(chunks, [b'asdf', b'qwer'])
def test_header_callback_stack_context(self):
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def header_callback(header_line):
if header_line.lower().startswith('content-type:'):
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', header_callback=header_callback)
self.assertEqual(len(exc_info), 1)
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_configure_defaults(self):
defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
# Construct a new instance of the configured client class
client = self.http_client.__class__(force_instance=True,
defaults=defaults)
try:
client.fetch(self.get_url('/user_agent'), callback=self.stop)
response = self.wait()
self.assertEqual(response.body, b'TestDefaultUserAgent')
finally:
client.close()
def test_header_types(self):
# Header values may be passed as character or utf8 byte strings,
# in a plain dictionary or an HTTPHeaders object.
# Keys must always be the native str type.
# All combinations should have the same results on the wire.
for value in [u"MyUserAgent", b"MyUserAgent"]:
for container in [dict, HTTPHeaders]:
headers = container()
headers['User-Agent'] = value
resp = self.fetch('/user_agent', headers=headers)
self.assertEqual(
resp.body, b"MyUserAgent",
"response=%r, value=%r, container=%r" %
(resp.body, value, container))
def test_multi_line_headers(self):
# Multi-line http headers are rare but rfc-allowed
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
if b"HTTP/1." not in request_data:
self.skipTest("requires HTTP/1.x")
stream.write(b"""\
HTTP/1.1 200 OK
X-XSS-Protection: 1;
\tmode=block
""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
stream = IOStream(conn)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.headers['X-XSS-Protection'], "1; mode=block")
self.io_loop.remove_handler(sock.fileno())
def test_304_with_content_length(self):
# According to the spec 304 responses SHOULD NOT include
# Content-Length or other entity headers, but some servers do it
# anyway.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
response = self.fetch('/304_with_content_length')
self.assertEqual(response.code, 304)
self.assertEqual(response.headers['Content-Length'], '42')
def test_final_callback_stack_context(self):
# The final callback should be run outside of the httpclient's
# stack_context. We want to ensure that there is not stack_context
# between the user's callback and the IOLoop, so monkey-patch
# IOLoop.handle_callback_exception and disable the test harness's
# context with a NullContext.
# Note that this does not apply to secondary callbacks (header
# and streaming_callback), as errors there must be seen as errors
# by the http client so it can clean up the connection.
exc_info = []
def handle_callback_exception(callback):
exc_info.append(sys.exc_info())
self.stop()
self.io_loop.handle_callback_exception = handle_callback_exception
with NullContext():
self.http_client.fetch(self.get_url('/hello'),
lambda response: 1 / 0)
self.wait()
self.assertEqual(exc_info[0][0], ZeroDivisionError)
@gen_test
def test_future_interface(self):
response = yield self.http_client.fetch(self.get_url('/hello'))
self.assertEqual(response.body, b'Hello world!')
@gen_test
def test_future_http_error(self):
with self.assertRaises(HTTPError) as context:
yield self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(context.exception.code, 404)
self.assertEqual(context.exception.response.code, 404)
@gen_test
def test_future_http_error_no_raise(self):
response = yield self.http_client.fetch(self.get_url('/notfound'), raise_error=False)
self.assertEqual(response.code, 404)
@gen_test
def test_reuse_request_from_response(self):
# The response.request attribute should be an HTTPRequest, not
# a _RequestProxy.
# This test uses self.http_client.fetch because self.fetch calls
# self.get_url on the input unconditionally.
url = self.get_url('/hello')
response = yield self.http_client.fetch(url)
self.assertEqual(response.request.url, url)
self.assertTrue(isinstance(response.request, HTTPRequest))
response2 = yield self.http_client.fetch(response.request)
self.assertEqual(response2.body, b'Hello world!')
def test_all_methods(self):
for method in ['GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/all_methods', method=method)
self.assertEqual(response.body, utf8(method))
for method in ['POST', 'PUT', 'PATCH']:
response = self.fetch('/all_methods', method=method, body=b'')
self.assertEqual(response.body, utf8(method))
response = self.fetch('/all_methods', method='HEAD')
self.assertEqual(response.body, b'')
response = self.fetch('/all_methods', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.body, b'OTHER')
def test_body_sanity_checks(self):
# These methods require a body.
for method in ('POST', 'PUT', 'PATCH'):
with self.assertRaises(ValueError) as context:
resp = self.fetch('/all_methods', method=method)
resp.rethrow()
self.assertIn('must not be None', str(context.exception))
resp = self.fetch('/all_methods', method=method,
allow_nonstandard_methods=True)
self.assertEqual(resp.code, 200)
# These methods don't allow a body.
for method in ('GET', 'DELETE', 'OPTIONS'):
with self.assertRaises(ValueError) as context:
resp = self.fetch('/all_methods', method=method, body=b'asdf')
resp.rethrow()
self.assertIn('must be None', str(context.exception))
# In most cases this can be overridden, but curl_httpclient
# does not allow body with a GET at all.
if method != 'GET':
resp = self.fetch('/all_methods', method=method, body=b'asdf',
allow_nonstandard_methods=True)
resp.rethrow()
self.assertEqual(resp.code, 200)
# This test causes odd failures with the combination of
# curl_httpclient (at least with the version of libcurl available
# on ubuntu 12.04), TwistedIOLoop, and epoll. For POST (but not PUT),
# curl decides the response came back too soon and closes the connection
# to start again. It does this *before* telling the socket callback to
# unregister the FD. Some IOLoop implementations have special kernel
# integration to discover this immediately. Tornado's IOLoops
# ignore errors on remove_handler to accommodate this behavior, but
# Twisted's reactor does not. The removeReader call fails and so
# do all future removeAll calls (which our tests do at cleanup).
#
# def test_post_307(self):
# response = self.fetch("/redirect?status=307&url=/post",
# method="POST", body=b"arg1=foo&arg2=bar")
# self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_put_307(self):
response = self.fetch("/redirect?status=307&url=/put",
method="PUT", body=b"hello")
response.rethrow()
self.assertEqual(response.body, b"Put body: hello")
def test_non_ascii_header(self):
# Non-ascii headers are sent as latin1.
response = self.fetch("/set_header?k=foo&v=%E9")
response.rethrow()
self.assertEqual(response.headers["Foo"], native_str(u"\u00e9"))
class RequestProxyTest(unittest.TestCase):
def test_request_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
user_agent='foo'),
dict())
self.assertEqual(proxy.user_agent, 'foo')
def test_default_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict(network_interface='foo'))
self.assertEqual(proxy.network_interface, 'foo')
def test_both_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
proxy_host='foo'),
dict(proxy_host='bar'))
self.assertEqual(proxy.proxy_host, 'foo')
def test_neither_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
self.assertIs(proxy.auth_username, None)
def test_bad_attribute(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
with self.assertRaises(AttributeError):
proxy.foo
def test_defaults_none(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
def test_str(self):
response = HTTPResponse(HTTPRequest('http://example.com'),
200, headers={}, buffer=BytesIO())
s = str(response)
self.assertTrue(s.startswith('HTTPResponse('))
self.assertIn('code=200', s)
class SyncHTTPClientTest(unittest.TestCase):
def setUp(self):
if IOLoop.configured_class().__name__ == 'TwistedIOLoop':
# TwistedIOLoop only supports the global reactor, so we can't have
# separate IOLoops for client and server threads.
raise unittest.SkipTest(
'Sync HTTPClient not compatible with TwistedIOLoop')
self.server_ioloop = IOLoop()
@gen.coroutine
def init_server():
sock, self.port = bind_unused_port()
app = Application([('/', HelloWorldHandler)])
self.server = HTTPServer(app)
self.server.add_socket(sock)
self.server_ioloop.run_sync(init_server)
self.server_thread = threading.Thread(target=self.server_ioloop.start)
self.server_thread.start()
self.http_client = HTTPClient()
def tearDown(self):
def stop_server():
self.server.stop()
# Delay the shutdown of the IOLoop by one iteration because
# the server may still have some cleanup work left when
# the client finishes with the response (this is noticeable
# with http/2, which leaves a Future with an unexamined
# StreamClosedError on the loop).
self.server_ioloop.add_callback(self.server_ioloop.stop)
self.server_ioloop.add_callback(stop_server)
self.server_thread.join()
self.http_client.close()
self.server_ioloop.close(all_fds=True)
def get_url(self, path):
return 'http://127.0.0.1:%d%s' % (self.port, path)
def test_sync_client(self):
response = self.http_client.fetch(self.get_url('/'))
self.assertEqual(b'Hello world!', response.body)
def test_sync_client_error(self):
# Synchronous HTTPClient raises errors directly; no need for
# response.rethrow()
with self.assertRaises(HTTPError) as assertion:
self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(assertion.exception.code, 404)
class HTTPRequestTestCase(unittest.TestCase):
def test_headers(self):
request = HTTPRequest('http://example.com', headers={'foo': 'bar'})
self.assertEqual(request.headers, {'foo': 'bar'})
def test_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = {'bar': 'baz'}
self.assertEqual(request.headers, {'bar': 'baz'})
def test_null_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = None
self.assertEqual(request.headers, {})
def test_body(self):
request = HTTPRequest('http://example.com', body='foo')
self.assertEqual(request.body, utf8('foo'))
def test_body_setter(self):
request = HTTPRequest('http://example.com')
request.body = 'foo'
self.assertEqual(request.body, utf8('foo'))
def test_if_modified_since(self):
http_date = datetime.datetime.utcnow()
request = HTTPRequest('http://example.com', if_modified_since=http_date)
self.assertEqual(request.headers,
{'If-Modified-Since': format_timestamp(http_date)})
class HTTPErrorTestCase(unittest.TestCase):
def test_copy(self):
e = HTTPError(403)
e2 = copy.copy(e)
self.assertIsNot(e, e2)
self.assertEqual(e.code, e2.code)
def test_plain_error(self):
e = HTTPError(403)
self.assertEqual(str(e), "HTTP 403: Forbidden")
self.assertEqual(repr(e), "HTTP 403: Forbidden")
def test_error_with_response(self):
resp = HTTPResponse(HTTPRequest('http://example.com/'), 403)
with self.assertRaises(HTTPError) as cm:
resp.rethrow()
e = cm.exception
self.assertEqual(str(e), "HTTP 403: Forbidden")
self.assertEqual(repr(e), "HTTP 403: Forbidden")
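# A minimal way to run just this module with Tornado's own test runner
# (assuming the tornado.test package is available alongside tornado):
#
#   python -m tornado.test.runtests tornado.test.httpclient_test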
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training functions for Gradient boosted decision trees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
from tensorflow.contrib import learn
from tensorflow.contrib.boosted_trees.lib.learner.batch import categorical_split_handler
from tensorflow.contrib.boosted_trees.lib.learner.batch import ordinal_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.python.ops import batch_ops_utils
from tensorflow.contrib.boosted_trees.python.ops import gen_model_ops
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.ops import prediction_ops
from tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops
from tensorflow.contrib.boosted_trees.python.ops import training_ops
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import stateless_random_ops as stateless
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import device_setter
# Key names for prediction dict.
ENSEMBLE_STAMP = "ensemble_stamp"
PREDICTIONS = "predictions"
PARTITION_IDS = "partition_ids"
NUM_LAYERS_ATTEMPTED = "num_layers"
NUM_TREES_ATTEMPTED = "num_trees"
NUM_USED_HANDLERS = "num_used_handlers"
USED_HANDLERS_MASK = "used_handlers_mask"
LEAF_INDEX = "leaf_index"
_FEATURE_NAME_TEMPLATE = "%s_%d"
# Keys in Training state.
GBDTTrainingState = collections.namedtuple("GBDTTrainingState", [
"num_layer_examples", "num_layer_steps", "num_layers", "active_tree",
"active_layer", "continue_centering", "bias_stats_accumulator",
"steps_accumulator", "handlers"
])
def _get_column_by_index(tensor, indices):
"""Returns columns from a 2-D tensor by index."""
shape = array_ops.shape(tensor)
p_flat = array_ops.reshape(tensor, [-1])
i_flat = array_ops.reshape(
array_ops.reshape(math_ops.range(0, shape[0]) * shape[1], [-1, 1]) +
indices, [-1])
return array_ops.reshape(array_ops.gather(p_flat, i_flat), [shape[0], -1])
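# Worked example (illustrative): for a 2x3 tensor
#   [[a, b, c],
#    [d, e, f]]
# and indices = [2], the per-row offsets are range(2) * 3 = [0, 3]; adding the
# column indices gives flat positions [[2], [5]], so the gather picks [c, f]
# and the result is reshaped back to shape [2, 1].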
def _make_predictions_dict(stamp,
logits,
partition_ids,
ensemble_stats,
used_handlers,
leaf_index=None):
"""Returns predictions for the given logits and n_classes.
Args:
stamp: The ensemble stamp.
    logits: A rank 2 `Tensor` with shape [batch_size, n_classes - 1] that
      contains predictions when no dropout was applied.
partition_ids: A rank 1 `Tensor` with shape [batch_size].
ensemble_stats: A TreeEnsembleStatsOp result tuple.
used_handlers: A TreeEnsembleUsedHandlerOp result tuple of an int and a
boolean mask.
    leaf_index: A rank 2 `Tensor` with shape [batch_size, number of trees] that
      contains the leaf id for each example prediction.
Returns:
A dict of predictions.
"""
result = {}
result[ENSEMBLE_STAMP] = stamp
result[PREDICTIONS] = logits
result[PARTITION_IDS] = partition_ids
result[NUM_LAYERS_ATTEMPTED] = ensemble_stats.attempted_layers
result[NUM_TREES_ATTEMPTED] = ensemble_stats.attempted_trees
result[NUM_USED_HANDLERS] = used_handlers.num_used_handlers
result[USED_HANDLERS_MASK] = used_handlers.used_handlers_mask
if leaf_index is not None:
result[LEAF_INDEX] = leaf_index
return result
class _OpRoundRobinStrategy(object):
"""Returns the next ps task index for placement via per-Op round-robin order.
  This strategy works slightly better for the GBDT graph because it uses
  custom resources whose compute costs vary significantly.
"""
def __init__(self, ps_ops, num_tasks):
"""Create a new `_RoundRobinStrategy`.
Args:
ps_ops: List of Op types to place on PS.
num_tasks: Number of ps tasks to cycle among.
"""
next_task = 0
self._next_task_per_op = {}
for op in ps_ops:
self._next_task_per_op[op] = next_task
next_task = (next_task + 1) % num_tasks if num_tasks else 0
self._num_tasks = num_tasks
def __call__(self, op):
"""Choose a ps task index for the given `Operation`.
Args:
op: An `Operation` to be placed on ps.
Returns:
      The next ps task index to use for the `Operation`, in the range
      `[0, num_tasks)`.
Raises:
ValueError: If attempting to place non-PS Op.
"""
if op.type not in self._next_task_per_op:
raise ValueError("Unknown op type '%s' for placement:" % op.type)
task = self._next_task_per_op[op.type]
self._next_task_per_op[op.type] = ((task + 1) % self._num_tasks
if self._num_tasks else 0)
return task
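# Illustrative behaviour (hypothetical op types): with ps_ops=["VarA", "VarB"]
# and num_tasks=2, "VarA" starts its cycle at task 0 and "VarB" at task 1, and
# each op type then advances independently: VarA -> 0, 1, 0, ... while
# VarB -> 1, 0, 1, ...  This spreads each resource type across the ps tasks.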
def extract_features(features, feature_columns, use_core_columns):
"""Extracts columns from a dictionary of features.
Args:
features: `dict` of `Tensor` objects.
    feature_columns: A list of feature_columns.
    use_core_columns: A boolean specifying whether core feature columns are
      used.
  Returns:
    Eight values:
- A list of all feature column names.
- A list of dense floats.
- A list of sparse float feature indices.
- A list of sparse float feature values.
- A list of sparse float feature shapes.
- A list of sparse int feature indices.
- A list of sparse int feature values.
- A list of sparse int feature shapes.
Raises:
ValueError: if features is not valid.
"""
if not features:
raise ValueError("Features dictionary must be specified.")
# Make a shallow copy of features to ensure downstream usage
# is unaffected by modifications in the model function.
features = copy.copy(features)
if feature_columns:
scope = "gbdt"
with variable_scope.variable_scope(scope):
feature_columns = list(feature_columns)
transformed_features = collections.OrderedDict()
for fc in feature_columns:
# pylint: disable=protected-access
if use_core_columns:
# pylint: disable=protected-access
tensor = fc_core._transform_features(features, [fc])[fc]
transformed_features[fc.name] = tensor
elif isinstance(fc, feature_column_lib._EmbeddingColumn):
# pylint: enable=protected-access
transformed_features[fc.name] = fc_core.input_layer(
features, [fc], weight_collections=[scope])
else:
result = feature_column_ops.transform_features(features, [fc])
if len(result) > 1:
raise ValueError("Unexpected number of output features")
transformed_features[fc.name] = result[list(result.keys())[0]]
features = transformed_features
dense_float_names = []
dense_floats = []
sparse_float_names = []
sparse_float_indices = []
sparse_float_values = []
sparse_float_shapes = []
sparse_int_names = []
sparse_int_indices = []
sparse_int_values = []
sparse_int_shapes = []
for key in sorted(features.keys()):
tensor = features[key]
# TODO(nponomareva): consider iterating over feature columns instead.
if isinstance(tensor, tuple):
# Weighted categorical feature.
categorical_tensor = tensor[0]
weight_tensor = tensor[1]
shape = categorical_tensor.dense_shape
indices = array_ops.concat([
array_ops.slice(categorical_tensor.indices, [0, 0], [-1, 1]),
array_ops.expand_dims(
math_ops.cast(categorical_tensor.values, dtypes.int64), -1)
], 1)
tensor = sparse_tensor.SparseTensor(
indices=indices, values=weight_tensor.values, dense_shape=shape)
if isinstance(tensor, sparse_tensor.SparseTensor):
if tensor.values.dtype == dtypes.float32:
sparse_float_names.append(key)
sparse_float_indices.append(tensor.indices)
sparse_float_values.append(tensor.values)
sparse_float_shapes.append(tensor.dense_shape)
elif tensor.values.dtype == dtypes.int64:
sparse_int_names.append(key)
sparse_int_indices.append(tensor.indices)
sparse_int_values.append(tensor.values)
sparse_int_shapes.append(tensor.dense_shape)
else:
raise ValueError("Unsupported sparse feature %s with dtype %s." %
(tensor.indices.name, tensor.dtype))
else:
if tensor.dtype == dtypes.float32:
if len(tensor.shape) > 1 and tensor.shape[1] > 1:
unstacked = array_ops.unstack(tensor, axis=1)
for i in range(len(unstacked)):
dense_float_names.append(_FEATURE_NAME_TEMPLATE % (key, i))
dense_floats.append(array_ops.reshape(unstacked[i], [-1, 1]))
else:
dense_float_names.append(key)
dense_floats.append(tensor)
else:
raise ValueError("Unsupported dense feature %s with dtype %s." %
(tensor.name, tensor.dtype))
# Feature columns are logically organized into incrementing slots starting
# from dense floats, then sparse floats then sparse ints.
fc_names = (dense_float_names + sparse_float_names + sparse_int_names)
return (fc_names, dense_floats, sparse_float_indices, sparse_float_values,
sparse_float_shapes, sparse_int_indices, sparse_int_values,
sparse_int_shapes)
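# Illustrative result (hypothetical feature names): for
#   features = {"age": <float32 Tensor of shape [batch, 1]>,
#               "country": <int64 SparseTensor>}
# and no feature columns, the function returns fc_names = ["age", "country"],
# dense_floats = [<the "age" tensor>], empty sparse float lists, and the
# "country" indices/values/shape in the sparse int lists.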
def _dropout_params(mode, ensemble_stats):
"""Returns parameters relevant for dropout.
Args:
mode: Train/Eval/Infer
ensemble_stats: A TreeEnsembleStatsOp result tuple.
Returns:
Whether to apply dropout and a dropout seed.
"""
if mode == learn.ModeKeys.TRAIN:
# Do dropout only during training.
apply_dropout = True
seed = ensemble_stats.attempted_trees
else:
seed = -1
apply_dropout = False
return apply_dropout, seed
class GradientBoostedDecisionTreeModel(object):
"""A GBDT model function."""
def __init__(self,
is_chief,
num_ps_replicas,
ensemble_handle,
center_bias,
examples_per_layer,
learner_config,
features,
logits_dimension,
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS,
feature_columns=None,
use_core_columns=False,
output_leaf_index=False,
output_leaf_index_modes=None,
num_quantiles=100):
"""Construct a new GradientBoostedDecisionTreeModel function.
Args:
is_chief: Whether to build the chief graph.
num_ps_replicas: Number of parameter server replicas, can be 0.
ensemble_handle: A handle to the ensemble variable.
center_bias: Whether to center the bias before growing trees.
examples_per_layer: Number of examples to accumulate before growing a tree
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
learner_config: A learner config.
features: `dict` of `Tensor` objects.
logits_dimension: An int, the dimension of logits.
loss_reduction: Either `SUM_OVER_NONZERO_WEIGHTS` (mean) or `SUM`.
feature_columns: A list of feature columns.
use_core_columns: A boolean specifying whether core feature columns are
used.
output_leaf_index: A boolean variable indicating whether to output leaf
index into predictions dictionary.
output_leaf_index_modes: A list of modes from (TRAIN, EVAL, INFER) which
dictates when leaf indices will be outputted. By default, leaf indices
are only outputted in INFER mode.
num_quantiles: Number of quantiles to build for numeric feature values.
Raises:
ValueError: if inputs are not valid.
"""
if ensemble_handle is None:
raise ValueError("ensemble_handle must be specified.")
if learner_config is None:
raise ValueError("learner_config must be specified.")
if learner_config.num_classes < 2:
raise ValueError("Number of classes must be >=2")
self._logits_dimension = logits_dimension
self._is_chief = is_chief
self._num_ps_replicas = num_ps_replicas
self._ensemble_handle = ensemble_handle
self._center_bias = center_bias
self._examples_per_layer = examples_per_layer
# Check loss reduction value.
if (loss_reduction != losses.Reduction.SUM and
loss_reduction != losses.Reduction.SUM_OVER_NONZERO_WEIGHTS):
raise ValueError(
"Invalid loss reduction is provided: %s." % loss_reduction)
self._loss_reduction = loss_reduction
# Fill in the defaults.
if (learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.MULTI_CLASS_STRATEGY_UNSPECIFIED):
if logits_dimension == 1:
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
else:
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
if logits_dimension == 1 or learner_config.multi_class_strategy == (
learner_pb2.LearnerConfig.TREE_PER_CLASS):
self._gradient_shape = tensor_shape.scalar()
self._hessian_shape = tensor_shape.scalar()
else:
if center_bias:
raise ValueError("Center bias should be False for multiclass.")
self._gradient_shape = tensor_shape.TensorShape([logits_dimension])
if (learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.FULL_HESSIAN):
self._hessian_shape = tensor_shape.TensorShape(
([logits_dimension, logits_dimension]))
else:
# Diagonal hessian strategy.
self._hessian_shape = tensor_shape.TensorShape(([logits_dimension]))
if (learner_config.growing_mode ==
learner_pb2.LearnerConfig.GROWING_MODE_UNSPECIFIED):
learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
if (learner_config.weak_learner_type == learner_pb2.LearnerConfig
.OBLIVIOUS_DECISION_TREE and learner_config.pruning_mode == learner_pb2
.LearnerConfig.PRUNING_MODE_UNSPECIFIED):
learner_config.pruning_mode = learner_pb2.LearnerConfig.PRE_PRUNE
if (learner_config.pruning_mode ==
learner_pb2.LearnerConfig.PRUNING_MODE_UNSPECIFIED):
learner_config.pruning_mode = learner_pb2.LearnerConfig.POST_PRUNE
if (learner_config.weak_learner_type == learner_pb2.LearnerConfig
.OBLIVIOUS_DECISION_TREE and
learner_config.pruning_mode == learner_pb2.LearnerConfig.POST_PRUNE):
raise ValueError(
"Post pruning is not implmented for oblivious decision trees.")
if learner_config.constraints.max_tree_depth == 0:
# Use 6 as the default maximum depth.
learner_config.constraints.max_tree_depth = 6
tuner = learner_config.learning_rate_tuner.WhichOneof("tuner")
if not tuner:
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
self._learner_config = learner_config
self._feature_columns = feature_columns
self._learner_config_serialized = learner_config.SerializeToString()
self._num_quantiles = num_quantiles
self._max_tree_depth = variables.VariableV1(
initial_value=self._learner_config.constraints.max_tree_depth)
self._attempted_trees = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
trainable=False,
name="attempted_trees")
self._finalized_trees = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
trainable=False,
name="finalized_trees")
if not features:
raise ValueError("Features dictionary must be specified.")
(fc_names, dense_floats, sparse_float_indices, sparse_float_values,
sparse_float_shapes, sparse_int_indices,
sparse_int_values, sparse_int_shapes) = extract_features(
features, self._feature_columns, use_core_columns)
if (learner_config.weak_learner_type == learner_pb2.LearnerConfig
.OBLIVIOUS_DECISION_TREE and sparse_float_indices):
raise ValueError("Oblivious trees don't handle sparse float features yet."
)
logging.info("Active Feature Columns: " + str(fc_names))
logging.info("Learner config: " + str(learner_config))
self._fc_names = fc_names
self._dense_floats = dense_floats
self._sparse_float_indices = sparse_float_indices
self._sparse_float_values = sparse_float_values
self._sparse_float_shapes = sparse_float_shapes
self._sparse_int_indices = sparse_int_indices
self._sparse_int_values = sparse_int_values
self._sparse_int_shapes = sparse_int_shapes
self._reduce_dim = (
self._learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.TREE_PER_CLASS and
learner_config.num_classes == 2)
if output_leaf_index_modes is None:
output_leaf_index_modes = [learn.ModeKeys.INFER]
elif not all(
mode in (learn.ModeKeys.TRAIN, learn.ModeKeys.EVAL,
learn.ModeKeys.INFER) for mode in output_leaf_index_modes):
raise ValueError("output_leaf_index_modes should only contain ModeKeys.")
self._output_leaf_index = output_leaf_index
self._output_leaf_index_modes = output_leaf_index_modes
def _predict_and_return_dict(self, ensemble_handle, ensemble_stamp, mode):
"""Runs prediction and returns a dictionary of the prediction results.
Args:
ensemble_handle: ensemble resource handle.
ensemble_stamp: stamp of ensemble resource.
mode: learn.ModeKeys.TRAIN or EVAL or INFER.
Returns:
a dictionary of prediction results -
      ENSEMBLE_STAMP, PREDICTIONS, PARTITION_IDS,
      NUM_LAYERS_ATTEMPTED, NUM_TREES_ATTEMPTED.
"""
ensemble_stats = training_ops.tree_ensemble_stats(ensemble_handle,
ensemble_stamp)
num_handlers = (
len(self._dense_floats) + len(self._sparse_float_shapes) + len(
self._sparse_int_shapes))
# Used during feature selection.
used_handlers = model_ops.tree_ensemble_used_handlers(
ensemble_handle, ensemble_stamp, num_all_handlers=num_handlers)
# We don't need dropout info - we can always restore it based on the
# seed.
apply_dropout, seed = _dropout_params(mode, ensemble_stats)
# Make sure ensemble stats run. This will check that the ensemble has
# the right stamp.
with ops.control_dependencies(ensemble_stats):
leaf_index = None
if self._output_leaf_index and mode in self._output_leaf_index_modes:
predictions, _, leaf_index = (
prediction_ops).gradient_trees_prediction_verbose(
ensemble_handle,
seed,
self._dense_floats,
self._sparse_float_indices,
self._sparse_float_values,
self._sparse_float_shapes,
self._sparse_int_indices,
self._sparse_int_values,
self._sparse_int_shapes,
learner_config=self._learner_config_serialized,
apply_dropout=apply_dropout,
apply_averaging=mode != learn.ModeKeys.TRAIN,
use_locking=True,
center_bias=self._center_bias,
reduce_dim=self._reduce_dim)
else:
leaf_index = None
predictions, _ = prediction_ops.gradient_trees_prediction(
ensemble_handle,
seed,
self._dense_floats,
self._sparse_float_indices,
self._sparse_float_values,
self._sparse_float_shapes,
self._sparse_int_indices,
self._sparse_int_values,
self._sparse_int_shapes,
learner_config=self._learner_config_serialized,
apply_dropout=apply_dropout,
apply_averaging=mode != learn.ModeKeys.TRAIN,
use_locking=True,
center_bias=self._center_bias,
reduce_dim=self._reduce_dim)
partition_ids = prediction_ops.gradient_trees_partition_examples(
ensemble_handle,
self._dense_floats,
self._sparse_float_indices,
self._sparse_float_values,
self._sparse_float_shapes,
self._sparse_int_indices,
self._sparse_int_values,
self._sparse_int_shapes,
use_locking=True)
return _make_predictions_dict(ensemble_stamp, predictions, partition_ids,
ensemble_stats, used_handlers, leaf_index)
def predict(self, mode):
"""Returns predictions given the features and mode.
Args:
mode: Mode the graph is running in (train|predict|eval).
Returns:
A dict of predictions tensors.
Raises:
ValueError: if features is not valid.
"""
# Use the current ensemble to predict on the current batch of input.
# For faster prediction we check if the inputs are on the same device
# as the model. If not, we create a copy of the model on the worker.
input_deps = (
self._dense_floats + self._sparse_float_indices +
self._sparse_int_indices)
if not input_deps:
raise ValueError("No input tensors for prediction.")
# Get most current model stamp.
ensemble_stamp = model_ops.tree_ensemble_stamp_token(self._ensemble_handle)
# Determine if ensemble is colocated with the inputs.
if self._ensemble_handle.device != input_deps[0].device:
# Create a local ensemble and get its local stamp.
with ops.name_scope("local_ensemble", "TreeEnsembleVariable"):
local_ensemble_handle = (
gen_model_ops.decision_tree_ensemble_resource_handle_op(
self._ensemble_handle.op.name + "/local_ensemble"))
create_op = gen_model_ops.create_tree_ensemble_variable(
local_ensemble_handle, stamp_token=-1, tree_ensemble_config="")
with ops.control_dependencies([create_op]):
local_stamp = model_ops.tree_ensemble_stamp_token(
local_ensemble_handle)
# Determine whether the local ensemble is stale and update it if needed.
def _refresh_local_ensemble_fn():
# Serialize the model from parameter server after reading the inputs.
with ops.control_dependencies([input_deps[0]]):
(ensemble_stamp, serialized_model) = (
model_ops.tree_ensemble_serialize(self._ensemble_handle))
# Update local ensemble with the serialized model from parameter server.
with ops.control_dependencies([create_op]):
return model_ops.tree_ensemble_deserialize(
local_ensemble_handle,
stamp_token=ensemble_stamp,
tree_ensemble_config=serialized_model), ensemble_stamp
with ops.device(local_ensemble_handle.device):
# Need to colocate stamps for cond.
colocated_ensemble_stamp = array_ops.identity(ensemble_stamp)
refresh_local_ensemble, ensemble_stamp = control_flow_ops.cond(
math_ops.not_equal(colocated_ensemble_stamp,
local_stamp), _refresh_local_ensemble_fn,
lambda: (control_flow_ops.no_op(), colocated_ensemble_stamp))
# Once updated, use the local model for prediction.
with ops.control_dependencies([refresh_local_ensemble]):
return self._predict_and_return_dict(local_ensemble_handle,
ensemble_stamp, mode)
else:
# Use ensemble_handle directly, if colocated.
with ops.device(self._ensemble_handle.device):
return self._predict_and_return_dict(self._ensemble_handle,
ensemble_stamp, mode)
def _get_class_id(self, predictions_dict):
# Handle different multiclass strategies.
if (self._learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.TREE_PER_CLASS and
self._logits_dimension != 1):
# Choose the class for which the tree is built (one vs rest).
return math_ops.cast(
predictions_dict[NUM_TREES_ATTEMPTED] % self._logits_dimension,
dtypes.int32)
return constant_op.constant(-1, dtype=dtypes.int32)
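# Example of the TREE_PER_CLASS round-robin above (numbers are illustrative):
# with logits_dimension == 3, trees are assigned to classes in order, so
# NUM_TREES_ATTEMPTED of 0, 1, 2, 3, ... maps to class_id 0, 1, 2, 0, ...
# For all other strategies class_id stays -1 (no per-class tree).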
def update_stats(self, loss, predictions_dict, gradients=None, hessians=None):
"""Update the accumulators with stats from this batch.
Args:
loss: A scalar tensor representing average loss of examples.
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
gradients: A tensor with the gradients with respect to the logits from
predictions_dict. If not provided, TensorFlow will compute them via
automatic differentiation.
hessians: A tensor with the hessians with respect to the logits from
predictions_dict. If not provided, TensorFlow will compute them via
automatic differentiation.
Returns:
Three values:
- A list of ops that update the stats accumulators with this batch,
- An op that increments the stamp but removes all the trees and resets
the handlers. This can be used to reset the state of the ensemble.
- A dict containing the training state.
Raises:
ValueError: if inputs are not valid.
"""
# Get the worker device from input dependencies.
input_deps = (
self._dense_floats + self._sparse_float_indices +
self._sparse_int_indices)
worker_device = input_deps[0].device
# Get tensors relevant for training and form the loss.
predictions = predictions_dict[PREDICTIONS]
partition_ids = predictions_dict[PARTITION_IDS]
ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]
if gradients is None:
gradients = gradients_impl.gradients(
loss,
predictions,
name="Gradients",
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)[0]
strategy = self._learner_config.multi_class_strategy
class_id = self._get_class_id(predictions_dict)
# Handle different multiclass strategies.
if strategy == learner_pb2.LearnerConfig.TREE_PER_CLASS:
# We build one vs rest trees.
if self._logits_dimension == 1:
# We have only 1 score, gradients is of shape [batch, 1].
if hessians is None:
hessians = gradients_impl.gradients(
gradients,
predictions,
name="Hessian",
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)[0]
squeezed_gradients = array_ops.squeeze(gradients, axis=[1])
squeezed_hessians = array_ops.squeeze(hessians, axis=[1])
else:
if hessians is not None:
raise ValueError("Providing hessians is not yet supported here.")
hessian_list = self._diagonal_hessian(gradients, predictions)
# Assemble hessian list into a tensor.
hessians = array_ops.stack(hessian_list, axis=1)
# Use class id tensor to get the column with that index from gradients
# and hessians.
squeezed_gradients = array_ops.squeeze(
_get_column_by_index(gradients, class_id))
squeezed_hessians = array_ops.squeeze(
_get_column_by_index(hessians, class_id))
else:
if hessians is not None:
raise ValueError("Providing hessians is not yet supported here.")
# Other multiclass strategies.
if strategy == learner_pb2.LearnerConfig.FULL_HESSIAN:
hessian_list = self._full_hessian(gradients, predictions)
else:
# Diagonal hessian strategy.
hessian_list = self._diagonal_hessian(gradients, predictions)
squeezed_gradients = gradients
hessians = array_ops.stack(hessian_list, axis=1)
squeezed_hessians = hessians
# Get the weights for each example for quantile calculation.
weights = self._get_weights(self._hessian_shape, squeezed_hessians)
# Create all handlers ensuring resources are evenly allocated across PS.
fc_name_idx = 0
handlers = []
init_stamp_token = constant_op.constant(0, dtype=dtypes.int64)
l1_regularization = constant_op.constant(
self._learner_config.regularization.l1, dtypes.float32)
l2_regularization = constant_op.constant(
self._learner_config.regularization.l2, dtypes.float32)
tree_complexity_regularization = constant_op.constant(
self._learner_config.regularization.tree_complexity, dtypes.float32)
min_node_weight = constant_op.constant(
self._learner_config.constraints.min_node_weight, dtypes.float32)
loss_uses_sum_reduction = self._loss_reduction == losses.Reduction.SUM
loss_uses_sum_reduction = constant_op.constant(loss_uses_sum_reduction)
weak_learner_type = constant_op.constant(
self._learner_config.weak_learner_type)
num_quantiles = self._num_quantiles
epsilon = 1.0 / num_quantiles
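# A small worked example (values are illustrative): num_quantiles == 100 gives
# epsilon == 0.01, i.e. the quantile sketches built by the split handlers below
# are assumed to tolerate an approximation error of roughly 1% of the total
# weight.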
strategy_tensor = constant_op.constant(strategy)
with ops.device(self._get_replica_device_setter(worker_device)):
# Create handlers for dense float columns
for dense_float_column_idx in range(len(self._dense_floats)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(
ordinal_split_handler.DenseSplitHandler(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=constant_op.constant(
dense_float_column_idx),
epsilon=epsilon,
num_quantiles=num_quantiles,
dense_float_column=self._dense_floats[dense_float_column_idx],
name=fc_name,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction,
weak_learner_type=weak_learner_type,
))
fc_name_idx += 1
# Create handlers for sparse float columns.
for sparse_float_column_idx in range(len(self._sparse_float_indices)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(
ordinal_split_handler.SparseSplitHandler(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=constant_op.constant(
sparse_float_column_idx),
epsilon=epsilon,
num_quantiles=num_quantiles,
sparse_float_column=sparse_tensor.SparseTensor(
self._sparse_float_indices[sparse_float_column_idx],
self._sparse_float_values[sparse_float_column_idx],
self._sparse_float_shapes[sparse_float_column_idx]),
name=fc_name,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction))
fc_name_idx += 1
# Create handlers for sparse int columns.
for sparse_int_column_idx in range(len(self._sparse_int_indices)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(
categorical_split_handler.EqualitySplitHandler(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=constant_op.constant(
sparse_int_column_idx),
sparse_int_column=sparse_tensor.SparseTensor(
self._sparse_int_indices[sparse_int_column_idx],
self._sparse_int_values[sparse_int_column_idx],
self._sparse_int_shapes[sparse_int_column_idx]),
name=fc_name,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction,
weak_learner_type=weak_learner_type))
fc_name_idx += 1
# Create ensemble stats variables.
num_layer_examples = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="num_layer_examples",
trainable=False)
num_layer_steps = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="num_layer_steps",
trainable=False)
num_layers = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="num_layers",
trainable=False)
active_tree = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="active_tree",
trainable=False)
active_layer = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="active_layer",
trainable=False)
# Variable that becomes false once bias centering is done.
continue_centering = variables.VariableV1(
initial_value=self._center_bias,
name="continue_centering",
trainable=False)
# Create bias stats accumulator.
bias_stats_accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
name="BiasAccumulator")
# Create steps accumulator.
steps_accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar(),
name="StepsAccumulator")
# Create ensemble stats summaries.
summary.scalar("layer_stats/num_examples", num_layer_examples)
summary.scalar("layer_stats/num_steps", num_layer_steps)
summary.scalar("ensemble_stats/active_tree", active_tree)
summary.scalar("ensemble_stats/active_layer", active_layer)
# Update bias stats.
stats_update_ops = []
stats_update_ops.append(
control_flow_ops.cond(
continue_centering,
self._make_update_bias_stats_fn(ensemble_stamp, predictions,
gradients, bias_stats_accumulator,
hessians), control_flow_ops.no_op))
# Update handler stats.
handler_reads = collections.OrderedDict()
for handler in handlers:
handler_reads[handler] = handler.scheduled_reads()
handler_results = batch_ops_utils.run_handler_scheduled_ops(
handler_reads, ensemble_stamp, worker_device)
per_handler_updates = collections.OrderedDict()
# Two values per handler: whether the handler is active for the current
# layer, and whether it will be active for the next layer.
subsampling_type = self._learner_config.WhichOneof("feature_fraction")
if subsampling_type == "feature_fraction_per_level":
seed = predictions_dict[NUM_LAYERS_ATTEMPTED]
active_handlers_current_layer = stateless.stateless_random_uniform(
shape=[len(handlers)], seed=[seed, 1])
active_handlers_next_layer = stateless.stateless_random_uniform(
shape=[len(handlers)], seed=[seed + 1, 1])
active_handlers = array_ops.stack(
[active_handlers_current_layer, active_handlers_next_layer], axis=1)
active_handlers = (
active_handlers < self._learner_config.feature_fraction_per_level)
elif subsampling_type == "feature_fraction_per_tree":
seed = predictions_dict[NUM_TREES_ATTEMPTED]
active_handlers_current_layer = stateless.stateless_random_uniform(
shape=[len(handlers)], seed=[seed, 2])
active_handlers_current_layer = (
active_handlers_current_layer <
self._learner_config.feature_fraction_per_tree)
active_handlers = array_ops.stack(
[
active_handlers_current_layer,
array_ops.ones([len(handlers)], dtype=dtypes.bool)
],
axis=1)
else:
active_handlers = array_ops.ones([len(handlers), 2], dtype=dtypes.bool)
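# Concrete example of the subsampling above (numbers are illustrative): with
# feature_fraction_per_tree == 0.5 each handler is kept for the current tree
# with probability ~0.5. The stateless uniform draw is seeded with
# NUM_TREES_ATTEMPTED, so every worker derives the same active-handler mask
# without extra synchronization.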
if self._learner_config.constraints.max_number_of_unique_feature_columns:
target = (
self._learner_config.constraints.max_number_of_unique_feature_columns)
def _feature_selection_active_handlers():
# The active list for current and the next iteration.
used_handlers = array_ops.reshape(predictions_dict[USED_HANDLERS_MASK],
[-1, 1])
used_handlers = array_ops.concat([used_handlers, used_handlers], axis=1)
return math_ops.logical_and(used_handlers, active_handlers)
active_handlers = (
control_flow_ops.cond(predictions_dict[NUM_USED_HANDLERS] >= target,
_feature_selection_active_handlers,
lambda: active_handlers))
# Prepare empty gradients and hessians when handlers are not ready.
empty_hess_shape = [1] + self._hessian_shape.as_list()
empty_grad_shape = [1] + self._gradient_shape.as_list()
empty_gradients = constant_op.constant_v1(
[], dtype=dtypes.float32, shape=empty_grad_shape)
empty_hessians = constant_op.constant_v1(
[], dtype=dtypes.float32, shape=empty_hess_shape)
active_handlers = array_ops.unstack(active_handlers, axis=0)
for handler_idx in range(len(handlers)):
handler = handlers[handler_idx]
is_active = active_handlers[handler_idx]
updates, scheduled_updates = handler.update_stats(
ensemble_stamp, partition_ids, squeezed_gradients, squeezed_hessians,
empty_gradients, empty_hessians, weights, is_active,
handler_results[handler])
stats_update_ops.append(updates)
per_handler_updates[handler] = scheduled_updates
update_results = batch_ops_utils.run_handler_scheduled_ops(
per_handler_updates, ensemble_stamp, worker_device)
for update in update_results.values():
stats_update_ops += update
training_state = GBDTTrainingState(
num_layer_examples=num_layer_examples,
num_layer_steps=num_layer_steps,
num_layers=num_layers,
active_tree=active_tree,
active_layer=active_layer,
continue_centering=continue_centering,
bias_stats_accumulator=bias_stats_accumulator,
steps_accumulator=steps_accumulator,
handlers=handlers)
reset_op = control_flow_ops.no_op()
if self._is_chief:
# Advance the ensemble stamp to throw away staggered workers.
stamp_token, _ = model_ops.tree_ensemble_serialize(self._ensemble_handle)
next_stamp_token = stamp_token + 1
reset_ops = []
for handler in handlers:
reset_ops.append(handler.reset(stamp_token, next_stamp_token))
if self._center_bias:
reset_ops.append(
bias_stats_accumulator.flush(stamp_token, next_stamp_token))
reset_ops.append(steps_accumulator.flush(stamp_token, next_stamp_token))
reset_ops.append(self._finalized_trees.assign(0).op)
reset_ops.append(self._attempted_trees.assign(0).op)
reset_ops.append(
model_ops.tree_ensemble_deserialize(
self._ensemble_handle,
stamp_token=next_stamp_token,
tree_ensemble_config="",
name="reset_gbdt"))
reset_op = control_flow_ops.group([reset_ops])
return stats_update_ops, reset_op, training_state
def increment_step_counter_and_maybe_update_ensemble(self, predictions_dict,
training_state):
"""Increments number of visited examples and grows the ensemble.
If the number of visited examples reaches the target examples_per_layer, the
ensemble is updated.
Args:
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
training_state: `dict` returned by update_stats.
Returns:
An op that updates the counters and potentially grows the ensemble.
"""
batch_size = math_ops.cast(
array_ops.shape(predictions_dict[PREDICTIONS])[0], dtypes.float32)
ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]
# Accumulate a step after updating stats.
steps_accumulator = training_state.steps_accumulator
num_layer_examples = training_state.num_layer_examples
num_layer_steps = training_state.num_layer_steps
active_layer = training_state.active_layer
add_step_op = steps_accumulator.add(
ensemble_stamp, [0], [[0, 0]], [batch_size], [1.0])
# After adding the step, decide if further processing is needed.
ensemble_update_ops = [add_step_op]
class_id = self._get_class_id(predictions_dict)
with ops.control_dependencies([add_step_op]):
if self._is_chief:
dropout_seed = predictions_dict[NUM_TREES_ATTEMPTED]
# Get accumulated steps and examples for the current layer.
_, _, _, _, acc_examples, acc_steps = (
steps_accumulator.saveable.serialize())
acc_examples = math_ops.cast(acc_examples[0], dtypes.int64)
acc_steps = math_ops.cast(acc_steps[0], dtypes.int64)
ensemble_update_ops.append(
num_layer_examples.assign(acc_examples))
ensemble_update_ops.append(num_layer_steps.assign(acc_steps))
# Determine whether we need to update tree ensemble.
examples_per_layer = self._examples_per_layer
if callable(examples_per_layer):
examples_per_layer = examples_per_layer(active_layer)
ensemble_update_ops.append(
control_flow_ops.cond(
acc_examples >= examples_per_layer,
self.make_update_ensemble_fn(ensemble_stamp, training_state,
dropout_seed, class_id),
control_flow_ops.no_op))
# Note: the loss is calculated from predictions that include dropout, so the
# value may look inflated or noisy across steps when the dropout ratio is
# high. Refer to eval_loss instead when assessing convergence.
return control_flow_ops.group(*ensemble_update_ops)
def make_update_ensemble_fn(self, ensemble_stamp, training_state,
dropout_seed, class_id):
"""A method to create the function which updates the tree ensemble."""
# Determine learning rate.
learning_rate_tuner = self._learner_config.learning_rate_tuner.WhichOneof(
"tuner")
if learning_rate_tuner == "fixed" or learning_rate_tuner == "dropout":
tuner = getattr(self._learner_config.learning_rate_tuner,
learning_rate_tuner)
learning_rate = tuner.learning_rate
else:
# TODO(nponomareva, soroush) do the line search.
raise ValueError("Line search learning rate is not yet supported.")
def _update_ensemble():
"""A method to update the tree ensemble."""
# Get next stamp token.
next_ensemble_stamp = ensemble_stamp + 1
# Finalize bias stats.
_, _, _, bias_grads, bias_hess = (
training_state.bias_stats_accumulator.flush(ensemble_stamp,
next_ensemble_stamp))
# Finalize handler splits.
are_splits_ready_list = []
partition_ids_list = []
gains_list = []
split_info_list = []
for handler in training_state.handlers:
(are_splits_ready,
partition_ids, gains, split_info) = handler.make_splits(
ensemble_stamp, next_ensemble_stamp, class_id)
are_splits_ready_list.append(are_splits_ready)
partition_ids_list.append(partition_ids)
gains_list.append(gains)
split_info_list.append(split_info)
# Stack all the inputs to one tensor per type.
# This is a workaround for the slowness of graph building in tf.cond.
# See (b/36554864).
split_sizes = array_ops.reshape(
array_ops.shape_n(partition_ids_list), [len(partition_ids_list)])
partition_ids = array_ops.concat(partition_ids_list, axis=0)
gains = array_ops.concat(gains_list, axis=0)
split_infos = array_ops.concat(split_info_list, axis=0)
# Determine if all splits are ready.
are_all_splits_ready = math_ops.reduce_all(
array_ops.stack(
are_splits_ready_list, axis=0, name="stack_handler_readiness"))
# Define bias centering update operation.
def _center_bias_fn():
# Center tree ensemble bias.
delta_updates = array_ops.where(bias_hess > 0, -bias_grads / bias_hess,
array_ops.zeros_like(bias_grads))
center_bias = training_ops.center_tree_ensemble_bias(
tree_ensemble_handle=self._ensemble_handle,
stamp_token=ensemble_stamp,
next_stamp_token=next_ensemble_stamp,
delta_updates=delta_updates,
learner_config=self._learner_config_serialized)
return training_state.continue_centering.assign(center_bias)
# Define ensemble growing operations.
def _grow_ensemble_ready_fn():
# Grow the ensemble given the current candidates.
sizes = array_ops.unstack(split_sizes)
partition_ids_list = list(array_ops.split(partition_ids, sizes, axis=0))
# When using an oblivious decision tree as the weak learner, each handler
# produces one gain and one split, rather than one per partition.
if self._learner_config.weak_learner_type == (
learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE):
sizes = len(training_state.handlers)
gains_list = list(array_ops.split(gains, sizes, axis=0))
split_info_list = list(array_ops.split(split_infos, sizes, axis=0))
return training_ops.grow_tree_ensemble(
tree_ensemble_handle=self._ensemble_handle,
stamp_token=ensemble_stamp,
next_stamp_token=next_ensemble_stamp,
learning_rate=learning_rate,
partition_ids=partition_ids_list,
gains=gains_list,
splits=split_info_list,
learner_config=self._learner_config_serialized,
dropout_seed=dropout_seed,
center_bias=self._center_bias,
max_tree_depth=self._max_tree_depth,
weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_not_ready_fn():
# Don't grow the ensemble, just update the stamp.
return training_ops.grow_tree_ensemble(
tree_ensemble_handle=self._ensemble_handle,
stamp_token=ensemble_stamp,
next_stamp_token=next_ensemble_stamp,
learning_rate=0,
partition_ids=[],
gains=[],
splits=[],
learner_config=self._learner_config_serialized,
dropout_seed=dropout_seed,
center_bias=self._center_bias,
max_tree_depth=self._max_tree_depth,
weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_fn():
# Conditionally grow an ensemble depending on whether the splits
# from all the handlers are ready.
return control_flow_ops.cond(are_all_splits_ready,
_grow_ensemble_ready_fn,
_grow_ensemble_not_ready_fn)
# Update ensemble.
update_ops = [are_all_splits_ready]
if self._center_bias:
update_model = control_flow_ops.cond(training_state.continue_centering,
_center_bias_fn, _grow_ensemble_fn)
else:
update_model = _grow_ensemble_fn()
update_ops.append(update_model)
# Update ensemble stats.
with ops.control_dependencies([update_model]):
stats = training_ops.tree_ensemble_stats(
self._ensemble_handle, stamp_token=next_ensemble_stamp)
update_ops.append(self._finalized_trees.assign(stats.num_trees))
update_ops.append(self._attempted_trees.assign(stats.attempted_trees))
update_ops.append(training_state.num_layers.assign(stats.num_layers))
update_ops.append(training_state.active_tree.assign(stats.active_tree))
update_ops.append(
training_state.active_layer.assign(stats.active_layer))
# Flush step stats.
update_ops.extend(
training_state.steps_accumulator.flush(ensemble_stamp,
next_ensemble_stamp))
return control_flow_ops.group(*update_ops, name="update_ensemble")
return _update_ensemble
def get_number_of_trees_tensor(self):
return self._finalized_trees, self._attempted_trees
def get_max_tree_depth(self):
return self._max_tree_depth
def train(self, loss, predictions_dict, labels, gradients=None,
hessians=None):
"""Updates the accumalator stats and grows the ensemble.
Args:
loss: A scalar tensor representing average loss of examples.
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
labels: Rank 2 `Tensor` representing labels per example. Has no effect
on the training and is only kept for backward compatibility.
gradients: A tensor with the gradients with respect to the logits from
predictions_dict. If not provided, TensorFlow will compute them via
automatic differentiation.
hessians: A tensor with the hessians with respect to the logits from
predictions_dict. If not provided, TensorFlow will compute them via
automatic differentiation.
Returns:
An op that adds a new tree to the ensemble.
Raises:
ValueError: if inputs are not valid.
"""
del labels # unused; kept for backward compatibility.
update_op, _, training_state = self.update_stats(loss, predictions_dict,
gradients, hessians)
with ops.control_dependencies(update_op):
return self.increment_step_counter_and_maybe_update_ensemble(
predictions_dict, training_state)
def _get_weights(self, hessian_shape, hessians):
"""Derives weights to be used based on hessians and multiclass strategy."""
if hessian_shape == tensor_shape.scalar():
# This is tree per class.
weights = hessians
elif len(hessian_shape.dims) == 1:
# This is diagonal hessian.
weights = math_ops.reduce_sum(hessians, axis=1)
else:
# This is full hessian.
weights = math_ops.trace(hessians)
return weights
def _full_hessian(self, grads, predictions):
"""Prepares hessians for full-hessian multiclass strategy."""
# Because of
# https://github.com/tensorflow/tensorflow/issues/675, we can't just
# compute the full hessian with a single call to gradients, but instead
# must compute it row-by-row.
gradients_list = array_ops.unstack(
grads, num=self._logits_dimension, axis=1)
hessian_rows = []
for row in range(self._logits_dimension):
# If the current row is i and K is the number of classes, each row returns
# a tensor of size batch_size x K representing, for each example,
# dx_i dx_1, dx_i dx_2, ..., dx_i dx_K.
hessian_row = gradients_impl.gradients(
gradients_list[row],
predictions,
name="Hessian_%d" % row,
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)
# hessian_row has shape [1, batch_size, K] => trim the first dimension
# to get batch_size x K.
hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])
hessian_rows.append(hessian_row)
return hessian_rows
def _diagonal_hessian(self, grads, predictions):
"""Prepares hessians for diagonal-hessian multiclass mode."""
diag_hessian_list = []
gradients_list = array_ops.unstack(
grads, num=self._logits_dimension, axis=1)
for row, row_grads in enumerate(gradients_list):
# If the current row is i and K is the number of classes, each row returns
# a tensor of size batch_size x K representing, for each example,
# dx_i dx_1, dx_i dx_2, ..., dx_i dx_K.
hessian_row = gradients_impl.gradients(
row_grads,
predictions,
name="Hessian_%d" % row,
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)
# hessian_row has shape [1, batch_size, K] => trim the first dimension
# to get batch_size x K.
hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])
# Get dx_i^2 for the whole batch.
elem = array_ops.transpose(hessian_row)[row]
diag_hessian_list.append(elem)
return diag_hessian_list
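# Shape sketch for the two hessian helpers above (assuming logits_dimension K
# and batch size B): grads is [B, K]; back-propagating one gradient column
# yields a [B, K] row, so _full_hessian returns K tensors of shape [B, K],
# while _diagonal_hessian keeps only column i of row i and returns K tensors
# of shape [B].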
def _get_replica_device_setter(self, worker_device):
"""Creates a replica device setter."""
ps_tasks = self._num_ps_replicas
ps_ops = list(device_setter.STANDARD_PS_OPS)
ps_ops.extend([
"DecisionTreeEnsembleResourceHandleOp",
"StatsAccumulatorScalarResourceHandleOp",
"StatsAccumulatorTensorResourceHandleOp",
])
ps_strategy = _OpRoundRobinStrategy(ps_ops, ps_tasks)
return device_setter.replica_device_setter(
worker_device=worker_device,
ps_tasks=ps_tasks,
merge_devices=True,
ps_ops=ps_ops,
ps_strategy=ps_strategy)
def _make_update_bias_stats_fn(self,
ensemble_stamp,
predictions,
gradients,
bias_stats_accumulator,
hessians=None):
"""A method to create the function which updates the bias stats."""
def _update_bias_stats():
"""A method to update the bias stats."""
# Get reduced gradients and hessians.
grads_sum = math_ops.reduce_sum(gradients, 0)
if hessians is not None:
hess = hessians
else:
hess = gradients_impl.gradients(
grads_sum,
predictions,
name="Hessians",
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)[0]
hess_sum = math_ops.reduce_sum(hess, 0)
# Accumulate gradients and hessians.
partition_ids = math_ops.range(self._logits_dimension)
feature_ids = array_ops.zeros(
[self._logits_dimension, 2], dtype=dtypes.int64)
add_stats_op = bias_stats_accumulator.add(
ensemble_stamp, partition_ids, feature_ids, grads_sum, hess_sum)
return control_flow_ops.group(*[add_stats_op], name="update_bias_stats")
return _update_bias_stats
|
|
import json
import logging
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseNotAllowed,
HttpResponseServerError,
)
from django.utils.encoding import force_str
from django.utils.text import slugify
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from .models import SystemToken
from standup.status.models import Project, Status, StandupUser
logger = logging.getLogger(__name__)
def convert_to_json(resp, cors=False):
"""Converts responses into JSON responses"""
# If the response is already json-ified, then we skip it.
if getattr(resp, 'is_json', False):
return resp
resp['Content-Type'] = 'application/json'
if cors:
resp['Access-Control-Allow-Origin'] = '*'
content = resp.content
if isinstance(resp, (
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseServerError
)):
# Errors are in the form {'error': 'error string...'}
content = {'error': content.decode('utf-8')}
elif isinstance(resp, HttpResponseNotAllowed):
content = {'error': 'Method not allowed'}
elif isinstance(resp, HttpResponseJSON):
return resp
resp.content = json.dumps(content)
resp.is_json = True
return resp
class AuthException(Exception):
"""Authentication exception"""
pass
class HttpResponseJSON(HttpResponse):
is_json = True
def __init__(self, content, status=None, cors=False):
super(HttpResponseJSON, self).__init__(
content=json.dumps(content),
content_type='application/json',
status=status
)
if cors:
self['Access-Control-Allow-Origin'] = '*'
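# Example usage (hypothetical view code):
#   return HttpResponseJSON({'id': 42}, status=201, cors=True)
# which serializes the dict, sets the JSON content type and, because of
# cors=True, adds "Access-Control-Allow-Origin: *" to the response.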
class APIView(View):
"""API view that looks at the world in JSON"""
@classmethod
def as_view(cls, *args, **kwargs):
# API calls use token authentication and don't need csrf bits
return csrf_exempt(super(APIView, cls).as_view(*args, **kwargs))
def authenticate(self, request):
"""Authenticates the request which pulls out the auth token and validates it
Adds "auth_token" attribute to the request with the token instance.
:raises AuthException: for authentication errors
"""
pass
def dispatch(self, request, *args, **kwargs):
"""Dispatches like View, except always returns JSON responses"""
try:
if request.body:
# FIXME(willkg): This assumes the body is utf-8.
request.json_body = json.loads(force_str(request.body))
if not isinstance(request.json_body, dict):
raise Exception('Unrecognized JSON payload.')
else:
request.json_body = None
self.authenticate(request)
resp = super(APIView, self).dispatch(request, *args, **kwargs)
except AuthException as exc:
resp = HttpResponseForbidden(exc.args[0])
except Exception as exc:
resp = HttpResponseServerError('Error occurred: %s' % exc)
logger.exception('Request kicked up exception')
return convert_to_json(resp)
class AuthenticatedAPIView(APIView):
def authenticate(self, request):
# If this request is for a method that's not allowed, we just
# skip it and dispatch will deal with it.
if getattr(self, request.method.lower(), None) is None:
return
if not request.json_body:
raise AuthException('No api_key provided.')
api_key = request.json_body.get('api_key')
if not api_key:
raise AuthException('No api_key provided.')
# Check to see if this is a system token
try:
token = SystemToken.objects.get(token=api_key)
except SystemToken.DoesNotExist:
raise AuthException('Api key forbidden.')
# Verify the token is enabled
if not token.enabled:
raise AuthException('Api key forbidden.')
# Add token to request object
request.auth_token = token
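# Illustrative request body for the token-authenticated endpoints below
# (field values are made up; "api_key" must match an enabled SystemToken):
#   {
#       "api_key": "<system token>",
#       "user": "ircnick",
#       "project": "some-project",
#       "content": "Finished the thing",
#       "reply_to": 123
#   }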
class StatusCreate(AuthenticatedAPIView):
def post(self, request):
# token = request.auth_token
# FIXME(willkg): Authorize operation.
# FIXME(willkg): This makes the API irc-specific.
irc_nick = request.json_body.get('user')
project_slug = request.json_body.get('project')
content = request.json_body.get('content')
reply_to = request.json_body.get('reply_to')
project = None
replied = None
# Validate we have the required fields.
if not (irc_nick and content):
return HttpResponseBadRequest('Missing required fields.')
# If this is a reply make sure that the status being replied to
# exists and is not itself a reply
if reply_to:
replied = Status.objects.filter(id=reply_to).first()
if not replied:
return HttpResponseBadRequest('Status does not exist.')
elif replied.reply_to:
return HttpResponseBadRequest('Cannot reply to a reply.')
# Get the user
user = StandupUser.objects.filter(irc_nick=irc_nick).first()
if not user:
return HttpResponseBadRequest('User does not exist.')
# Get or create the project (but not if this is a reply)
if project_slug and not replied:
# This forces the slug to be slug-like.
project_slug = slugify(project_slug)
project = Project.objects.filter(slug=project_slug).first()
if not project:
project = Project(slug=project_slug, name=project_slug)
project.save()
# Create the status
status = Status(user=user, content=content)
if project_slug and project:
status.project = project
if replied:
status.reply_to = replied
status.save()
return HttpResponseJSON({'id': status.id, 'content': content})
class StatusDelete(AuthenticatedAPIView):
def delete(self, request, pk):
# token = request.auth_token
# FIXME(willkg): Authorize this operation.
# FIXME(willkg): This makes the API irc-specific.
irc_nick = request.json_body.get('user')
if not irc_nick:
return HttpResponseBadRequest('Missing required fields.')
status = Status.objects.filter(id=pk).first()
if not status:
return HttpResponseBadRequest('Status does not exist.')
if status.user.irc_nick != irc_nick:
return HttpResponseForbidden('You cannot delete this status.')
status_id = status.id
status.delete()
return HttpResponseJSON({'id': status_id})
class UpdateUser(AuthenticatedAPIView):
def post(self, request, username):
# token = request.auth_token
# FIXME(willkg): Authorize this operation.
# FIXME(willkg): This makes the API irc-specific.
user = StandupUser.objects.filter(irc_nick=username).first()
if not user:
return HttpResponseBadRequest('User does not exist.')
if 'name' in request.json_body:
user.name = request.json_body['name']
if 'email' in request.json_body:
user.user.email = request.json_body['email']
if 'github_handle' in request.json_body:
user.github_handle = request.json_body['github_handle']
user.save()
user.user.save()
return HttpResponseJSON({'id': user.id})
|
|
# Copyright 2013-2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import imp
import inspect
import os
import sys
from .config import ConfigContext
from .config_types import Path, ModuleBasePath, RECIPE_MODULE_PREFIX
from .recipe_api import RecipeApi, RecipeApiPlain, Property, BoundProperty
from .recipe_api import UndefinedPropertyException, PROPERTY_SENTINEL
from .recipe_test_api import RecipeTestApi, DisabledTestData
from .util import scan_directory
class NoSuchRecipe(Exception):
"""Raised by load_recipe is recipe is not found."""
class RecipeScript(object):
"""Holds dict of an evaluated recipe script."""
def __init__(self, recipe_dict):
# Let each property object know about the property name.
recipe_dict['PROPERTIES'] = {
name: value.bind(name, BoundProperty.RECIPE_PROPERTY, name)
for name, value in recipe_dict.get('PROPERTIES', {}).items()}
for k, v in recipe_dict.iteritems():
setattr(self, k, v)
@classmethod
def from_script_path(cls, script_path, universe):
"""Evaluates a script and returns RecipeScript instance."""
script_vars = {}
script_vars['__file__'] = script_path
with _preserve_path():
execfile(script_path, script_vars)
script_vars['LOADED_DEPS'] = universe.deps_from_spec(
script_vars.get('DEPS', []))
return cls(script_vars)
class Dependency(object):
def load(self, universe):
raise NotImplementedError()
@property
def local_name(self):
raise NotImplementedError()
@property
def unique_name(self):
"""A unique identifier for the module that this dependency refers to.
This must be generated without loading the module."""
raise NotImplementedError()
class PathDependency(Dependency):
def __init__(self, path, local_name, universe):
assert os.path.isabs(path), (
'Path dependencies must be absolute, but %s is not' % path)
self._path = path
self._local_name = local_name
# We forbid modules from living outside our main paths to keep clients
# from going crazy before we have standardized recipe locations.
mod_dir = os.path.dirname(path)
assert mod_dir in universe.module_dirs, (
'Modules living outside of approved directories are forbidden: '
'%s is not in %s' % (mod_dir, universe.module_dirs))
def load(self, universe):
return _load_recipe_module_module(self._path, universe)
@property
def local_name(self):
return self._local_name
@property
def unique_name(self):
return self._path
class NamedDependency(PathDependency):
def __init__(self, name, universe):
for path in universe.module_dirs:
mod_path = os.path.join(path, name)
if _is_recipe_module_dir(mod_path):
super(NamedDependency, self).__init__(mod_path, name, universe=universe)
return
raise NoSuchRecipe('Recipe module named %s does not exist' % name)
class PackageDependency(PathDependency):
# TODO(luqui): Forbid depending on a module from a (locally) undeclared
# dependency.
def __init__(self, package, module, local_name, universe):
mod_path = (
universe.package_deps.get_package(package).module_path(module))
super(PackageDependency, self).__init__(
mod_path, local_name, universe=universe)
class RecipeUniverse(object):
def __init__(self, package_deps):
self._loaded = {}
self._package_deps = package_deps
@property
def module_dirs(self):
return self._package_deps.all_module_dirs
@property
def recipe_dirs(self):
return self._package_deps.all_recipe_dirs
@property
def package_deps(self):
return self._package_deps
def load(self, dep):
"""Load a Dependency."""
name = dep.unique_name
if name in self._loaded:
mod = self._loaded[name]
assert mod is not None, (
'Cyclic dependency when trying to load %s' % name)
return mod
else:
self._loaded[name] = None
mod = dep.load(self)
self._loaded[name] = mod
return mod
def _dep_from_name(self, name):
if '/' in name:
[package,module] = name.split('/')
dep = PackageDependency(package, module, module, universe=self)
else:
# Old style: bare module name, search paths to find it.
module = name
dep = NamedDependency(name, universe=self)
return module, dep
def deps_from_spec(self, spec):
# Automatic local names.
if isinstance(spec, (list, tuple)):
deps = {}
for item in spec:
name, dep = self._dep_from_name(item)
deps[name] = self.load(dep)
# Explicit local names.
elif isinstance(spec, dict):
deps = {}
for name, item in spec.iteritems():
_, dep = self._dep_from_name(item)
deps[name] = self.load(dep)
return deps
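# Example DEPS specs accepted above (module names are hypothetical):
#   DEPS = ['some_module', 'some_package/other_module']   # automatic names
#   DEPS = {'other': 'some_package/other_module'}         # explicit names
# A bare name is resolved by searching module_dirs (NamedDependency); a
# "package/module" name is resolved through that package's module path
# (PackageDependency).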
def load_recipe(self, recipe):
"""Given name of a recipe, loads and returns it as RecipeScript instance.
Args:
recipe (str): name of a recipe, can be in form '<module>:<recipe>'.
Returns:
RecipeScript instance.
Raises:
NoSuchRecipe: recipe is not found.
"""
# If the recipe is specified as "module:recipe", then it is a recipe
# contained in a recipe_module as an example. Look for it in the modules
# imported by load_recipe_modules instead of the normal search paths.
if ':' in recipe:
module_name, example = recipe.split(':')
assert example.endswith('example')
for module_dir in self.module_dirs:
if os.path.isdir(module_dir):
for subitem in os.listdir(module_dir):
if module_name == subitem:
return RecipeScript.from_script_path(
os.path.join(module_dir, subitem, 'example.py'), self)
raise NoSuchRecipe(recipe,
'Recipe example %s:%s does not exist' %
(module_name, example))
else:
for recipe_path in (os.path.join(p, recipe) for p in self.recipe_dirs):
if os.path.exists(recipe_path + '.py'):
return RecipeScript.from_script_path(recipe_path + '.py', self)
raise NoSuchRecipe(recipe)
def loop_over_recipe_modules(self):
for path in self.module_dirs:
if os.path.isdir(path):
for item in os.listdir(path):
subpath = os.path.join(path, item)
if _is_recipe_module_dir(subpath):
yield subpath
def loop_over_recipes(self):
"""Yields pairs (path to recipe, recipe name).
Enumerates real recipes in recipes/* as well as examples in recipe_modules/*.
"""
for path in self.recipe_dirs:
for recipe in scan_directory(
path, lambda f: f.endswith('.py') and f[0] != '_'):
yield recipe, recipe[len(path)+1:-len('.py')]
for path in self.module_dirs:
for recipe in scan_directory(
path, lambda f: f.endswith('example.py')):
module_name = os.path.dirname(recipe)[len(path)+1:]
yield recipe, '%s:example' % module_name
def _is_recipe_module_dir(path):
return (os.path.isdir(path) and
os.path.isfile(os.path.join(path, '__init__.py')))
@contextlib.contextmanager
def _preserve_path():
old_path = sys.path[:]
try:
yield
finally:
sys.path = old_path
def _find_and_load_module(fullname, modname, path):
imp.acquire_lock()
try:
if fullname not in sys.modules:
fil = None
try:
fil, pathname, descr = imp.find_module(modname,
[os.path.dirname(path)])
imp.load_module(fullname, fil, pathname, descr)
finally:
if fil:
fil.close()
return sys.modules[fullname]
finally:
imp.release_lock()
def _load_recipe_module_module(path, universe):
modname = os.path.splitext(os.path.basename(path))[0]
fullname = '%s.%s' % (RECIPE_MODULE_PREFIX, modname)
mod = _find_and_load_module(fullname, modname, path)
# This actually loads the dependencies.
mod.LOADED_DEPS = universe.deps_from_spec(getattr(mod, 'DEPS', []))
# Prevent any modules that mess with sys.path from leaking.
with _preserve_path():
# TODO(luqui): Remove this hack once configs are cleaned.
sys.modules['%s.DEPS' % fullname] = mod.LOADED_DEPS
_recursive_import(path, RECIPE_MODULE_PREFIX)
_patchup_module(modname, mod)
return mod
def _recursive_import(path, prefix):
modname = os.path.splitext(os.path.basename(path))[0]
fullname = '%s.%s' % (prefix, modname)
mod = _find_and_load_module(fullname, modname, path)
if not os.path.isdir(path):
return mod
for subitem in os.listdir(path):
subpath = os.path.join(path, subitem)
subname = os.path.splitext(subitem)[0]
if os.path.isdir(subpath):
if not os.path.exists(os.path.join(subpath, '__init__.py')):
continue
elif not subpath.endswith('.py') or subitem.startswith('__init__.py'):
continue
submod = _recursive_import(subpath, fullname)
if not hasattr(mod, subname):
setattr(mod, subname, submod)
else:
prev = getattr(mod, subname)
assert submod is prev, (
'Conflicting modules: %s and %s' % (prev, submod))
return mod
def _patchup_module(name, submod):
"""Finds framework related classes and functions in a |submod| and adds
them to |submod| as top level constants with well known names such as
API, CONFIG_CTX, TEST_API, and PROPERTIES.
|submod| is a recipe module (akin to python package) with submodules such as
'api', 'config', 'test_api'. This function scans through dicts of that
submodules to find subclasses of RecipeApi, RecipeTestApi, etc.
"""
submod.NAME = name
submod.UNIQUE_NAME = name # TODO(luqui): use a luci-config unique name
submod.MODULE_DIRECTORY = Path(ModuleBasePath(submod))
submod.CONFIG_CTX = getattr(submod, 'CONFIG_CTX', None)
if hasattr(submod, 'config'):
for v in submod.config.__dict__.itervalues():
if isinstance(v, ConfigContext):
assert not submod.CONFIG_CTX, (
'More than one configuration context: %s, %s' %
(submod.config, submod.CONFIG_CTX))
submod.CONFIG_CTX = v
assert submod.CONFIG_CTX, 'Config file, but no config context?'
submod.API = getattr(submod, 'API', None)
for v in submod.api.__dict__.itervalues():
if inspect.isclass(v) and issubclass(v, RecipeApiPlain):
assert not submod.API, (
'%s has more than one Api subclass: %s, %s' % (name, v, submod.api))
submod.API = v
assert submod.API, 'Submodule has no api? %s' % (submod)
submod.TEST_API = getattr(submod, 'TEST_API', None)
if hasattr(submod, 'test_api'):
for v in submod.test_api.__dict__.itervalues():
if inspect.isclass(v) and issubclass(v, RecipeTestApi):
assert not submod.TEST_API, (
'More than one TestApi subclass: %s' % submod.test_api)
submod.TEST_API = v
assert submod.TEST_API, (
'Submodule has test_api.py but no TestApi subclass? %s'
% (submod)
)
# Let each property object know about the property name.
submod.PROPERTIES = {
prop_name: value.bind(prop_name, BoundProperty.MODULE_PROPERTY, name)
for prop_name, value in getattr(submod, 'PROPERTIES', {}).items()}
class DependencyMapper(object):
"""DependencyMapper topologically traverses the dependency DAG beginning at
a module, executing a callback ("instantiator") for each module.
For example, if the dependency DAG looked like this:
      A
     / \
    B   C
     \ /
      D
(with D depending on B and C, etc.), DependencyMapper(f).instantiate(D) would
construct
f_A = f(A, {})
f_B = f(B, { 'A': f_A })
f_C = f(C, { 'A': f_A })
f_D = f(D, { 'B': f_B, 'C': f_C })
finally returning f_D. instantiate can be called multiple times, which reuses
already-computed results.
"""
def __init__(self, instantiator):
self._instantiator = instantiator
self._instances = {}
def instantiate(self, mod):
if mod in self._instances:
return self._instances[mod]
deps_dict = { name: self.instantiate(dep)
for name, dep in mod.LOADED_DEPS.iteritems() }
self._instances[mod] = self._instantiator(mod, deps_dict)
return self._instances[mod]
def invoke_with_properties(callable_obj, all_props, prop_defs,
**additional_args):
"""
Invokes callable with filtered, type-checked properties.
Args:
callable_obj: The function to call, or class to instantiate.
This supports passing in either RunSteps, or a recipe module,
which is a class.
all_props: A dictionary containing all the property values currently
defined in the system.
prop_defs: A dictionary of name to property definitions for this callable.
additional_args: kwargs to pass through to the callable.
Note that the names of the arguments can correspond to
positional arguments as well.
Returns:
The result of calling callable with the filtered properties
and additional arguments.
"""
# Check that we got passed BoundProperties, and not Properties
for name, prop in prop_defs.items():
if not isinstance(prop, BoundProperty):
raise ValueError(
"You tried to invoke {} with an unbound Property {} named {}".format(
callable_obj, prop, name))
# To detect when a property that appears as a function argument was not
# specified, we list the arguments through inspection and then compare that
# list to the provided properties. We use a list instead of a dict because
# getargspec returns a list which we would have to convert to a dictionary,
# and the benefit of the dictionary is pretty small.
props = []
if inspect.isclass(callable_obj):
arg_names = inspect.getargspec(callable_obj.__init__).args
arg_names.pop(0)
else:
arg_names = inspect.getargspec(callable_obj).args
for arg in arg_names:
if arg in additional_args:
props.append(additional_args.pop(arg))
continue
if arg not in prop_defs:
raise UndefinedPropertyException(
"Missing property definition for '{}'.".format(arg))
prop = prop_defs[arg]
props.append(prop.interpret(all_props.get(
prop.param_name, PROPERTY_SENTINEL)))
return callable_obj(*props, **additional_args)
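# Hedged sketch of the call pattern (names are hypothetical): for
#   def RunSteps(api, target): ...
# with prop_defs == {'target': <BoundProperty 'target'>} and
# all_props == {'target': 'x'}, passing api via additional_args results in
# roughly RunSteps(api, 'x'): 'api' is consumed from additional_args and
# 'target' is interpreted from all_props through its BoundProperty.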
def create_recipe_api(toplevel_deps, engine, test_data=DisabledTestData()):
def instantiator(mod, deps):
kwargs = {
'module': mod,
'engine': engine,
# TODO(luqui): test_data will need to use canonical unique names.
'test_data': test_data.get_module_test_data(mod.NAME)
}
prop_defs = mod.PROPERTIES
mod_api = invoke_with_properties(
mod.API, engine.properties, prop_defs, **kwargs)
mod_api.test_api = (getattr(mod, 'TEST_API', None)
or RecipeTestApi)(module=mod)
for k, v in deps.iteritems():
setattr(mod_api.m, k, v)
setattr(mod_api.test_api.m, k, v.test_api)
return mod_api
mapper = DependencyMapper(instantiator)
api = RecipeApi(module=None, engine=engine,
test_data=test_data.get_module_test_data(None))
for k, v in toplevel_deps.iteritems():
setattr(api, k, mapper.instantiate(v))
return api
def create_test_api(toplevel_deps, universe):
def instantiator(mod, deps):
modapi = (getattr(mod, 'TEST_API', None) or RecipeTestApi)(module=mod)
for k,v in deps.iteritems():
setattr(modapi.m, k, v)
return modapi
mapper = DependencyMapper(instantiator)
api = RecipeTestApi(module=None)
for k,v in toplevel_deps.iteritems():
setattr(api, k, mapper.instantiate(v))
return api
|
|
from nose.tools import *
import mock
from tests.factories import UserFactory, ProjectFactory
from framework.auth.decorators import Auth
from website.addons.dataverse.model import (
AddonDataverseUserSettings, AddonDataverseNodeSettings, DataverseFile
)
from website.addons.dataverse.tests.utils import DataverseAddonTestCase
class TestDataverseFile(DataverseAddonTestCase):
def test_constants(self):
dvf = DataverseFile()
assert_equal('dataverse', dvf.provider)
assert_equal('state', dvf.version_identifier)
def test_path_doesnt_crash_without_addon(self):
dvf = DataverseFile(node=self.project, file_id='12345')
self.project.delete_addon('dataverse', Auth(self.user))
assert_is(self.project.get_addon('dataverse'), None)
assert_true(dvf.file_id)
assert_true(dvf.waterbutler_path)
def test_waterbutler_path(self):
dvf = DataverseFile(node=self.project, file_id='12345')
assert_equals(dvf.file_id, '12345')
assert_equals(dvf.waterbutler_path, '/12345')
def test_unique_identifier(self):
dvf = DataverseFile(node=self.project, file_id='12345')
assert_equal(dvf.file_id, '12345')
assert_equals(dvf.unique_identifier, '12345')
def test_node_addon_get_or_create(self):
dvf, created = self.node_settings.find_or_create_file_guid('12345')
assert_true(created)
assert_equal(dvf.file_id, '12345')
assert_equal(dvf.waterbutler_path, '/12345')
def test_node_addon_get_or_create_finds(self):
dvf1, created1 = self.node_settings.find_or_create_file_guid('12345')
dvf2, created2 = self.node_settings.find_or_create_file_guid('12345')
assert_true(created1)
assert_false(created2)
assert_equals(dvf1, dvf2)
@mock.patch('website.addons.base.requests.get')
def test_name(self, mock_get):
mock_response = mock.Mock(ok=True, status_code=200)
mock_get.return_value = mock_response
mock_response.json.return_value = {
'data': {
'name': 'Morty.foo',
'path': '/12345',
},
}
dvf, created = self.node_settings.find_or_create_file_guid('12345')
dvf.enrich()
assert_equal(dvf.name, 'Morty.foo')
@mock.patch('website.addons.base.requests.get')
def test_mfr_temp_path(self, mock_get):
mock_response = mock.Mock(ok=True, status_code=200)
mock_get.return_value = mock_response
mock_response.json.return_value = {
'data': {
'name': 'Morty.foo',
'path': '/12345',
},
}
dvf, created = self.node_settings.find_or_create_file_guid('12345')
dvf.enrich()
# Included fields ensure uniqueness
assert_in(self.project._id, dvf.mfr_temp_path)
assert_in(dvf.provider, dvf.mfr_temp_path)
assert_in(dvf.unique_identifier, dvf.mfr_temp_path)
class TestDataverseUserSettings(DataverseAddonTestCase):
def test_has_auth(self):
# Dataverse has no auth by default
dataverse = AddonDataverseUserSettings()
assert_false(dataverse.has_auth)
# With valid credentials, dataverse is authorized
dataverse.api_token = 'snowman-frosty'
assert_true(dataverse.has_auth)
def test_clear(self):
self.user_settings.clear()
# Fields were cleared, but settings were not deleted
assert_false(self.user_settings.api_token)
assert_false(self.user_settings.deleted)
# Authorized node settings were deauthorized
assert_false(self.node_settings.dataverse_alias)
assert_false(self.node_settings.dataverse)
assert_false(self.node_settings.dataset_doi)
assert_false(self.node_settings._dataset_id) # Getter makes request
assert_false(self.node_settings.dataset)
assert_false(self.node_settings.user_settings)
# Authorized node settings were not deleted
assert_false(self.node_settings.deleted)
@mock.patch('website.addons.dataverse.model.AddonDataverseUserSettings.clear')
def test_delete(self, mock_clear):
self.user_settings.delete()
assert_true(self.user_settings.deleted)
mock_clear.assert_called_once_with()
class TestDataverseNodeSettings(DataverseAddonTestCase):
def test_fields(self):
node_settings = AddonDataverseNodeSettings(user_settings=self.user_settings)
node_settings.save()
assert_true(node_settings.user_settings)
assert_equal(node_settings.user_settings.owner, self.user)
assert_true(hasattr(node_settings, 'dataverse'))
assert_true(hasattr(node_settings, 'dataverse_alias'))
assert_true(hasattr(node_settings, 'dataset'))
assert_true(hasattr(node_settings, 'dataset_doi'))
def test_defaults(self):
node_settings = AddonDataverseNodeSettings(user_settings=self.user_settings)
node_settings.save()
assert_is_none(node_settings.dataverse)
assert_is_none(node_settings.dataverse_alias)
assert_is_none(node_settings.dataset)
assert_is_none(node_settings.dataset_doi)
def test_has_auth(self):
node_settings = AddonDataverseNodeSettings()
node_settings.save()
assert_false(node_settings.has_auth)
user_settings = AddonDataverseUserSettings()
user_settings.save()
node_settings.user_settings = user_settings
node_settings.save()
assert_false(node_settings.has_auth)
user_settings.api_token = 'foo-bar'
user_settings.save()
assert_true(node_settings.has_auth)
@mock.patch('website.addons.dataverse.model.AddonDataverseNodeSettings.deauthorize')
def test_delete(self, mock_deauth):
num_old_logs = len(self.project.logs)
self.node_settings.delete()
assert_true(self.node_settings.deleted)
args, kwargs = mock_deauth.call_args
assert_equal(kwargs, {'add_log': False})
# Log was not generated
self.project.reload()
assert_equal(len(self.project.logs), num_old_logs)
def test_set_user_auth(self):
project = ProjectFactory()
project.add_addon('dataverse', auth=Auth(self.user))
node_settings = project.get_addon('dataverse')
num_old_logs = len(project.logs)
assert_false(node_settings.user_settings)
node_settings.set_user_auth(self.user_settings)
node_settings.save()
assert_equal(node_settings.user_settings, self.user_settings)
# Test log
project.reload()
assert_equal(len(project.logs), num_old_logs + 1)
last_log = project.logs[-1]
assert_equal(last_log.action, 'dataverse_node_authorized')
assert_equal(last_log.params['node'], project._primary_key)
assert_is_none(last_log.params['project'])
def test_deauthorize(self):
self.node_settings.deauthorize(Auth(self.user))
assert_false(self.node_settings.dataverse_alias)
assert_false(self.node_settings.dataverse)
assert_false(self.node_settings.dataset_doi)
assert_false(self.node_settings.dataset)
assert_false(self.node_settings.user_settings)
class TestNodeSettingsCallbacks(DataverseAddonTestCase):
def test_after_fork_by_authorized_dataverse_user(self):
fork = ProjectFactory()
clone, message = self.node_settings.after_fork(
node=self.project, fork=fork, user=self.user_settings.owner
)
assert_equal(clone.user_settings, self.user_settings)
def test_after_fork_by_unauthorized_dataverse_user(self):
fork = ProjectFactory()
user = UserFactory()
clone, message = self.node_settings.after_fork(
node=self.project, fork=fork, user=user,
save=True
)
assert_is_none(clone.user_settings)
def test_before_fork(self):
node = ProjectFactory()
message = self.node_settings.before_fork(node, self.user)
assert_true(message)
def test_before_remove_contributor_message(self):
message = self.node_settings.before_remove_contributor(
self.project, self.user)
assert_true(message)
assert_in(self.user.fullname, message)
assert_in(self.project.project_or_component, message)
def test_after_remove_authorized_dataverse_user_not_self(self):
message = self.node_settings.after_remove_contributor(
node=self.project, removed=self.user_settings.owner)
self.node_settings.save()
assert_is_none(self.node_settings.user_settings)
assert_true(message)
assert_in("You can re-authenticate", message)
def test_after_remove_authorized_dataverse_user_self(self):
auth = Auth(user=self.user_settings.owner)
message = self.node_settings.after_remove_contributor(
self.project, self.user_settings.owner, auth)
self.node_settings.save()
assert_is_none(self.node_settings.user_settings)
assert_true(message)
assert_not_in("You can re-authenticate", message)
def test_after_delete(self):
self.project.remove_node(Auth(user=self.project.creator))
# Ensure that changes to node settings have been saved
self.node_settings.reload()
assert_true(self.node_settings.user_settings is None)
assert_true(self.node_settings.dataverse_alias is None)
assert_true(self.node_settings.dataverse is None)
assert_true(self.node_settings.dataset_doi is None)
assert_true(self.node_settings.dataset is None)
def test_does_not_get_copied_to_registrations(self):
registration = self.project.register_node(
schema=None,
auth=Auth(user=self.project.creator),
template='Template1',
data='hodor'
)
assert_false(registration.has_addon('dataverse'))
|
|
'''
Author: Tobi and Gundram
'''
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.contrib.layers import batch_norm
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.rnn import bidirectional_rnn
from util.LoaderUtil import read_image_list, get_list_vals
from random import shuffle
from util.STR2CTC import get_charmap_lp, get_charmap_lp_inv
import os
import time
import numpy as np
import matplotlib.pyplot as plt
# Goes down to 10%
INPUT_PATH_TRAIN = './private/lists/lp_only_shifted_train.lst'
INPUT_PATH_VAL = './private/lists/lp_only_val.lst'
cm, nClasses = get_charmap_lp()
# Additional NaC channel (the CTC blank label)
nClasses += 1
nEpochs = 15
batchSize = 16
# It is assumed that the TextLines are ALL saved with a consistent height of imgH
imgH = 48
# Depending on the size the image is cropped or zero padded
imgW = 256
channels = 1
nHiddenLSTM1 = 256
os.chdir("../..")
trainList = read_image_list(INPUT_PATH_TRAIN)
numT = 32998
stepsPerEpocheTrain = numT // batchSize  # integer division so this can be used as a range() bound
valList = read_image_list(INPUT_PATH_VAL)
stepsPerEpocheVal = len(valList) // batchSize  # integer division so this can be used as a range() bound
def get_saver_dict(prefix):
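    # Collect the global variables created under the given scope and key them
    # by name with the scope prefix stripped, presumably so the Saver can
    # restore these weights into a graph that uses a different scope name.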
dict = {}
if prefix[-1] != '/':
prefix = prefix + '/'
res = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=prefix)
for t in res:
key = t.name
key = key[len(prefix):]
dict[str(key)] = t
# print(dict)
return dict
def inference(images, seqLen, keep_prob, phase_train):
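    # Network definition: three convolutional blocks (each with local response
    # normalization, the last two followed by max pooling), a single
    # bidirectional LSTM layer with dropout, and a per-timestep linear
    # projection onto nClasses logits. Returns the time-major logits together
    # with the sequence lengths rescaled to match the horizontal striding of
    # the convolutions, as required by the CTC loss/decoder.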
with tf.variable_scope('readPart') as scope:
with tf.variable_scope('conv1') as scope:
kernel = tf.Variable(tf.truncated_normal([6, 5, channels, 32], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(images, kernel, [1, 4, 3, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[32]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name=scope.name)
norm1 = tf.nn.local_response_normalization(conv1, name='norm1')
# _activation_summary(conv1)
# norm1 = tf.nn.local_response_normalization(conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm1')
seqFloat = tf.to_float(seqLen)
seqL2 = tf.ceil(seqFloat * 0.33)
with tf.variable_scope('conv2') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[64]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name=scope.name)
norm2 = tf.nn.local_response_normalization(conv2, name='norm2')
# _activation_summary(conv2)
# norm2
# norm2 = tf.nn.local_response_normalization(conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,name='norm2')
pool2 = tf.nn.max_pool(norm2, ksize=[1, 4, 2, 1], strides=[1, 4, 2, 1], padding='SAME', name='pool2')
seqL3 = tf.ceil(seqL2 * 0.5)
with tf.variable_scope('conv3') as scope:
kernel = tf.Variable(tf.truncated_normal([5, 3, 64, 128], stddev=5e-2), name='weights')
##Weight Decay?
# weight_decay = tf.mul(tf.nn.l2_loss(kernel), 0.002, name='weight_loss')
# tf.add_to_collection('losses', weight_decay)
conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
biases = tf.Variable(tf.constant(0.1, shape=[128]), name='biases')
pre_activation = tf.nn.bias_add(conv, biases)
conv3 = tf.nn.relu(pre_activation, name=scope.name)
norm3 = tf.nn.local_response_normalization(conv3, name='norm3')
            pool3 = tf.nn.max_pool(norm3, ksize=[1, 3, 1, 1], strides=[1, 3, 1, 1], padding='SAME', name='pool3')
# NO POOLING HERE -> CTC needs an appropriate length.
seqLenAfterConv = tf.to_int32(seqL3)
with tf.variable_scope('RNN_Prep') as scope:
# (#batch Y X Z) --> (X #batch Y Z)
rnnIn = tf.transpose(pool3, [2, 0, 1, 3])
# (X #batch Y Z) --> (X #batch Y*Z)
shape = rnnIn.get_shape()
steps = shape[0]
rnnIn = tf.reshape(rnnIn, tf.pack([shape[0], shape[1], -1]))
# (X #batch Y*Z) --> (X*#batch Y*Z)
shape = rnnIn.get_shape()
rnnIn = tf.reshape(rnnIn, tf.pack([-1, shape[2]]))
# (X*#batch Y*Z) --> list of X tensors of shape (#batch, Y*Z)
rnnIn = tf.split(0, steps, rnnIn)
with tf.variable_scope('BLSTM1') as scope:
forwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
droppedFW = rnn_cell.DropoutWrapper(forwardH1, output_keep_prob=keep_prob)
backwardH1 = rnn_cell.LSTMCell(nHiddenLSTM1, use_peepholes=True, state_is_tuple=True)
droppedBW = rnn_cell.DropoutWrapper(backwardH1, output_keep_prob=keep_prob)
outputs, _, _ = bidirectional_rnn(droppedFW, droppedBW, rnnIn, dtype=tf.float32)
fbH1rs = [tf.reshape(t, [batchSize, 2, nHiddenLSTM1]) for t in outputs]
# outH1 = [tf.reduce_sum(tf.mul(t, weightsOutH1), reduction_indices=1) + biasesOutH1 for t in fbH1rs]
outH1 = [tf.reduce_sum(t, reduction_indices=1) for t in fbH1rs]
with tf.variable_scope('LOGIT') as scope:
weightsClasses = tf.Variable(tf.truncated_normal([nHiddenLSTM1, nClasses],
stddev=np.sqrt(2.0 / nHiddenLSTM1)))
biasesClasses = tf.Variable(tf.zeros([nClasses]))
logitsFin = [tf.matmul(t, weightsClasses) + biasesClasses for t in outH1]
logits3d = tf.pack(logitsFin)
return logits3d, seqLenAfterConv
def loss(logits3d, tgt, seqLenAfterConv):
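    # Sum the CTC loss over the batch; if any UPDATE_OPS have been registered
    # (e.g. by batch normalization), make the loss depend on them so they run
    # on every training step.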
loss = tf.reduce_sum(ctc.ctc_loss(logits3d, tgt, seqLenAfterConv))
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if update_ops:
updates = tf.group(*update_ops)
loss = control_flow_ops.with_dependencies([updates], loss)
return loss
print('Defining graph')
graph = tf.Graph()
with graph.as_default():
####Graph input
inputX = tf.placeholder(tf.float32, shape=(batchSize, imgH, imgW, channels))
targetIxs = tf.placeholder(tf.int64)
targetVals = tf.placeholder(tf.int32)
targetShape = tf.placeholder(tf.int64)
targetY = tf.SparseTensor(targetIxs, targetVals, targetShape)
seqLengths = tf.placeholder(tf.int32, shape=(batchSize))
keep_prob = tf.placeholder(tf.float32)
trainIN = tf.placeholder_with_default(tf.constant(False), [])
logits3d, seqAfterConv = inference(inputX, seqLengths, keep_prob, trainIN)
loss = loss(logits3d, targetY, seqAfterConv)
dict = get_saver_dict('readPart')
saver = tf.train.Saver(dict)
# optimizer = tf.train.MomentumOptimizer(learningRate, momentum).minimize(loss)
optimizer = tf.train.AdamOptimizer().minimize(loss)
# pred = tf.to_int32(ctc.ctc_beam_search_decoder(logits3d, seqAfterConv, merge_repeated=False)[0][0])
pred = tf.to_int32(ctc.ctc_greedy_decoder(logits3d, seqAfterConv)[0][0])
edist = tf.edit_distance(pred, targetY, normalize=False)
tgtLens = tf.to_float(tf.size(targetY.values))
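    # err approximates the character error rate: the summed edit distance
    # between the greedy CTC decodings and the targets, divided by the total
    # number of target labels in the batch.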
err = tf.reduce_sum(edist) / tgtLens
with tf.Session(graph=graph) as session:
# writer = tf.train.SummaryWriter('./log', session.graph)
print('Initializing')
tf.global_variables_initializer().run()
# ckpt = tf.train.get_checkpoint_state("./private/models/lp2/")
# if ckpt and ckpt.model_checkpoint_path:
# saver.restore(session, ckpt.model_checkpoint_path)
# print(ckpt)
# workList = valList[:]
# errV = 0
# lossV = 0
# timeVS = time.time()
# cmInv = get_charmap_lp_inv()
# for bStep in range(stepsPerEpocheVal):
# bList, workList = workList[:batchSize], workList[batchSize:]
# batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
# imgW,
# mvn=True)
# feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
# targetShape: batchTargetShape, seqLengths: batchSeqLengths}
# lossB, aErr, p = session.run([loss, err, pred], feed_dict=feedDict)
# print(aErr)
# res = []
# for idx in p.values:
# res.append(cmInv[idx])
# print(res)
# # print(p)
# plt.imshow(batchInputs[0,:,:,0], cmap=plt.cm.gray)
# plt.show()
#
# lossV += lossB
# errV += aErr
# print('Val: CTC-loss ', lossV)
# errVal = errV / stepsPerEpocheVal
# print('Val: CER ', errVal)
# print('Val time ', time.time() - timeVS)
for epoch in range(nEpochs):
workList = trainList[:]
shuffle(workList)
workList = workList[0:32998]
print('Epoch', epoch + 1, '...')
lossT = 0
errT = 0
timeTS = time.time()
for bStep in range(stepsPerEpocheTrain):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgW,
mvn=True)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: imgW*np.ones([batchSize]), keep_prob: 0.5, trainIN: True}
_, lossB, aErr = session.run([optimizer, loss, err], feed_dict=feedDict)
# _, lossB, aErr, sET, sLT = session.run([optimizer, loss, err, err_train, loss_train], feed_dict=feedDict)
lossT += lossB
# writer.add_summary(sET, epoch * stepsPerEpocheTrain + bStep)
# writer.add_summary(sLT, epoch * stepsPerEpocheTrain + bStep)
errT += aErr
print('Train: CTC-loss ', lossT)
cerT = errT / stepsPerEpocheTrain
print('Train: CER ', cerT)
print('Train time ', time.time() - timeTS)
workList = valList[:]
errV = 0
lossV = 0
timeVS = time.time()
for bStep in range(stepsPerEpocheVal):
bList, workList = workList[:batchSize], workList[batchSize:]
batchInputs, batchSeqLengths, batchTargetIdxs, batchTargetVals, batchTargetShape = get_list_vals(bList, cm,
imgW,
mvn=True)
feedDict = {inputX: batchInputs, targetIxs: batchTargetIdxs, targetVals: batchTargetVals,
targetShape: batchTargetShape, seqLengths: imgW*np.ones([batchSize]), keep_prob: 1.0, trainIN: False}
lossB, aErr = session.run([loss, err], feed_dict=feedDict)
# lossB, aErr, sE, sL = session.run([loss, err, err_val, loss_val], feed_dict=feedDict)
# writer.add_summary(sE, epoch*stepsPerEpocheVal + bStep)
# writer.add_summary(sL, epoch * stepsPerEpocheVal + bStep)
lossV += lossB
errV += aErr
print('Val: CTC-loss ', lossV)
errVal = errV / stepsPerEpocheVal
print('Val: CER ', errVal)
print('Val time ', time.time() - timeVS)
# Write a checkpoint.
checkpoint_file = os.path.join('./private/models/lp24/', 'checkpoint')
saver.save(session, checkpoint_file, global_step=epoch)
|
|
"""Default values and other various configuration for projects,
including available theme names and repository types.
"""
import re
from django.utils.translation import ugettext_lazy as _
THEME_DEFAULT = 'default'
THEME_SPHINX = 'sphinxdoc'
THEME_SCROLLS = 'scrolls'
THEME_AGOGO = 'agogo'
THEME_TRADITIONAL = 'traditional'
THEME_NATURE = 'nature'
THEME_HAIKU = 'haiku'
DOCUMENTATION_CHOICES = (
('auto', _('Automatically Choose')),
('sphinx', _('Sphinx Html')),
('mkdocs', _('Mkdocs (Markdown)')),
('sphinx_htmldir', _('Sphinx HtmlDir')),
('sphinx_singlehtml', _('Sphinx Single Page HTML')),
#('sphinx_websupport2', _('Sphinx Websupport')),
#('rdoc', 'Rdoc'),
)
DEFAULT_THEME_CHOICES = (
# Translators: This is a name of a Sphinx theme.
(THEME_DEFAULT, _('Default')),
# Translators: This is a name of a Sphinx theme.
(THEME_SPHINX, _('Sphinx Docs')),
#(THEME_SCROLLS, 'Scrolls'),
#(THEME_AGOGO, 'Agogo'),
# Translators: This is a name of a Sphinx theme.
(THEME_TRADITIONAL, _('Traditional')),
# Translators: This is a name of a Sphinx theme.
(THEME_NATURE, _('Nature')),
# Translators: This is a name of a Sphinx theme.
(THEME_HAIKU, _('Haiku')),
)
SAMPLE_FILES = (
('Installation', 'projects/samples/installation.rst.html'),
('Getting started', 'projects/samples/getting_started.rst.html'),
)
SCRAPE_CONF_SETTINGS = [
'copyright',
'project',
'version',
'release',
'source_suffix',
'html_theme',
'extensions',
]
HEADING_MARKUP = (
(1, '='),
(2, '-'),
(3, '^'),
(4, '"'),
)
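# Maps an RST heading depth to the underline character used for that level
# (level 1 -> '=', level 2 -> '-', ...); presumably consumed when generating
# headings for the sample files above.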
LIVE_STATUS = 1
DELETED_STATUS = 99
STATUS_CHOICES = (
(LIVE_STATUS, _('Live')),
(DELETED_STATUS, _('Deleted')),
)
REPO_CHOICES = (
('git', _('Git')),
('svn', _('Subversion')),
('hg', _('Mercurial')),
('bzr', _('Bazaar')),
)
PUBLIC = 'public'
PROTECTED = 'protected'
PRIVATE = 'private'
PRIVACY_CHOICES = (
(PUBLIC, _('Public')),
(PROTECTED, _('Protected')),
(PRIVATE, _('Private')),
)
IMPORTANT_VERSION_FILTERS = {
'slug': 'important'
}
# In the future this constant could be replaced with an implementation that
# detects all available Python interpreters on the fly (maybe using the
# update-alternatives Linux tool family?).
PYTHON_CHOICES = (
('python', _('CPython 2.x')),
('python3', _('CPython 3.x')),
)
# Via http://sphinx-doc.org/latest/config.html#confval-language
# Languages supported for the lang_slug in the URL
# Translations for builtin Sphinx messages only available for a subset of these
LANGUAGES = (
("aa", "Afar"),
("ab", "Abkhaz"),
("af", "Afrikaans"),
("am", "Amharic"),
("ar", "Arabic"),
("as", "Assamese"),
("ay", "Aymara"),
("az", "Azerbaijani"),
("ba", "Bashkir"),
("be", "Belarusian"),
("bg", "Bulgarian"),
("bh", "Bihari"),
("bi", "Bislama"),
("bn", "Bengali"),
("bo", "Tibetan"),
("br", "Breton"),
("ca", "Catalan"),
("co", "Corsican"),
("cs", "Czech"),
("cy", "Welsh"),
("da", "Danish"),
("de", "German"),
("dz", "Dzongkha"),
("el", "Greek"),
("en", "English"),
("eo", "Esperanto"),
("es", "Spanish"),
("et", "Estonian"),
("eu", "Basque"),
("fa", "Iranian"),
("fi", "Finnish"),
("fj", "Fijian"),
("fo", "Faroese"),
("fr", "French"),
("fy", "Western Frisian"),
("ga", "Irish"),
("gd", "Scottish Gaelic"),
("gl", "Galician"),
("gn", "Guarani"),
("gu", "Gujarati"),
("ha", "Hausa"),
("hi", "Hindi"),
("he", "Hebrew"),
("hr", "Croatian"),
("hu", "Hungarian"),
("hy", "Armenian"),
("ia", "Interlingua"),
("id", "Indonesian"),
("ie", "Interlingue"),
("ik", "Inupiaq"),
("is", "Icelandic"),
("it", "Italian"),
("iu", "Inuktitut"),
("ja", "Japanese"),
("jv", "Javanese"),
("ka", "Georgian"),
("kk", "Kazakh"),
("kl", "Kalaallisut"),
("km", "Khmer"),
("kn", "Kannada"),
("ko", "Korean"),
("ks", "Kashmiri"),
("ku", "Kurdish"),
("ky", "Kyrgyz"),
("la", "Latin"),
("ln", "Lingala"),
("lo", "Lao"),
("lt", "Lithuanian"),
("lv", "Latvian"),
("mg", "Malagasy"),
("mi", "Maori"),
("mk", "Macedonian"),
("ml", "Malayalam"),
("mn", "Mongolian"),
("mr", "Marathi"),
("ms", "Malay"),
("mt", "Maltese"),
("my", "Burmese"),
("na", "Nauru"),
("ne", "Nepali"),
("nl", "Dutch"),
("no", "Norwegian"),
("oc", "Occitan"),
("om", "Oromo"),
("or", "Oriya"),
("pa", "Panjabi"),
("pl", "Polish"),
("ps", "Pashto"),
("pt", "Portuguese"),
("qu", "Quechua"),
("rm", "Romansh"),
("rn", "Kirundi"),
("ro", "Romanian"),
("ru", "Russian"),
("rw", "Kinyarwanda"),
("sa", "Sanskrit"),
("sd", "Sindhi"),
("sg", "Sango"),
("si", "Sinhala"),
("sk", "Slovak"),
("sl", "Slovenian"),
("sm", "Samoan"),
("sn", "Shona"),
("so", "Somali"),
("sq", "Albanian"),
("sr", "Serbian"),
("ss", "Swati"),
("st", "Southern Sotho"),
("su", "Sudanese"),
("sv", "Swedish"),
("sw", "Swahili"),
("ta", "Tamil"),
("te", "Telugu"),
("tg", "Tajik"),
("th", "Thai"),
("ti", "Tigrinya"),
("tk", "Turkmen"),
("tl", "Tagalog"),
("tn", "Tswana"),
("to", "Tonga"),
("tr", "Turkish"),
("ts", "Tsonga"),
("tt", "Tatar"),
("tw", "Twi"),
("ug", "Uyghur"),
("uk", "Ukrainian"),
("ur", "Urdu"),
("uz", "Uzbek"),
("vi", "Vietnamese"),
("vo", "Volapuk"),
("wo", "Wolof"),
("xh", "Xhosa"),
("yi", "Yiddish"),
("yo", "Yoruba"),
("za", "Zhuang"),
("zh", "Chinese"),
("zu", "Zulu"),
# Try these to test our non-2 letter language support
("nb_NO", "Norwegian Bokmal"),
("pt_BR", "Brazilian Portuguese"),
("uk_UA", "Ukrainian"),
("zh_CN", "Simplified Chinese"),
("zh_TW", "Traditional Chinese"),
)
LANGUAGES_REGEX = "|".join(
[re.escape(code[0]) for code in LANGUAGES]
)
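# Illustrative (hypothetical) usage: the regex is intended to be interpolated
# into URL patterns to constrain the language slug, e.g.
#   url(r'^(?P<lang_slug>(%s))/' % LANGUAGES_REGEX, ...)
# The actual URLconf entries live outside this module.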
PROGRAMMING_LANGUAGES = (
("words", "Only Words"),
("py", "Python"),
("js", "Javascript"),
("php", "PHP"),
("ruby", "Ruby"),
("perl", "Perl"),
("java", "Java"),
("go", "Go"),
("julia", "Julia"),
("c", "C"),
("csharp", "C#"),
("cpp", "C++"),
("objc", "Objective-C"),
("other", "Other"),
)
LOG_TEMPLATE = u"(Build) [{project}:{version}] {msg}"
PROJECT_PK_REGEX = r'(?:[-\w]+)'
PROJECT_SLUG_REGEX = r'(?:[-\w]+)'
|
|
import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends.utils import truncate_name
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import select_related_descend, QueryWrapper
from django.db.models.sql.constants import (CURSOR, SINGLE, MULTI, NO_RESULTS,
ORDER_DIR, GET_ITERATOR_CHUNK_SIZE, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.query import get_order_dir, Query
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.six.moves import zip
from django.utils import timezone
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
# When ordering a queryset with distinct on a column not part of the
# select set, the ordering column needs to be added to the select
# clause. This information is needed both in SQL construction and
# masking away the ordering selects from the returned row.
self.ordering_aliases = []
self.ordering_params = []
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
        """
        # TODO: after the query has been executed, the altered state should be
        # cleaned. We are not using a clone() of the query here.
if not self.query.tables:
self.query.join((None, self.query.get_meta().db_table, None))
if (not self.query.select and self.query.default_cols and not
self.query.included_inherited_models):
self.query.setup_inherited_models()
if self.query.select_related and not self.query.related_select_cols:
self.fill_related_selections()
def __call__(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
return self(name)
def compile(self, node):
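        # Dispatch to a vendor-specific implementation (e.g. node.as_postgresql)
        # when the node provides one; otherwise fall back to the generic
        # node.as_sql().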
vendor_impl = getattr(
node, 'as_' + self.connection.vendor, None)
if vendor_impl:
return vendor_impl(self, self.connection)
else:
return node.as_sql(self, self.connection)
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.pre_sql_setup()
# After executing the query, we must get rid of any joins the query
# setup created. So, take note of alias counts before the query ran.
# However we do not want to get rid of stuff done in pre_sql_setup(),
# as the pre_sql_setup will modify query state in a way that forbids
# another run of it.
refcounts_before = self.query.alias_refcount.copy()
out_cols, s_params = self.get_columns(with_col_aliases)
ordering, o_params, ordering_group_by = self.get_ordering()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering' and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.query.where)
having, h_params = self.compile(self.query.having)
having_group_by = self.query.having.get_group_by_cols()
params = []
for val in six.itervalues(self.query.extra_select):
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
result.append(', '.join(out_cols + self.ordering_aliases))
params.extend(s_params)
params.extend(self.ordering_params)
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) not implemented.")
if not ordering:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
params.extend(o_params)
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError("select_for_update cannot be used outside of a transaction.")
# If we've been asked for a NOWAIT query but the backend does not support it,
# raise a DatabaseError otherwise we could get an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
return ' '.join(result), tuple(params)
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None and not self.query.distinct_fields:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
"""
Returns the list of columns to use in the select statement, as well as
        a list of any extra parameters that need to be included. If no columns
have been specified, returns all columns relating to fields in the
model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
some cases to avoid ambiguity with nested queries.
"""
qn = self
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)]
params = []
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col, _ in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
col_sql, col_params = self.compile(col)
result.append(col_sql)
params.extend(col_params)
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
for alias, annotation in self.query.annotation_select.items():
agg_sql, agg_params = self.compile(annotation)
if alias is None:
result.append(agg_sql)
else:
result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
params.extend(agg_params)
for (table, col), _ in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result, params
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
if opts is None:
opts = self.query.get_meta()
qn = self
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field, model in opts.get_concrete_fields_with_model():
if from_parent and model is not None and issubclass(from_parent, model):
# Avoid loading data for already loaded parents.
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.column
for seen_model, seen_alias in seen_models.items():
if seen_model and seen_alias == alias:
ancestor_link = seen_model._meta.get_ancestor_link(model)
if ancestor_link:
column = ancestor_link.column
break
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field))
aliases.add(alias)
continue
if with_aliases and column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(column)
return result, aliases
def get_distinct(self):
"""
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
result.append("%s.%s" % (qn(alias), qn2(target.column)))
return result
def get_ordering(self):
"""
Returns a tuple containing a list representing the SQL elements in the
"order by" clause, and the list of SQL elements that need to be added
to the GROUP BY clause as a result of the ordering.
Also sets the ordering_aliases attribute on this instance to a list of
extra aliases needed in the select.
Determining the ordering SQL can change the tables we need to include,
so this should be run *before* get_from_clause().
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by
or self.query.get_meta().ordering
or [])
qn = self
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
# to include the same field more than once in the ordering. We track
# the table/column pairs we use and discard any after the first use.
processed_pairs = set()
params = []
ordering_params = []
# For plain DISTINCT queries any ORDER BY clause must appear
# in SELECT clause.
# http://www.postgresql.org/message-id/[email protected]
must_append_to_select = distinct and not self.query.distinct_fields
for pos, field in enumerate(ordering):
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((str(field), []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.annotation_select:
result.append('%s %s' % (qn(col), order))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not must_append_to_select or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif not self.query._extra or get_order_dir(field)[0] not in self.query._extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
for table, cols, order in self.find_ordering_name(field,
self.query.get_meta(), default_order=asc):
for col in cols:
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if must_append_to_select and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if col not in self.query.extra_select:
if must_append_to_select:
sql = "(%s) AS %s" % (self.query.extra[col][0], elt)
ordering_aliases.append(sql)
ordering_params.extend(self.query.extra[col][1])
result.append('%s %s' % (elt, order))
else:
result.append("(%s) %s" % (self.query.extra[col][0], order))
params.extend(self.query.extra[col][1])
else:
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra[col])
self.ordering_aliases = ordering_aliases
self.ordering_params = ordering_params
return result, params, group_by
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless the attribute name
# of the field is specified.
if field.rel and path and opts.ordering and name != field.attname:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple(self.query.alias_map[j].table_name for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(alias, [t.column for t in targets], order)]
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_ordering and get_distinct.
Note that get_ordering and get_distinct must produce same target
columns on same input, as the prefixes of get_ordering and get_distinct
must match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, path = self.query.setup_joins(
pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
qn = self
qn2 = self.connection.ops.quote_name
first = True
from_params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, join_cols, _, join_field = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
alias_str = '' if alias == name else (' %s' % alias)
if join_type and not first:
extra_cond = join_field.get_extra_restriction(
self.query.where_class, alias, lhs)
if extra_cond:
extra_sql, extra_params = self.compile(extra_cond)
extra_sql = 'AND (%s)' % extra_sql
from_params.extend(extra_params)
else:
extra_sql = ""
result.append('%s %s%s ON ('
% (join_type, qn(name), alias_str))
for index, (lhs_col, rhs_col) in enumerate(join_cols):
if index != 0:
result.append(' AND ')
result.append('%s.%s = %s.%s' %
(qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))
result.append('%s)' % extra_sql)
else:
connector = '' if first else ', '
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
            # Only add the alias if it's not already present (the table_alias()
            # call increments the refcount, so an alias refcount of one means
            # this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = '' if first else ', '
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, from_params
def get_grouping(self, having_group_by, ordering_group_by):
"""
Returns a tuple representing the SQL elements in the "group by" clause.
"""
qn = self
result, params = [], []
if self.query.group_by is not None:
select_cols = self.query.select + self.query.related_select_cols
# Just the column, not the fields.
select_cols = [s[0] for s in select_cols]
if (len(self.query.get_meta().concrete_fields) == len(self.query.select)
and self.connection.features.allows_group_by_pk):
self.query.group_by = [
(self.query.get_initial_alias(), self.query.get_meta().pk.column)
]
select_cols = []
seen = set()
cols = self.query.group_by + having_group_by + select_cols
for col in cols:
col_params = ()
if isinstance(col, (list, tuple)):
sql = '%s.%s' % (qn(col[0]), qn(col[1]))
elif hasattr(col, 'as_sql'):
                    sql, col_params = self.compile(col)
else:
sql = '(%s)' % str(col)
if sql not in seen:
result.append(sql)
params.extend(col_params)
seen.add(sql)
# Still, we need to add all stuff in ordering (except if the backend can
# group by just by PK).
if ordering_group_by and not self.connection.features.allows_group_by_pk:
for order, order_params in ordering_group_by:
# Even if we have seen the same SQL string, it might have
# different params, so, we add same SQL in "has params" case.
if order not in seen or order_params:
result.append(order)
params.extend(order_params)
seen.add(order)
# Unconditionally add the extra_select items.
for extra_select, extra_params in self.query.extra_select.values():
sql = '(%s)' % str(extra_select)
result.append(sql)
params.extend(extra_params)
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
# The get_fields_with_model() returns None for fields that live
# in the field's local model. So, for those fields we want to use
# the f.model - that is the field's local model.
field_model = model or f.model
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns, _ = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(
SelectInfo((col[0], col[1].column), col[1]) for col in columns)
if restricted:
next = requested.get(f.name, {})
else:
next = False
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
next, restricted)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
_, _, _, joins, _ = self.query.setup_joins(
[f.related_query_name()], opts, root_alias)
alias = joins[-1]
from_parent = (opts.model if issubclass(model, opts.model)
else None)
columns, _ = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, from_parent=from_parent)
self.query.related_select_cols.extend(
SelectInfo((col[0], col[1].column), col[1]) for col in columns)
next = requested.get(f.related_query_name(), {})
self.fill_related_selections(model._meta, alias, cur_depth + 1,
next, restricted)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def get_converters(self, fields):
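        # Build a mapping from result-row position to the converters that must
        # be applied to that column; extra_select columns always come first in
        # the row, so field positions are offset by len(extra_select).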
converters = {}
index_extra_select = len(self.query.extra_select)
for i, field in enumerate(fields):
if field:
backend_converters = self.connection.ops.get_db_converters(field.get_internal_type())
field_converters = field.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[index_extra_select + i] = (backend_converters, field_converters, field)
return converters
def apply_converters(self, row, converters):
row = list(row)
for pos, (backend_converters, field_converters, field) in converters.items():
value = row[pos]
for converter in backend_converters:
value = converter(value, field)
for converter in field_converters:
value = converter(value, self.connection)
row[pos] = value
return tuple(row)
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
fields = None
converters = None
has_annotation_select = bool(self.query.annotation_select)
for rows in self.execute_sql(MULTI):
for row in rows:
if fields is None:
# We only set this up here because
# related_select_cols isn't populated until
# execute_sql() has been called.
# If the field was deferred, exclude it from being passed
# into `get_converters` because it wasn't selected.
only_load = self.deferred_to_columns()
# This code duplicates the logic for the order of fields
# found in get_columns(). It would be nice to clean this up.
if self.query.select:
fields = [f.field for f in self.query.select]
elif self.query.default_cols:
fields = self.query.get_meta().concrete_fields
else:
fields = []
if only_load:
# strip deferred fields
fields = [
f for f in fields if
f.model._meta.db_table not in only_load or
f.column in only_load[f.model._meta.db_table]
]
# annotations come before the related cols
if has_annotation_select:
# extra is always at the start of the field list
prepended_cols = len(self.query.extra_select)
annotation_start = len(fields) + prepended_cols
fields = fields + [
anno.output_field for alias, anno in self.query.annotation_select.items()]
annotation_end = len(fields) + prepended_cols
# add related fields
fields = fields + [
# strip deferred
f.field for f in self.query.related_select_cols if
f.field.model._meta.db_table not in only_load or
f.field.column in only_load[f.field.model._meta.db_table]
]
converters = self.get_converters(fields)
if has_annotation_select:
for (alias, annotation), position in zip(
self.query.annotation_select.items(),
range(annotation_start, annotation_end + 1)):
if position in converters:
# annotation conversions always run first
converters[position][1].insert(0, annotation.convert_value)
else:
converters[position] = ([], [annotation.convert_value], annotation.output_field)
if converters:
row = self.apply_converters(row, converters)
yield row
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI):
"""
Run the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
if not result_type:
result_type = NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
cursor.close()
raise
if result_type == CURSOR:
# Caller didn't specify a result_type, so just give them back the
# cursor to process (and close).
return cursor
if result_type == SINGLE:
try:
if self.ordering_aliases:
return cursor.fetchone()[:-len(self.ordering_aliases)]
return cursor.fetchone()
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
# The MULTI case.
if self.ordering_aliases:
result = order_modified_iter(cursor, len(self.ordering_aliases),
self.connection.features.empty_fetchmany_value)
else:
result = cursor_iter(cursor,
self.connection.features.empty_fetchmany_value)
if not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
def as_subquery_condition(self, alias, columns, qn):
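        # Render this query as a condition on an outer query: a single column
        # becomes "alias.col IN (subquery)", while multiple columns are
        # correlated against the outer alias and wrapped in EXISTS (...).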
inner_qn = self
qn2 = self.connection.ops.quote_name
if len(columns) == 1:
sql, params = self.as_sql()
return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
for index, select_col in enumerate(self.query.select):
lhs = '%s.%s' % (inner_qn(select_col.col[0]), qn2(select_col.col[1]))
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs, rhs), []), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
def __init__(self, *args, **kwargs):
self.return_id = False
super(SQLInsertCompiler, self).__init__(*args, **kwargs)
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if has_fields:
params = values = [
[
f.get_db_prep_save(
getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True),
connection=self.connection
) for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple(v for val in values for v in val))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
def execute_sql(self, return_id=False):
assert not (return_id and len(self.query.objs) != 1)
self.return_id = return_id
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False)
elif hasattr(val, 'prepare_database_save'):
if field.rel:
val = val.prepare_database_save(field)
else:
raise TypeError("Database is trying to update a relational field "
"of type %s with a value of type %s. Make sure "
"you are setting the correct relations" %
(field.__class__.__name__, val.__class__.__name__))
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.query.select_related = False
self.query.clear_ordering(True)
super(SQLUpdateCompiler, self).pre_sql_setup()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.query.clone(klass=Query)
query._extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
# Recheck the count - it is possible that fiddling with the select
# fields above removes tables from the query. Refs #18304.
count = query.count_active_tables()
if not self.query.related_updates and count == 1:
return
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
for alias in self.query.tables[1:]:
self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self, qn=None):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
if qn is None:
qn = self
sql, params = [], []
for annotation in self.query.annotation_select.values():
agg_sql, agg_params = self.compile(annotation)
sql.append(agg_sql)
params.extend(agg_params)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
class SQLDateCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
from django.db.models.fields import DateField
converters = self.get_converters([DateField()])
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = self.apply_converters(row, converters)[offset]
if isinstance(date, datetime.datetime):
date = date.date()
yield date
class SQLDateTimeCompiler(SQLCompiler):
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
from django.db.models.fields import DateTimeField
converters = self.get_converters([DateTimeField()])
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
datetime = self.apply_converters(row, converters)[offset]
# Datetimes are artificially returned in UTC on databases that
# don't support time zone. Restore the zone used in the query.
if settings.USE_TZ:
if datetime is None:
raise ValueError("Database returned an invalid value "
"in QuerySet.datetimes(). Are time zone "
"definitions for your database and pytz installed?")
datetime = datetime.replace(tzinfo=None)
datetime = timezone.make_aware(datetime, self.query.tzinfo)
yield datetime
def cursor_iter(cursor, sentinel):
"""
Yields blocks of rows from a cursor and ensures the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield rows
finally:
cursor.close()
def order_modified_iter(cursor, trim, sentinel):
"""
Yields blocks of rows from a cursor. We use this iterator in the special
case when extra output columns have been added to support ordering
requirements. We must trim those extra columns before anything else can use
the results, since they're only needed to make the SQL valid.
"""
try:
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[:-trim] for r in rows]
finally:
cursor.close()
|
|
import subprocess
from typing import Any, Dict, List
from zulint.printer import ENDC, GREEN
from .template_parser import is_django_block_tag, tokenize
def pretty_print_html(html: str, num_spaces: int = 4) -> str:
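    # Overall approach: tokenize the template, walk the start/end tokens while
    # maintaining a stack of currently open blocks, and record a per-line
    # "offset" describing how far each line should be nudged; the offsets are
    # then applied to re-indent the HTML.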
# We use 1-based indexing for both rows and columns.
tokens = tokenize(html)
lines = html.split("\n")
# We will keep a stack of "start" tags so that we know
# when HTML ranges end. Note that some start tags won't
# be blocks from an indentation standpoint.
stack: List[Dict[str, Any]] = []
# Seed our stack with a pseudo entry to make depth calculations
# easier.
info: Dict[str, Any] = dict(
block=False,
depth=-1,
line=-1,
token_kind="html_start",
tag="html",
extra_indent=0,
ignore_lines=[],
)
stack.append(info)
# Our main job is to figure out offsets that we use to nudge lines
# over by.
offsets: Dict[int, int] = {}
# Loop through our start/end tokens, and calculate offsets. As
# we proceed, we will push/pop info dictionaries on/off a stack.
for token in tokens:
if (
token.kind
in (
"html_start",
"handlebars_start",
"handlebars_singleton",
"html_singleton",
"django_start",
"jinja2_whitespace_stripped_type2_start",
"jinja2_whitespace_stripped_start",
)
and stack[-1]["tag"] != "pre"
):
# An HTML start tag should only cause a new indent if we
# are on a new line.
if token.tag not in ("extends", "include", "else", "elif") and (
is_django_block_tag(token.tag) or token.kind != "django_start"
):
is_block = token.line > stack[-1]["line"]
if is_block:
if (
(
token.kind == "handlebars_start"
and stack[-1]["token_kind"] == "handlebars_start"
)
or (
token.kind
in {
"django_start",
"jinja2_whitespace_stripped_type2_start",
"jinja2_whitespace_stripped_start",
}
and stack[-1]["token_kind"]
in {
"django_start",
"jinja2_whitespace_stripped_type2_start",
"jinja2_whitespace_stripped_start",
}
)
) and not stack[-1]["indenting"]:
info = stack.pop()
info["depth"] = info["depth"] + 1
info["indenting"] = True
info["adjust_offset_until"] = token.line
stack.append(info)
new_depth = stack[-1]["depth"] + 1
extra_indent = stack[-1]["extra_indent"]
line = lines[token.line - 1]
adjustment = len(line) - len(line.lstrip()) + 1
offset = (1 + extra_indent + new_depth * num_spaces) - adjustment
info = dict(
block=True,
depth=new_depth,
actual_depth=new_depth,
line=token.line,
tag=token.tag,
token_kind=token.kind,
line_span=token.line_span,
offset=offset,
extra_indent=token.col - adjustment + extra_indent,
extra_indent_prev=extra_indent,
adjustment=adjustment,
indenting=True,
adjust_offset_until=token.line,
ignore_lines=[],
)
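                    # Handlebars/Django block tags start out without
                    # contributing an indent level (indenting=False); the
                    # stack adjustment near the top of this branch bumps
                    # their depth only once another template block tag is
                    # nested inside them.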
if token.kind in ("handlebars_start", "django_start"):
info.update(dict(depth=new_depth - 1, indenting=False))
else:
info = dict(
block=False,
depth=stack[-1]["depth"],
actual_depth=stack[-1]["depth"],
line=token.line,
tag=token.tag,
token_kind=token.kind,
extra_indent=stack[-1]["extra_indent"],
ignore_lines=[],
)
stack.append(info)
elif (
token.kind
in (
"html_end",
"handlebars_end",
"html_singleton_end",
"django_end",
"handlebars_singleton_end",
"jinja2_whitespace_stripped_end",
)
and (stack[-1]["tag"] != "pre" or token.tag == "pre")
):
info = stack.pop()
if info["block"]:
# We are at the end of an indentation block. We
# assume the whole block was formatted ok before, just
# possibly at an indentation that we don't like, so we
# nudge over all lines in the block by the same offset.
start_line = info["line"]
end_line = token.line
if token.tag == "pre":
offsets[start_line] = 0
offsets[end_line] = 0
stack[-1]["ignore_lines"].append(start_line)
stack[-1]["ignore_lines"].append(end_line)
else:
offsets[start_line] = info["offset"]
line = lines[token.line - 1]
adjustment = len(line) - len(line.lstrip()) + 1
if adjustment == token.col and token.kind != "html_singleton_end":
offsets[end_line] = (
info["offset"]
+ info["adjustment"]
- adjustment
+ info["extra_indent"]
- info["extra_indent_prev"]
)
elif start_line + info["line_span"] - 1 == end_line and info["line_span"] > 1:
offsets[end_line] = (
1 + info["extra_indent"] + (info["depth"] + 1) * num_spaces
) - adjustment
# We would like singleton tags and tags which spread over
# multiple lines to have 2 space indentation.
offsets[end_line] -= 2
elif token.line != info["line"]:
offsets[end_line] = info["offset"]
if token.tag != "pre" and token.tag != "script":
for line_num in range(start_line + 1, end_line):
# Be careful not to override offsets that happened
# deeper in the HTML within our block.
if line_num not in offsets:
line = lines[line_num - 1]
new_depth = info["depth"] + 1
if (
line.lstrip().startswith("{{else}}")
or line.lstrip().startswith("{% else %}")
or line.lstrip().startswith("{% elif")
):
new_depth = info["actual_depth"]
extra_indent = info["extra_indent"]
adjustment = len(line) - len(line.lstrip()) + 1
offset = (1 + extra_indent + new_depth * num_spaces) - adjustment
if line_num <= start_line + info["line_span"] - 1:
# We would like singleton tags and tags which spread over
# multiple lines to have 2 space indentation.
offset -= 2
offsets[line_num] = offset
elif (
token.kind in ("handlebars_end", "django_end")
and info["indenting"]
and line_num < info["adjust_offset_until"]
and line_num not in info["ignore_lines"]
):
offsets[line_num] += num_spaces
elif token.tag != "pre":
for line_num in range(start_line + 1, end_line):
if line_num not in offsets:
offsets[line_num] = info["offset"]
else:
for line_num in range(start_line + 1, end_line):
if line_num not in offsets:
offsets[line_num] = 0
stack[-1]["ignore_lines"].append(line_num)
# Now that we have all of our offsets calculated, we can just
# join all our lines together, fixing up offsets as needed.
formatted_lines = []
for i, line in enumerate(html.split("\n")):
row = i + 1
offset = offsets.get(row, 0)
pretty_line = line
if line.strip() == "":
pretty_line = ""
else:
if offset > 0:
pretty_line = (" " * offset) + pretty_line
elif offset < 0:
pretty_line = pretty_line[-1 * offset :]
assert line.strip() == pretty_line.strip()
formatted_lines.append(pretty_line)
return "\n".join(formatted_lines)
def validate_indent_html(fn: str, fix: bool) -> int:
with open(fn) as f:
html = f.read()
phtml = pretty_print_html(html)
if not html.split("\n") == phtml.split("\n"):
if fix:
print(GREEN + "Automatically fixing problems..." + ENDC)
with open(fn, "w") as f:
f.write(phtml)
            # Since we successfully fixed the issues, we return a success
            # value, just as we do when the file is already well indented.
            return 1
print(
"Invalid indentation detected in file: "
f"{fn}\nDiff for the file against expected indented file:",
flush=True,
)
subprocess.run(["diff", fn, "-"], input=phtml, universal_newlines=True)
print()
print("This problem can be fixed with the `--fix` option.")
return 0
return 1
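# Hedged usage sketch (the template path below is hypothetical):
#
#   ok = validate_indent_html("templates/zerver/example.html", fix=False)
#   if not ok:
#       print("indentation problems were reported; rerun with fix=True")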
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
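# Usage: pass the RPC command to run as the first command-line argument, e.g.
# (the script name below is illustrative; use whatever this file is saved as):
#   python slendermancoin-rpc.py getinfo
#   python slendermancoin-rpc.py getbalance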
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:14101")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:14101")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Slendermancoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Slendermancoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import __main__
import argparse
import code
import os
import sys
def add_config_parameter(parser):
parser.add_argument(
'-c', '--config', dest='config_file', action='store', type=str,
help='custom config file', default=None
)
def load_run_parsers(subparsers):
run_parser = subparsers.add_parser(
'run', help='run application locally'
)
run_parser.add_argument(
'-p', '--port', dest='port', action='store', type=str,
help='application port', default='8000'
)
run_parser.add_argument(
'-a', '--address', dest='address', action='store', type=str,
help='application address', default='0.0.0.0'
)
run_parser.add_argument(
'--fake-tasks', action='store_true', help='fake tasks'
)
run_parser.add_argument(
'--fake-tasks-amqp', action='store_true',
help='fake tasks with real AMQP'
)
run_parser.add_argument(
'--keepalive',
action='store_true',
help='run keep alive thread'
)
add_config_parameter(run_parser)
run_parser.add_argument(
'--fake-tasks-tick-count', action='store', type=int,
help='Fake tasks tick count'
)
run_parser.add_argument(
'--fake-tasks-tick-interval', action='store', type=int,
help='Fake tasks tick interval in seconds'
)
run_parser.add_argument(
'--authentication-method', action='store', type=str,
help='Choose authentication type',
choices=['none', 'fake', 'keystone'],
)
def load_db_parsers(subparsers):
subparsers.add_parser(
'syncdb', help='sync application database'
)
subparsers.add_parser(
'dropdb', help='drop application database'
)
# fixtures
loaddata_parser = subparsers.add_parser(
'loaddata', help='load data from fixture'
)
loaddata_parser.add_argument(
'fixture', action='store', help='json fixture to load'
)
dumpdata_parser = subparsers.add_parser(
'dumpdata', help='dump models as fixture'
)
dumpdata_parser.add_argument(
        'model', action='store', help='model name to dump; underscored name '
        'should be used, e.g. network_group for NetworkGroup model'
)
subparsers.add_parser(
'loaddefault',
help='load data from default fixtures '
        '(settings.FIXTURES_TO_UPLOAD)'
)
def load_alembic_parsers(migrate_parser):
alembic_parser = migrate_parser.add_subparsers(
dest="alembic_command",
help='alembic command'
)
for name in ['current', 'history', 'branches']:
parser = alembic_parser.add_parser(name)
for name in ['upgrade', 'downgrade']:
parser = alembic_parser.add_parser(name)
parser.add_argument('--delta', type=int)
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision', nargs='?')
parser = alembic_parser.add_parser('stamp')
parser.add_argument('--sql', action='store_true')
parser.add_argument('revision')
parser = alembic_parser.add_parser('revision')
parser.add_argument('-m', '--message')
parser.add_argument('--autogenerate', action='store_true')
parser.add_argument('--sql', action='store_true')
def load_db_migrate_parsers(subparsers):
migrate_parser = subparsers.add_parser(
'migrate', help='dealing with DB migration'
)
load_alembic_parsers(migrate_parser)
def load_dbshell_parsers(subparsers):
dbshell_parser = subparsers.add_parser(
'dbshell', help='open database shell'
)
add_config_parameter(dbshell_parser)
def load_test_parsers(subparsers):
subparsers.add_parser(
'test', help='run unit tests'
)
def load_shell_parsers(subparsers):
shell_parser = subparsers.add_parser(
'shell', help='open python REPL'
)
add_config_parameter(shell_parser)
def load_settings_parsers(subparsers):
subparsers.add_parser(
'dump_settings', help='dump current settings to YAML'
)
def action_dumpdata(params):
import logging
logging.disable(logging.WARNING)
from nailgun.db.sqlalchemy import fixman
fixman.dump_fixture(params.model)
sys.exit(0)
def action_loaddata(params):
from nailgun.db.sqlalchemy import fixman
from nailgun.logger import logger
logger.info("Uploading fixture...")
with open(params.fixture, "r") as fileobj:
fixman.upload_fixture(fileobj)
logger.info("Done")
def action_loaddefault(params):
from nailgun.db.sqlalchemy import fixman
from nailgun.logger import logger
logger.info("Uploading fixture...")
fixman.upload_fixtures()
logger.info("Done")
def action_syncdb(params):
from nailgun.db import syncdb
from nailgun.logger import logger
logger.info("Syncing database...")
syncdb()
logger.info("Done")
def action_dropdb(params):
from nailgun.db import dropdb
from nailgun.logger import logger
logger.info("Dropping database...")
dropdb()
logger.info("Done")
def action_migrate(params):
from nailgun.db.migration import action_migrate_alembic
action_migrate_alembic(params)
def action_test(params):
from nailgun.logger import logger
from nailgun.unit_test import TestRunner
logger.info("Running tests...")
TestRunner.run()
logger.info("Done")
def action_dbshell(params):
from nailgun.settings import settings
if params.config_file:
settings.update_from_file(params.config_file)
args = ['psql']
env = {}
if settings.DATABASE['passwd']:
env['PGPASSWORD'] = settings.DATABASE['passwd']
if settings.DATABASE['user']:
args += ["-U", settings.DATABASE['user']]
if settings.DATABASE['host']:
args.extend(["-h", settings.DATABASE['host']])
if settings.DATABASE['port']:
args.extend(["-p", str(settings.DATABASE['port'])])
args += [settings.DATABASE['name']]
if os.name == 'nt':
sys.exit(os.system(" ".join(args)))
else:
os.execvpe('psql', args, env)
def action_dump_settings(params):
from nailgun.settings import settings
sys.stdout.write(settings.dump())
def action_shell(params):
from nailgun.db import db
from nailgun.settings import settings
if params.config_file:
settings.update_from_file(params.config_file)
try:
from IPython import embed
embed()
except ImportError:
code.interact(local={'db': db, 'settings': settings})
def action_run(params):
from nailgun.settings import settings
settings.update({
'LISTEN_PORT': int(params.port),
'LISTEN_ADDRESS': params.address,
})
for attr in ['FAKE_TASKS', 'FAKE_TASKS_TICK_COUNT',
'FAKE_TASKS_TICK_INTERVAL', 'FAKE_TASKS_AMQP']:
param = getattr(params, attr.lower())
if param is not None:
settings.update({attr: param})
if params.authentication_method:
auth_method = params.authentication_method
settings.AUTH.update({'AUTHENTICATION_METHOD' : auth_method})
if params.config_file:
settings.update_from_file(params.config_file)
from nailgun.app import appstart
appstart()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(
dest="action", help='actions'
)
load_run_parsers(subparsers)
load_db_parsers(subparsers)
load_db_migrate_parsers(subparsers)
load_dbshell_parsers(subparsers)
load_test_parsers(subparsers)
load_shell_parsers(subparsers)
load_settings_parsers(subparsers)
params, other_params = parser.parse_known_args()
sys.argv.pop(1)
    action = getattr(
        __main__,
        "action_{0}".format(params.action),
        None
    )
    action(params) if action else parser.print_help()
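# Hedged usage sketch (the script name "manage.py" and the config path are
# illustrative; actual names depend on how nailgun is installed):
#
#   python manage.py run -p 8000 -a 0.0.0.0
#   python manage.py syncdb
#   python manage.py migrate upgrade head
#   python manage.py shell -c /etc/nailgun/settings.yaml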
|
|
# -*- coding: utf-8 -*-
import os
import sys
import types
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.tree.tree import DecisionTreeClassifier
from sklearn.ensemble.weight_boosting import AdaBoostClassifier
from sklearn.ensemble.forest import RandomForestClassifier
from sklearn.ensemble.forest import ExtraTreesClassifier
from sklearn.svm.classes import LinearSVC
from sklearn.svm.classes import SVC
from sklearn.svm.classes import NuSVC
from sklearn.neighbors.classification import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn_porter.utils.Environment import Environment
from sklearn_porter.utils.Shell import Shell
# from sklearn_porter.language import *
class Porter(object):
def __init__(self, estimator, language='java', method='predict', **kwargs):
# pylint: disable=unused-argument
"""
Transpile a trained estimator to the
chosen target programming language.
Parameters
----------
language : {'c', 'go', 'java', 'js', 'php', 'ruby'}, default: 'java'
The required target programming language.
method : {'predict', 'predict_proba'}, default: 'predict'
The target prediction method.
"""
# Check language support:
language = str(language).strip().lower()
if language not in ['c', 'go', 'java', 'js', 'php', 'ruby']:
error = "The given language '{}' isn't supported.".format(language)
raise AttributeError(error)
self.target_language = language
# Check method support:
method = str(method).strip().lower()
if method not in ['predict', 'predict_proba']:
error = "The given method '{}' isn't supported.".format(method)
raise AttributeError(error)
self.target_method = method
# Determine the local version of sklearn:
from sklearn import __version__ as sklearn_ver
sklearn_ver = str(sklearn_ver).split('.')
sklearn_ver = [int(v) for v in sklearn_ver]
major, minor = sklearn_ver[0], sklearn_ver[1]
patch = sklearn_ver[2] if len(sklearn_ver) >= 3 else 0
self.sklearn_ver = (major, minor, patch)
# Extract estimator from 'Pipeline':
# sklearn version >= 0.15.0
if not hasattr(self, 'estimator') and self.sklearn_ver[:2] >= (0, 15):
from sklearn.pipeline import Pipeline
if isinstance(estimator, Pipeline):
if hasattr(estimator, '_final_estimator') and \
estimator._final_estimator is not None:
self.estimator = estimator._final_estimator
# Extract estimator from optimizer (GridSearchCV, RandomizedSearchCV):
# sklearn version >= 0.19.0
if not hasattr(self, 'estimator') and self.sklearn_ver[:2] >= (0, 19):
from sklearn.model_selection._search import GridSearchCV
from sklearn.model_selection._search import RandomizedSearchCV
optimizers = (GridSearchCV, RandomizedSearchCV)
if isinstance(estimator, optimizers):
if hasattr(estimator, 'best_estimator_') and \
hasattr(estimator.best_estimator_, '_final_estimator'):
self.estimator = estimator.best_estimator_._final_estimator
if not hasattr(self, 'estimator'):
self.estimator = estimator
# Determine the local supported estimators:
self.supported_classifiers = self._classifiers
self.supported_regressors = self._regressors
# Read algorithm name and type:
self.estimator_name = str(type(self.estimator).__name__)
if isinstance(self.estimator, self.supported_classifiers):
self.estimator_type = 'classifier'
elif isinstance(self.estimator, self.supported_regressors):
self.estimator_type = 'regressor'
else:
error = "Currently the given estimator '{estimator}' isn't" \
" supported.".format(**self.__dict__)
raise ValueError(error)
# Import estimator class:
if sys.version_info[:2] < (3, 3):
pckg = 'estimator.{estimator_type}.{estimator_name}'
level = -1
else:
pckg = 'sklearn_porter.estimator.{estimator_type}.{estimator_name}'
level = 0
pckg = pckg.format(**self.__dict__)
try:
clazz = __import__(pckg, globals(), locals(),
[self.estimator_name], level)
clazz = getattr(clazz, self.estimator_name)
except ImportError:
error = "Currently the given model '{algorithm_name}' " \
"isn't supported.".format(**self.__dict__)
raise AttributeError(error)
# Set target programming language:
pwd = os.path.dirname(__file__)
template_dir = os.path.join(pwd, 'estimator', self.estimator_type,
self.estimator_name, 'templates',
self.target_language)
has_template = os.path.isdir(template_dir)
if not has_template:
error = "Currently the chosen target programming language '{}' " \
"isn't supported for the estimator '{}'." \
"".format(self.estimator_name, self.target_language)
raise AttributeError(error)
# Set target prediction method:
has_method = self.target_method in \
set(getattr(clazz, 'SUPPORTED_METHODS'))
if not has_method:
error = "Currently the chosen model method" \
" '{}' isn't supported.".format(self.target_method)
raise AttributeError(error)
self._tested_dependencies = False
# Create instance with all parameters:
self.template = clazz(**self.__dict__)
def export(self, class_name=None, method_name=None,
num_format=lambda x: str(x), details=False, **kwargs):
# pylint: disable=unused-argument
"""
Transpile a trained model to the syntax of a
chosen programming language.
Parameters
----------
:param class_name : string, default: None
The name for the ported class.
:param method_name : string, default: None
The name for the ported method.
:param num_format : lambda x, default: lambda x: str(x)
The representation of the floating-point values.
:param details : bool, default False
Return additional data for the compilation and execution.
Returns
-------
model : {mix}
The ported model as string or a dictionary
with further information.
"""
if class_name is None or class_name == '':
class_name = self.estimator_name
if method_name is None or method_name == '':
method_name = self.target_method
if isinstance(num_format, types.LambdaType):
self.template._num_format = num_format
output = self.template.export(class_name=class_name,
method_name=method_name, **kwargs)
if not details:
return output
language = self.target_language
filename = Porter._get_filename(class_name, language)
comp_cmd, exec_cmd = Porter._get_commands(filename, class_name,
language)
output = {
'estimator': str(output),
'filename': filename,
'class_name': class_name,
'method_name': method_name,
'cmd': {
'compilation': comp_cmd,
'execution': exec_cmd
},
'algorithm': {
'type': self.estimator_type,
'name': self.estimator_name
}
}
return output
def port(self, class_name=None, method_name=None,
num_format=lambda x: str(x), details=False, **kwargs):
# pylint: disable=unused-argument
"""
Transpile a trained model to the syntax of a
chosen programming language.
Parameters
----------
:param class_name : string, default: None
The name for the ported class.
:param method_name : string, default: None
The name for the ported method.
:param num_format : lambda x, default: lambda x: str(x)
The representation of the floating-point values.
:param details : bool, default: False
Return additional data for the compilation
and execution.
Returns
-------
model : {mix}
The ported model as string or a dictionary
with further information.
"""
loc = locals()
loc.pop(str('self'))
return self.export(**loc)
@property
def _classifiers(self):
"""
        Get the tuple of supported classifiers.
        Returns
        -------
        classifiers : tuple
            The supported classifier classes.
"""
# sklearn version < 0.18.0
classifiers = (
AdaBoostClassifier,
BernoulliNB,
DecisionTreeClassifier,
ExtraTreesClassifier,
GaussianNB,
KNeighborsClassifier,
LinearSVC,
NuSVC,
RandomForestClassifier,
SVC,
)
# sklearn version >= 0.18.0
if self.sklearn_ver[:2] >= (0, 18):
from sklearn.neural_network.multilayer_perceptron \
import MLPClassifier
classifiers += (MLPClassifier, )
return classifiers
@property
def _regressors(self):
"""
        Get the tuple of supported regressors.
        Returns
        -------
        regressors : tuple
            The supported regressor classes.
"""
# sklearn version < 0.18.0
regressors = ()
# sklearn version >= 0.18.0
if self.sklearn_ver[:2] >= (0, 18):
from sklearn.neural_network.multilayer_perceptron \
import MLPRegressor
regressors += (MLPRegressor, )
return regressors
def predict(self, X, class_name=None, method_name=None, tnp_dir='tmp',
keep_tmp_dir=False, num_format=lambda x: str(x)):
"""
Predict using the transpiled model.
Parameters
----------
:param X : {array-like}, shape (n_features) or (n_samples, n_features)
The input data.
:param class_name : string, default: None
The name for the ported class.
:param method_name : string, default: None
The name for the ported method.
:param tnp_dir : string, default: 'tmp'
The path to the temporary directory for
storing the transpiled (and compiled) model.
:param keep_tmp_dir : bool, default: False
Whether to delete the temporary directory
or not.
:param num_format : lambda x, default: lambda x: str(x)
The representation of the floating-point values.
Returns
-------
y : int or array-like, shape (n_samples,)
The predicted class or classes.
"""
if class_name is None:
class_name = self.estimator_name
if method_name is None:
method_name = self.target_method
# Dependencies:
if not self._tested_dependencies:
self._test_dependencies()
self._tested_dependencies = True
# Support:
if 'predict' not in set(self.template.SUPPORTED_METHODS):
error = "Currently the given model method" \
" '{}' isn't supported.".format('predict')
raise AttributeError(error)
# Cleanup:
Shell.call('rm -rf {}'.format(tnp_dir))
Shell.call('mkdir {}'.format(tnp_dir))
# Transpiled model:
details = self.export(class_name=class_name,
method_name=method_name,
num_format=num_format,
details=True)
filename = Porter._get_filename(class_name, self.target_language)
target_file = os.path.join(tnp_dir, filename)
with open(target_file, str('w')) as file_:
file_.write(details.get('estimator'))
# Compilation command:
comp_cmd = details.get('cmd').get('compilation')
if comp_cmd is not None:
Shell.call(comp_cmd, cwd=tnp_dir)
# Execution command:
exec_cmd = details.get('cmd').get('execution')
exec_cmd = str(exec_cmd).split()
pred_y = None
# Single feature set:
if exec_cmd is not None and len(X.shape) == 1:
full_exec_cmd = exec_cmd + [str(sample).strip() for sample in X]
pred_y = Shell.check_output(full_exec_cmd, cwd=tnp_dir)
pred_y = int(pred_y)
# Multiple feature sets:
if exec_cmd is not None and len(X.shape) > 1:
pred_y = np.empty(X.shape[0], dtype=int)
for idx, features in enumerate(X):
full_exec_cmd = exec_cmd + [str(f).strip() for f in features]
pred = Shell.check_output(full_exec_cmd, cwd=tnp_dir)
pred_y[idx] = int(pred)
# Cleanup:
if not keep_tmp_dir:
Shell.call('rm -rf {}'.format(tnp_dir))
return pred_y
def integrity_score(self, X, method='predict', normalize=True,
num_format=lambda x: str(x)):
"""
Compute the accuracy of the ported classifier.
Parameters
----------
:param X : ndarray, shape (n_samples, n_features)
Input data.
:param method : string, default: 'predict'
The method which should be tested.
:param normalize : bool, default: True
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
:param num_format : lambda x, default: lambda x: str(x)
The representation of the floating-point values.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
"""
X = np.array(X)
if not X.ndim > 1:
X = np.array([X])
method = str(method).strip().lower()
if method not in ['predict', 'predict_proba']:
error = "The given method '{}' isn't supported.".format(method)
raise AttributeError(error)
if method == 'predict':
y_true = self.estimator.predict(X)
y_pred = self.predict(X, tnp_dir='tmp_integrity_score',
keep_tmp_dir=True, num_format=num_format)
return accuracy_score(y_true, y_pred, normalize=normalize)
return False
def _test_dependencies(self):
"""
Check all target programming and operating
system dependencies.
"""
lang = self.target_language
# Dependencies:
deps = {
'c': ['gcc'],
'java': ['java', 'javac'],
'js': ['node'],
'go': ['go'],
'php': ['php'],
'ruby': ['ruby']
}
current_deps = deps.get(lang) + ['mkdir', 'rm']
Environment.check_deps(current_deps)
@staticmethod
def _get_filename(class_name, language):
"""
Generate the specific filename.
Parameters
----------
:param class_name : str
The used class name.
:param language : {'c', 'go', 'java', 'js', 'php', 'ruby'}
The target programming language.
Returns
-------
filename : str
The generated filename.
"""
name = str(class_name).strip()
lang = str(language)
# Name:
if language in ['java', 'php']:
name = "".join([name[0].upper() + name[1:]])
# Suffix:
suffix = {
'c': 'c', 'java': 'java', 'js': 'js',
'go': 'go', 'php': 'php', 'ruby': 'rb'
}
suffix = suffix.get(lang, lang)
# Filename:
return '{}.{}'.format(name, suffix)
@staticmethod
def _get_commands(filename, class_name, language):
"""
Generate the related compilation and
execution commands.
Parameters
----------
:param filename : str
The used filename.
:param class_name : str
The used class name.
:param language : {'c', 'go', 'java', 'js', 'php', 'ruby'}
The target programming language.
Returns
-------
comp_cmd, exec_cmd : (str, str)
The compilation and execution command.
"""
cname = str(class_name)
fname = str(filename)
lang = str(language)
# Compilation variants:
comp_vars = {
# gcc brain.c -o brain
'c': 'gcc {} -lm -o {}'.format(fname, cname),
# javac Brain.java
'java': 'javac {}'.format(fname),
# go build -o brain brain.go
'go': 'go build -o {} {}.go'.format(cname, cname)
}
comp_cmd = comp_vars.get(lang, None)
# Execution variants:
exec_vars = {
# ./brain
'c': os.path.join('.', cname),
# java -classpath . Brain
'java': 'java -classpath . {}'.format(cname),
# node brain.js
'js': 'node {}'.format(fname),
# php -f Brain.php
'php': 'php -f {}'.format(fname),
# ruby brain.rb
'ruby': 'ruby {}'.format(fname),
# ./brain
'go': os.path.join('.', cname),
}
exec_cmd = exec_vars.get(lang, None)
return comp_cmd, exec_cmd
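# Hedged usage sketch (not part of this module): transpiling a trained
# scikit-learn classifier to Java source with Porter.
#
#   from sklearn.datasets import load_iris
#   from sklearn.tree import DecisionTreeClassifier
#   from sklearn_porter import Porter
#
#   X, y = load_iris(return_X_y=True)
#   clf = DecisionTreeClassifier(max_depth=3).fit(X, y)
#   porter = Porter(clf, language='java', method='predict')
#   print(porter.export(class_name='Brain', method_name='predict'))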
|
|
from io import BytesIO
from unittest import TestCase
from eve.io.media import MediaStorage
from eve.io.mongo import GridFSMediaStorage
from eve.tests import TestBase, MONGO_DBNAME
from eve import STATUS_OK, ID_FIELD, STATUS, STATUS_ERR, ISSUES, ETAG
import base64
from bson import ObjectId
class TestMediaStorage(TestCase):
def test_base_media_storage(self):
a = MediaStorage()
self.assertEqual(a.app, None)
a = MediaStorage("hello")
self.assertEqual(a.app, "hello")
self.assertRaises(NotImplementedError, a.get, 1)
self.assertRaises(NotImplementedError, a.put, "clean", "filename")
self.assertRaises(NotImplementedError, a.delete, 1)
self.assertRaises(NotImplementedError, a.exists, 1)
class TestGridFSMediaStorage(TestBase):
def setUp(self):
super(TestGridFSMediaStorage, self).setUp()
self.url = self.known_resource_url
self.headers = [('Content-Type', 'multipart/form-data')]
self.test_field, self.test_value = 'ref', "1234567890123456789054321"
# we want an explicit binary as Py3 encodestring() expects binaries.
self.clean = b'my file contents'
        # encodestring will raise a DeprecationWarning under Python 3.3+, but
# the alternative encodebytes is not available in Python 2.
self.encoded = base64.encodestring(self.clean).decode('utf-8')
def test_gridfs_media_storage_errors(self):
self.assertRaises(TypeError, GridFSMediaStorage)
self.assertRaises(TypeError, GridFSMediaStorage, "hello")
def test_gridfs_media_storage_post(self):
# send something different than a file and get an error back
data = {'media': 'not a file'}
r, s = self.parse_response(
self.test_client.post(self.url, data=data, headers=self.headers))
self.assertEqual(STATUS_ERR, r[STATUS])
# validates media fields
self.assertTrue('file was expected' in r[ISSUES]['media'])
# also validates ordinary fields
self.assertTrue('required' in r[ISSUES][self.test_field])
r, s = self._post()
self.assertEqual(STATUS_OK, r[STATUS])
# compare original and returned data
_id = r[ID_FIELD]
self.assertMediaField(_id, self.encoded, self.clean)
# GET the file at the resource endpoint
where = 'where={"%s": "%s"}' % (ID_FIELD, _id)
r, s = self.parse_response(
self.test_client.get('%s?%s' % (self.url, where)))
self.assertEqual(len(r['_items']), 1)
returned = r['_items'][0]['media']
# returned value is a base64 encoded string
self.assertEqual(returned, self.encoded)
# which decodes to the original clean
self.assertEqual(base64.decodestring(returned.encode()), self.clean)
def test_gridfs_media_storage_post_excluded_file_in_result(self):
# send something different than a file and get an error back
data = {'media': 'not a file'}
r, s = self.parse_response(
self.test_client.post(self.url, data=data, headers=self.headers))
self.assertEqual(STATUS_ERR, r[STATUS])
# validates media fields
self.assertTrue('file was expected' in r[ISSUES]['media'])
# also validates ordinary fields
self.assertTrue('required' in r[ISSUES][self.test_field])
r, s = self._post()
self.assertEqual(STATUS_OK, r[STATUS])
self.app.config['RETURN_MEDIA_AS_BASE64_STRING'] = False
# compare original and returned data
_id = r[ID_FIELD]
# GET the file at the resource endpoint
where = 'where={"%s": "%s"}' % (ID_FIELD, _id)
r, s = self.parse_response(
self.test_client.get('%s?%s' % (self.url, where)))
self.assertEqual(len(r['_items']), 1)
returned = r['_items'][0]['media']
        # media is excluded from the response, so None is returned
self.assertEqual(returned, None)
def test_gridfs_media_storage_post_extended(self):
r, s = self._post()
self.assertEqual(STATUS_OK, r[STATUS])
# request extended format file response
self.app.config['EXTENDED_MEDIA_INFO'] = ['content_type', 'length']
# compare original and returned data
_id = r[ID_FIELD]
self.assertMediaFieldExtended(_id, self.encoded, self.clean)
# GET the file at the resource endpoint
where = 'where={"%s": "%s"}' % (ID_FIELD, _id)
r, s = self.parse_response(
self.test_client.get('%s?%s' % (self.url, where)))
self.assertEqual(len(r['_items']), 1)
returned = r['_items'][0]['media']
# returned value is a base64 encoded string
self.assertEqual(returned['file'], self.encoded)
# which decodes to the original clean
self.assertEqual(base64.decodestring(returned['file'].encode()),
self.clean)
# also verify our extended fields
self.assertEqual(returned['content_type'], 'text/plain')
self.assertEqual(returned['length'], 16)
def test_gridfs_media_storage_post_extended_excluded_file_in_result(self):
r, s = self._post()
self.assertEqual(STATUS_OK, r[STATUS])
# request extended format file response
self.app.config['EXTENDED_MEDIA_INFO'] = ['content_type', 'length']
self.app.config['RETURN_MEDIA_AS_BASE64_STRING'] = False
# compare original and returned data
_id = r[ID_FIELD]
# GET the file at the resource endpoint
where = 'where={"%s": "%s"}' % (ID_FIELD, _id)
r, s = self.parse_response(
self.test_client.get('%s?%s' % (self.url, where)))
self.assertEqual(len(r['_items']), 1)
returned = r['_items'][0]['media']
# returned value is None
self.assertEqual(returned['file'], None)
# also verify our extended fields
self.assertEqual(returned['content_type'], 'text/plain')
self.assertEqual(returned['length'], 16)
def test_gridfs_media_storage_put(self):
r, s = self._post()
_id = r[ID_FIELD]
etag = r[ETAG]
# compare original and returned data
self.assertMediaField(_id, self.encoded, self.clean)
with self.app.test_request_context():
# retrieve media_id
media_id = self.assertMediaStored(_id)
# PUT replaces the file with new one
clean = b'my new file contents'
encoded = base64.encodestring(clean).decode()
test_field, test_value = 'ref', "9234567890123456789054321"
data = {'media': (BytesIO(clean), 'test.txt'), test_field: test_value}
headers = [('Content-Type', 'multipart/form-data'), ('If-Match', etag)]
r, s = self.parse_response(
self.test_client.put(('%s/%s' % (self.url, _id)), data=data,
headers=headers))
self.assertEqual(STATUS_OK, r[STATUS])
with self.app.test_request_context():
# media has been properly stored
self.assertMediaStored(_id)
# compare original and returned data
r, s = self.assertMediaField(_id, encoded, clean)
# and of course, the ordinary field has been updated too
self.assertEqual(r[test_field], test_value)
with self.app.test_request_context():
# previous media doesn't exist anymore (it's been deleted)
self.assertFalse(self.app.media.exists(media_id))
def test_gridfs_media_storage_patch(self):
r, s = self._post()
_id = r[ID_FIELD]
etag = r[ETAG]
# compare original and returned data
self.assertMediaField(_id, self.encoded, self.clean)
with self.app.test_request_context():
# retrieve media_id
media_id = self.assertMediaStored(_id)
# PATCH replaces the file with new one
clean = b'my new file contents'
encoded = base64.encodestring(clean).decode()
test_field, test_value = 'ref', "9234567890123456789054321"
data = {'media': (BytesIO(clean), 'test.txt'), test_field: test_value}
headers = [('Content-Type', 'multipart/form-data'), ('If-Match', etag)]
r, s = self.parse_response(
self.test_client.patch(('%s/%s' % (self.url, _id)), data=data,
headers=headers))
self.assertEqual(STATUS_OK, r[STATUS])
# compare original and returned data
r, s = self.assertMediaField(_id, encoded, clean)
# and of course, the ordinary field has been updated too
self.assertEqual(r[test_field], test_value)
with self.app.test_request_context():
# previous media doesn't exist anymore (it's been deleted)
self.assertFalse(self.app.media.exists(media_id))
def test_gridfs_media_storage_patch_null(self):
# set 'media' field to 'nullable'
self.domain[self.known_resource]['schema']['media']['nullable'] = True
response, status = self._post()
self.assert201(status)
_id = response[ID_FIELD]
etag = response[ETAG]
# test that nullable media field can be set to None
data = {'media': None}
headers = [('If-Match', etag)]
response, status = self.patch(('%s/%s' % (self.url, _id)), data=data,
headers=headers)
self.assert200(status)
response, status = self.get(self.known_resource, item=_id)
self.assert200(status)
self.assertEqual(response['media'], None)
def test_gridfs_media_storage_delete(self):
r, s = self._post()
_id = r[ID_FIELD]
etag = r[ETAG]
with self.app.test_request_context():
# retrieve media_id and compare original and returned data
self.assertMediaField(_id, self.encoded, self.clean)
media_id = self.assertMediaStored(_id)
# DELETE deletes both the document and the media file
headers = [('If-Match', etag)]
r, s = self.parse_response(
self.test_client.delete(('%s/%s' % (self.url, _id)),
headers=headers))
self.assert204(s)
with self.app.test_request_context():
# media doesn't exist anymore (it's been deleted)
self.assertFalse(self.app.media.exists(media_id))
# GET returns 404
r, s = self.parse_response(self.test_client.get('%s/%s' % (self.url,
_id)))
self.assert404(s)
def test_gridfs_media_storage_delete_projection(self):
""" test that #284 is fixed: If you have a media field, and set
datasource projection to 0 for that field, the media will not be
deleted
"""
r, s = self._post()
_id = r[ID_FIELD]
with self.app.test_request_context():
# retrieve media_id and compare original and returned data
media_id = self.assertMediaStored(_id)
self.app.config['DOMAIN']['contacts']['datasource']['projection'] = \
{"media": 0}
r, s = self.parse_response(self.test_client.get('%s/%s' % (self.url,
_id)))
etag = r[ETAG]
# DELETE deletes both the document and the media file
headers = [('If-Match', etag)]
r, s = self.parse_response(
self.test_client.delete(('%s/%s' % (self.url, _id)),
headers=headers))
self.assert204(s)
with self.app.test_request_context():
# media doesn't exist anymore (it's been deleted)
self.assertFalse(self.app.media.exists(media_id))
# GET returns 404
r, s = self.parse_response(self.test_client.get('%s/%s' % (self.url,
_id)))
self.assert404(s)
def test_gridfs_media_storage_return_url(self):
self.app._init_media_endpoint()
self.app.config['RETURN_MEDIA_AS_BASE64_STRING'] = False
self.app.config['RETURN_MEDIA_AS_URL'] = True
r, s = self._post()
self.assertEqual(STATUS_OK, r[STATUS])
_id = r[ID_FIELD]
# GET the file at the resource endpoint
where = 'where={"%s": "%s"}' % (ID_FIELD, _id)
r, s = self.parse_response(
self.test_client.get('%s?%s' % (self.url, where)))
self.assertEqual(len(r['_items']), 1)
url = r['_items'][0]['media']
with self.app.test_request_context():
media_id = self.assertMediaStored(_id)
self.assertEqual('/media/%s' % media_id, url)
response = self.test_client.get(url)
self.assertEqual(self.clean, response.get_data())
def assertMediaField(self, _id, encoded, clean):
# GET the file at the item endpoint
r, s = self.parse_response(self.test_client.get('%s/%s' % (self.url,
_id)))
returned = r['media']
# returned value is a base64 encoded string
self.assertEqual(returned, encoded)
# which decodes to the original file clean
self.assertEqual(base64.decodestring(returned.encode()), clean)
return r, s
def assertMediaFieldExtended(self, _id, encoded, clean):
# GET the file at the item endpoint
r, s = self.parse_response(self.test_client.get('%s/%s' % (self.url,
_id)))
returned = r['media']['file']
# returned value is a base64 encoded string
self.assertEqual(returned, encoded)
# which decodes to the original file clean
self.assertEqual(base64.decodestring(returned.encode()), clean)
return r, s
def assertMediaStored(self, _id):
_db = self.connection[MONGO_DBNAME]
# retrieve media id
media_id = _db.contacts.find_one({ID_FIELD: ObjectId(_id)})['media']
# verify it's actually stored in the media storage system
self.assertTrue(self.app.media.exists(media_id))
return media_id
def _post(self):
# send a file and a required, ordinary field with no issues
data = {'media': (BytesIO(self.clean), 'test.txt'), self.test_field:
self.test_value}
return self.parse_response(self.test_client.post(
self.url, data=data, headers=self.headers))
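# Hedged usage sketch (outside this test suite): wiring GridFSMediaStorage
# into an Eve application; the settings file name is hypothetical.
#
#   from eve import Eve
#   from eve.io.mongo import GridFSMediaStorage
#
#   app = Eve(settings='settings.py', media=GridFSMediaStorage)
#   app.run()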
|
|
"""UI classes like StringWriter and Buttons"""
import pygame
import sys
def update_resolution(settings, player, width, height):
pixel_width = ((player.segment_height + player.segment_margin) * width) + player.segment_margin
pixel_height = ((player.segment_height + player.segment_margin) * height) + (player.segment_margin + settings.overlay_width)
settings.screen_size = [pixel_width, pixel_height]
settings.screen_segments[0] = width
settings.screen_segments[1] = height
class StringWriter:
"""Class for drawing generic text to the screen"""
def __init__(self, screen, string, size, x, y, color=(0, 0, 0), bold=False):
self.screen = screen
if not bold:
self.font = pygame.font.Font("Comfortaa-Regular.ttf", size) # Create font with desired size
else:
self.font = pygame.font.Font("Comfortaa-Bold.ttf", size)
self.text = self.font.render(string, 1, color) # Create a text "sprite"
        self.text_rect = self.text.get_rect()  # get its background rect
        self.text_rect.centerx = x  # and set its location
self.text_rect.centery = y
def update_text(self, string, color=(0, 0, 0)):
try:
self.text = self.font.render(string, 1, color)
except TypeError:
self.text = self.font.render(str(string), 1, color)
def reposition(self, x, y):
self.text_rect = self.text.get_rect()
self.text_rect.centerx = x
self.text_rect.centery = y
def draw(self):
self.screen.blit(self.text, self.text_rect)
class StringInputField:
"""Input field class for capturing input from the keyboard"""
def __init__(self, screen, settings, x, y, limit=20,
default_text="Click to write...",
background_color=(255, 255, 255), width=500, height=50,
text_color=(0, 0, 0), border=6, border_color=(0, 0, 0)):
self.screen = screen
self.settings = settings
self.x = x
self.y = y
self.limit = limit
self.default_text = default_text
self.keyboard_input = default_text
self.text_length = len(self.keyboard_input)
self.capturing = False
        # Graphics stuff
# Text
self.text = StringWriter(self.screen, self.keyboard_input, 25,
self.x, self.y, color=text_color)
# Input box
self.image = pygame.Surface([width, height])
self.image.fill(background_color)
self.rect = self.image.get_rect()
self.rect.centerx = self.x
self.rect.centery = self.y
# Border
self.border = pygame.Surface([width+border, height+border])
self.border.fill(border_color)
self.border_rect = self.border.get_rect()
self.border_rect.centerx = self.x
self.border_rect.centery = self.y
def capture(self, event):
banned_chars = [pygame.K_BACKSPACE, pygame.K_TAB,
pygame.K_KP_ENTER]
if self.capturing:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE and self.capturing:
self.capturing = False
elif event.key == pygame.K_BACKSPACE and len(self.keyboard_input) > 0:
self.keyboard_input = self.keyboard_input[:-1]
elif len(self.keyboard_input) <= self.limit and event.key not in banned_chars:
self.keyboard_input += event.unicode
def restore_defaults(self):
self.keyboard_input = self.default_text
self.capturing = False
def draw_frame(self):
if self.text_length != len(self.keyboard_input):
self.text.update_text(self.keyboard_input)
self.text.reposition(self.x, self.y)
self.text_length = len(self.keyboard_input)
self.screen.blit(self.border, self.border_rect)
self.screen.blit(self.image, self.rect)
self.text.draw()
def check_pressed(self, event):
if event.type == pygame.MOUSEBUTTONDOWN:
if event.pos[0] >= self.rect.left and event.pos[0] <= self.rect.right:
if event.pos[1] >= self.rect.top and event.pos[1] <= self.rect.bottom:
self.keyboard_input = ""
self.capturing = True
else:
self.restore_defaults()
elif event.type == pygame.QUIT:
sys.exit()
class Button:
"""Button class for creating buttons"""
def __init__(self, x, y, width, height, color, text, screen, border=6,
border_color=(0, 0, 0)):
self.screen = screen
self.text = text
self.image = pygame.Surface([width, height])
self.border = pygame.Surface([width+border, height+border])
self.image.fill(color)
self.border.fill(border_color)
self.rect = self.image.get_rect()
self.rect.centerx = x
self.rect.centery = y
self.border_rect = self.border.get_rect()
self.border_rect.centerx = self.rect.centerx
self.border_rect.centery = self.rect.centery
self.button_text = StringWriter(self.screen, text, 30,
self.rect.centerx,
self.rect.centery)
def draw_button(self):
self.screen.blit(self.border, self.border_rect)
self.screen.blit(self.image, self.rect)
self.button_text.draw()
    def pressed(self, event):
        return (self.rect.left <= event.pos[0] <= self.rect.right and
                self.rect.top <= event.pos[1] <= self.rect.bottom)
class IntSelector:
def __init__(self, screen, x, y, title, integer,
color=(96, 130, 182), min_int=0, max_int=999):
self.screen = screen
self.title = title
self.integer = integer
self.x = x
self.y = y
self.min_int = min_int
self.max_int = max_int
# Labels
self.title_label = StringWriter(self.screen, self.title, 20, x, y-50)
self.display = StringWriter(self.screen, str(self.integer), 20,
x, y)
# Buttons
# Plus
self.plus_button = Button(x+100, y, 50, 50, color, "+", self.screen)
self.plus_five_button = Button(x + 160, y, 50, 50, color, "+5",
self.screen)
# Minus
self.minus_button = Button(x - 100, y, 50, 50, color, "-", self.screen)
self.minus_five_button = Button(x - 160, y, 50, 50, color, "-5",
self.screen)
def get_int(self):
return self.integer
def draw(self):
self.title_label.draw()
self.display.draw()
self.plus_button.draw_button()
self.plus_five_button.draw_button()
self.minus_button.draw_button()
self.minus_five_button.draw_button()
def check_events(self, event):
if self.plus_button.pressed(event) and self.integer < self.max_int:
self.integer += 1
self.display.update_text(str(self.integer))
elif self.plus_five_button.pressed(event) and self.integer+5 <= self.max_int:
self.integer += 5
self.display.update_text(str(self.integer))
elif self.minus_button.pressed(event) and self.integer > self.min_int:
self.integer -= 1
self.display.update_text(str(self.integer))
elif self.minus_five_button.pressed(event) and self.integer-5 >= self.min_int:
self.integer -= 5
self.display.update_text(str(self.integer))
class GameOverlay:
def __init__(self, screen, settings):
self.screen = screen
self.settings = settings
self.previous_score = settings.score
self.previous_speed = settings.game_speed
self.previous_size = settings.snake_size
# Overlay background
self.background = pygame.Surface([self.settings.screen_size[0],
self.settings.overlay_width])
self.background.fill(self.settings.colors["d-grey"])
self.background_rect = self.background.get_rect()
# Divider line
self.line = pygame.Surface([self.settings.screen_size[0], 5])
self.line_rect = self.line.get_rect()
line_y = self.settings.overlay_width - self.line.get_height()
# Positioning
self.background_rect.topleft = (0, 0)
self.line_rect.topleft = (0, line_y)
# Text labels
self.score_label = StringWriter(screen, "Score: " + str(settings.score),
25, 100,
settings.overlay_width//2 - self.line.get_height(),
bold=True)
self.speed_label = StringWriter(self.screen,
"Speed: " + str(self.settings.game_speed),
25, 250,
self.settings.overlay_width//2 - self.line.get_height(),
bold=True)
self.size_label = StringWriter(self.screen, "Size: " + str(self.previous_size),
25, 400,
self.settings.overlay_width // 2 - self.line.get_height(),
bold=True)
def draw(self, player):
self.update_labels(player)
self.screen.blit(self.background, self.background_rect)
self.screen.blit(self.line, self.line_rect)
self.speed_label.draw()
self.score_label.draw()
self.size_label.draw()
def update_labels(self, player):
if self.previous_speed != self.settings.game_speed:
self.previous_speed = self.settings.game_speed
self.speed_label.update_text("Speed: " + str(self.settings.game_speed))
if self.previous_score != self.settings.score:
self.previous_score = self.settings.score
self.score_label.update_text("Score: " + str(self.settings.score))
if self.previous_size != len(player.snake_segments):
self.previous_size = len(player.snake_segments)
self.size_label.update_text(("Size: " + str(self.previous_size)))
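# Hedged usage sketch (assumes a pygame display and the Comfortaa font files
# used by StringWriter are available next to the script):
#
#   import pygame
#   pygame.init()
#   screen = pygame.display.set_mode((640, 480))
#   title = StringWriter(screen, "Snake", 40, 320, 100)
#   start_button = Button(320, 300, 200, 60, (96, 130, 182), "Start", screen)
#   title.draw()
#   start_button.draw_button()
#   pygame.display.flip()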
|
|
# Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
import time
from unittest import mock
from neutron_lib import constants as n_const
from oslo_config import cfg
from oslo_log import log
from neutron.agent.common import ip_lib
from neutron.agent.common import ovs_lib
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \
import ovs_test_base
from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \
import test_vlanmanager
Switch = collections.namedtuple('Switch', ['br_name'])
# Useful global dummy variables.
NET_UUID = '3faeebfe-5d37-11e1-a64b-000c29d5f0a7'
LS_ID = 420
LV_ID = 42
LV_IDS = [42, 43]
VIF_ID = '404deaec-5d37-11e1-a64b-000c29d5f0a8'
VIF_MAC = '3c:09:24:1e:78:23'
OFPORT_NUM = 1
VIF_PORT = ovs_lib.VifPort('port', OFPORT_NUM, VIF_ID, VIF_MAC,
Switch(br_name='br_name'))
VIF_PORTS = {VIF_ID: VIF_PORT}
FIXED_IPS = [{'subnet_id': 'my-subnet-uuid',
'ip_address': '1.1.1.1'}]
VM_DEVICE_OWNER = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
TUN_OFPORTS = {n_const.TYPE_GRE: {'ip1': '11', 'ip2': '12'}}
BCAST_MAC = "01:00:00:00:00:00/01:00:00:00:00:00"
UCAST_MAC = "00:00:00:00:00:00/01:00:00:00:00:00"
class DummyPort(object):
def __init__(self, interface_id):
self.interface_id = interface_id
class DummyVlanBinding(object):
def __init__(self, network_id, vlan_id):
self.network_id = network_id
self.vlan_id = vlan_id
class TunnelTest(object):
def setUp(self):
super(TunnelTest, self).setUp()
self.useFixture(test_vlanmanager.LocalVlanManagerFixture())
conn_patcher = mock.patch(
'neutron.agent.ovsdb.impl_idl._connection')
conn_patcher.start()
mock.patch(
'neutron.api.rpc.handlers.resources_rpc.ResourcesPullRpcApi'
).start()
self.addCleanup(conn_patcher.stop)
cfg.CONF.set_default('firewall_driver',
'neutron.agent.firewall.NoopFirewallDriver',
group='SECURITYGROUP')
cfg.CONF.set_override('report_interval', 0, 'AGENT')
cfg.CONF.set_override('explicitly_egress_direct', True, 'AGENT')
self.INT_BRIDGE = 'integration_bridge'
self.TUN_BRIDGE = 'tunnel_bridge'
self.MAP_TUN_BRIDGE = 'tun_br_map'
self.AUX_BRIDGE = 'ancillary_bridge'
self.NET_MAPPING = ['net1:%s' % self.MAP_TUN_BRIDGE]
self.INT_OFPORT = 11111
self.TUN_OFPORT = 22222
self.MAP_TUN_INT_OFPORT = 33333
self.MAP_TUN_PHY_OFPORT = 44444
self.LVM_DATA = (
LV_ID, 'gre', None, LS_ID, VIF_PORTS)
self.LVM_FLAT_DATA = (
LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS)
self.LVM_VLAN_DATA = (
LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS)
self.inta = mock.Mock()
self.intb = mock.Mock()
mock.patch.object(ovs_lib.BaseOVS, 'config',
new_callable=mock.PropertyMock,
return_value={}).start()
mock.patch('neutron.agent.ovsdb.impl_idl._connection').start()
self.ovs_bridges = {
self.INT_BRIDGE: mock.create_autospec(
self.br_int_cls('br-int')),
self.TUN_BRIDGE: mock.create_autospec(
self.br_tun_cls('br-tun')),
self.MAP_TUN_BRIDGE: mock.create_autospec(
self.br_phys_cls('br-phys')),
self.AUX_BRIDGE: mock.create_autospec(
ovs_lib.OVSBridge('br-aux')),
}
self.ovs_int_ofports = {
'patch-tun': self.TUN_OFPORT,
'int-%s' % self.MAP_TUN_BRIDGE: self.MAP_TUN_INT_OFPORT
}
mock.patch('neutron.agent.rpc.PluginReportStateAPI.'
'has_alive_neutron_server').start()
def lookup_br(br_name, *args, **kwargs):
return self.ovs_bridges[br_name]
self.mock_int_bridge_cls = mock.patch(self._BR_INT_CLASS,
autospec=True).start()
self.mock_int_bridge_cls.side_effect = lookup_br
self.mock_phys_bridge_cls = mock.patch(self._BR_PHYS_CLASS,
autospec=True).start()
self.mock_phys_bridge_cls.side_effect = lookup_br
self.mock_tun_bridge_cls = mock.patch(self._BR_TUN_CLASS,
autospec=True).start()
self.mock_tun_bridge_cls.side_effect = lookup_br
self.mock_aux_bridge_cls = mock.patch(
'neutron.agent.common.ovs_lib.OVSBridge',
autospec=True).start()
self.mock_aux_bridge_cls.side_effect = lookup_br
self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE]
self.mock_int_bridge.add_port.return_value = self.MAP_TUN_INT_OFPORT
self.mock_int_bridge.add_patch_port.side_effect = (
lambda tap, peer: self.ovs_int_ofports[tap])
self.mock_int_bridge.port_exists.return_value = False
self.mock_int_bridge.get_vif_ports.return_value = []
self.mock_int_bridge.get_ports_attributes.return_value = []
self.mock_int_bridge.db_get_val.return_value = {}
self.mock_map_tun_bridge = self.ovs_bridges[self.MAP_TUN_BRIDGE]
self.mock_map_tun_bridge.br_name = self.MAP_TUN_BRIDGE
self.mock_map_tun_bridge.add_port.return_value = (
self.MAP_TUN_PHY_OFPORT)
self.mock_map_tun_bridge.add_patch_port.return_value = (
self.MAP_TUN_PHY_OFPORT)
self.mock_map_tun_bridge.port_exists.return_value = False
self.mock_tun_bridge = self.ovs_bridges[self.TUN_BRIDGE]
self.mock_tun_bridge.add_port.return_value = self.INT_OFPORT
self.mock_tun_bridge.add_patch_port.return_value = self.INT_OFPORT
self.ipdevice = mock.patch.object(ip_lib, 'IPDevice').start()
self.ipwrapper = mock.patch.object(ip_lib, 'IPWrapper').start()
add_veth = self.ipwrapper.return_value.add_veth
add_veth.return_value = [self.inta, self.intb]
self.get_bridges = mock.patch.object(ovs_lib.BaseOVS,
'get_bridges').start()
self.get_bridges.return_value = [self.INT_BRIDGE,
self.TUN_BRIDGE,
self.MAP_TUN_BRIDGE,
self.AUX_BRIDGE]
self.get_bridge_external_bridge_id = mock.patch.object(
ovs_lib.BaseOVS,
'get_bridge_external_bridge_id').start()
self.get_bridge_external_bridge_id.side_effect = (
lambda bridge, log_errors: bridge if bridge in self.ovs_bridges
else None)
self.execute = mock.patch('neutron.agent.common.utils.execute').start()
self.mock_check_bridge_datapath_id = mock.patch.object(
self.mod_agent.OVSNeutronAgent,
'_check_bridge_datapath_id').start()
self._define_expected_calls()
def _define_expected_calls(self, arp_responder=False, igmp_snooping=False):
self.mock_int_bridge_cls_expected = [
mock.call(self.INT_BRIDGE,
datapath_type=mock.ANY),
]
self.mock_phys_bridge_cls_expected = [
mock.call(self.MAP_TUN_BRIDGE,
datapath_type=mock.ANY),
]
self.mock_tun_bridge_cls_expected = [
mock.call(self.TUN_BRIDGE,
datapath_type=mock.ANY),
]
self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE]
self.mock_int_bridge_expected = [
mock.call.create(),
mock.call.set_secure_mode(),
mock.call.setup_controllers(mock.ANY),
mock.call.set_igmp_snooping_state(igmp_snooping),
mock.call.setup_default_table(enable_openflow_dhcp=False,
enable_dhcpv6=False),
]
self.mock_map_tun_bridge_expected = [
mock.call.create(),
mock.call.set_secure_mode(),
mock.call.setup_controllers(mock.ANY),
mock.call.setup_default_table(),
mock.call.port_exists('phy-%s' % self.MAP_TUN_BRIDGE),
mock.call.add_patch_port('phy-%s' % self.MAP_TUN_BRIDGE,
constants.NONEXISTENT_PEER),
]
self.mock_int_bridge_expected += [
mock.call.db_get_val('Interface', 'int-%s' % self.MAP_TUN_BRIDGE,
'type', log_errors=False),
mock.call.port_exists('int-%s' % self.MAP_TUN_BRIDGE),
mock.call.add_patch_port('int-%s' % self.MAP_TUN_BRIDGE,
constants.NONEXISTENT_PEER),
mock.call.set_igmp_snooping_flood('int-%s' % self.MAP_TUN_BRIDGE,
igmp_snooping),
]
self.mock_int_bridge_expected += [
mock.call.drop_port(in_port=self.MAP_TUN_INT_OFPORT),
mock.call.set_db_attribute(
'Interface', 'int-%s' % self.MAP_TUN_BRIDGE,
'options', {'peer': 'phy-%s' % self.MAP_TUN_BRIDGE}),
]
self.mock_map_tun_bridge_expected += [
mock.call.drop_port(in_port=self.MAP_TUN_PHY_OFPORT),
mock.call.set_db_attribute(
'Interface', 'phy-%s' % self.MAP_TUN_BRIDGE,
'options', {'peer': 'int-%s' % self.MAP_TUN_BRIDGE}),
]
self.mock_aux_bridge = self.ovs_bridges[self.AUX_BRIDGE]
self.mock_aux_bridge_expected = [
]
self.mock_tun_bridge_expected = [
mock.call.create(secure_mode=True),
mock.call.setup_controllers(mock.ANY),
mock.call.port_exists('patch-int'),
mock.ANY,
mock.call.add_patch_port('patch-int', 'patch-tun'),
]
self.mock_int_bridge_expected += [
mock.call.port_exists('patch-tun'),
mock.call.add_patch_port('patch-tun', 'patch-int'),
mock.call.set_igmp_snooping_flood('patch-tun', igmp_snooping),
]
self.mock_int_bridge_expected += [
mock.call.get_vif_ports((ovs_lib.INVALID_OFPORT,
ovs_lib.UNASSIGNED_OFPORT)),
mock.call.get_ports_attributes(
'Port', columns=['name', 'other_config', 'tag'], ports=[])
]
self.mock_tun_bridge_expected += [
mock.call.setup_default_table(self.INT_OFPORT, arp_responder),
]
self.ipdevice_expected = []
self.ipwrapper_expected = [mock.call()]
self.get_bridges_expected = [mock.call(), mock.call()]
self.inta_expected = []
self.intb_expected = []
self.execute_expected = []
self.mock_int_bridge_expected += [
mock.call.install_goto(
dest_table_id=constants.LOCAL_MAC_DIRECT,
in_port=self.MAP_TUN_INT_OFPORT,
priority=4, table_id=constants.TRANSIENT_TABLE),
mock.call.install_goto(
dest_table_id=constants.LOCAL_MAC_DIRECT,
in_port=self.TUN_OFPORT,
priority=4, table_id=constants.TRANSIENT_TABLE),
mock.call.install_goto(
dest_table_id=constants.TRANSIENT_EGRESS_TABLE,
table_id=constants.LOCAL_MAC_DIRECT),
]
def _build_agent(self, **config_opts_agent):
"""Configure and initialize OVS agent.
:param config_opts_agent: a dict with options to override the
default values for the AGENT group.
"""
bridge_classes = {
'br_int': self.mock_int_bridge_cls,
'br_phys': self.mock_phys_bridge_cls,
'br_tun': self.mock_tun_bridge_cls,
}
cfg.CONF.set_override('integration_bridge', self.INT_BRIDGE, 'OVS')
cfg.CONF.set_override('tunnel_bridge', self.TUN_BRIDGE, 'OVS')
cfg.CONF.set_override('local_ip', '10.0.0.1', 'OVS')
cfg.CONF.set_override('bridge_mappings', self.NET_MAPPING, 'OVS')
cfg.CONF.set_override('polling_interval', 2, 'AGENT')
cfg.CONF.set_override('tunnel_types', ['gre'], 'AGENT')
cfg.CONF.set_override('minimize_polling', False, 'AGENT')
cfg.CONF.set_override('enable_ipv6', False, 'DHCP')
for k, v in config_opts_agent.items():
cfg.CONF.set_override(k, v, 'AGENT')
ext_mgr = mock.Mock()
ext_mgr.names = mock.Mock(return_value=[])
agent = self.mod_agent.OVSNeutronAgent(
bridge_classes, ext_mgr, cfg.CONF)
mock.patch.object(agent.ovs.ovsdb, 'idl_monitor').start()
return agent
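    # Illustrative note, not part of the original tests: _build_agent() maps
    # each keyword argument onto cfg.CONF.set_override(<name>, <value>,
    # 'AGENT') before constructing the agent, so e.g. the ARP responder test
    # below calls:
    #
    #     agent = self._build_agent(l2_population=True, arp_responder=True)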
def _verify_mock_call(self, mock_obj, expected):
mock_obj.assert_has_calls(expected)
self.assertEqual(expected, mock_obj.mock_calls)
def _verify_mock_calls(self):
self._verify_mock_call(self.mock_int_bridge_cls,
self.mock_int_bridge_cls_expected)
self._verify_mock_call(self.mock_tun_bridge_cls,
self.mock_tun_bridge_cls_expected)
self._verify_mock_call(self.mock_phys_bridge_cls,
self.mock_phys_bridge_cls_expected)
self._verify_mock_call(self.mock_int_bridge,
self.mock_int_bridge_expected)
self._verify_mock_call(self.mock_map_tun_bridge,
self.mock_map_tun_bridge_expected)
self._verify_mock_call(self.mock_tun_bridge,
self.mock_tun_bridge_expected)
self._verify_mock_call(self.mock_aux_bridge,
self.mock_aux_bridge_expected)
self._verify_mock_call(self.ipdevice, self.ipdevice_expected)
self._verify_mock_call(self.get_bridges, self.get_bridges_expected)
self._verify_mock_call(self.inta, self.inta_expected)
self._verify_mock_call(self.intb, self.intb_expected)
self._verify_mock_call(self.execute, self.execute_expected)
def test_construct(self):
agent = self._build_agent()
self.assertEqual(agent.agent_id, 'ovs-agent-%s' % cfg.CONF.host)
self._verify_mock_calls()
    # TODO(ethuleau): Initially, the local ARP responder is dependent on the
    # ML2 l2 population mechanism driver.
    # The next two tests use the l2_pop flag to test the ARP responder.
def test_construct_with_arp_responder(self):
self._build_agent(l2_population=True, arp_responder=True)
self._define_expected_calls(arp_responder=True)
self._verify_mock_calls()
def test_construct_with_igmp_snooping(self):
cfg.CONF.set_override('igmp_snooping_enable', True, 'OVS')
self._build_agent()
self._define_expected_calls(igmp_snooping=True)
self._verify_mock_calls()
def test_construct_without_arp_responder(self):
self._build_agent(l2_population=False, arp_responder=True)
self._verify_mock_calls()
def test_construct_vxlan(self):
self._build_agent(tunnel_types=['vxlan'])
self._verify_mock_calls()
def test_provision_local_vlan(self):
ofports = list(TUN_OFPORTS[n_const.TYPE_GRE].values())
self.mock_tun_bridge_expected += [
mock.call.install_flood_to_tun(LV_ID, LS_ID, ofports),
mock.call.provision_local_vlan(
network_type=n_const.TYPE_GRE,
lvid=LV_ID,
segmentation_id=LS_ID),
]
a = self._build_agent()
a.available_local_vlans = set([LV_ID])
a.tun_br_ofports = TUN_OFPORTS
a.provision_local_vlan(NET_UUID, n_const.TYPE_GRE, None, LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_flat(self):
self.mock_map_tun_bridge_expected.append(
mock.call.provision_local_vlan(
port=self.MAP_TUN_PHY_OFPORT,
lvid=LV_ID,
segmentation_id=None,
distributed=False))
self.mock_int_bridge_expected.append(
mock.call.provision_local_vlan(
port=self.INT_OFPORT,
lvid=LV_ID,
segmentation_id=None))
a = self._build_agent()
a.available_local_vlans = set([LV_ID])
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.provision_local_vlan(NET_UUID, n_const.TYPE_FLAT, 'net1', LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_flat_fail(self):
a = self._build_agent()
a.provision_local_vlan(NET_UUID, n_const.TYPE_FLAT, 'net2', LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_vlan(self):
self.mock_map_tun_bridge_expected.append(
mock.call.provision_local_vlan(
port=self.MAP_TUN_PHY_OFPORT,
lvid=LV_ID,
segmentation_id=LS_ID,
distributed=False))
self.mock_int_bridge_expected.append(
mock.call.provision_local_vlan(
port=self.INT_OFPORT,
lvid=LV_ID,
segmentation_id=LS_ID))
a = self._build_agent()
a.available_local_vlans = set([LV_ID])
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.provision_local_vlan(NET_UUID, n_const.TYPE_VLAN, 'net1', LS_ID)
self._verify_mock_calls()
def test_provision_local_vlan_vlan_fail(self):
a = self._build_agent()
a.provision_local_vlan(NET_UUID, n_const.TYPE_VLAN, 'net2', LS_ID)
self._verify_mock_calls()
def test_reclaim_local_vlan(self):
self.mock_tun_bridge_expected += [
mock.call.reclaim_local_vlan(network_type='gre',
segmentation_id=LS_ID),
mock.call.delete_flood_to_tun(LV_ID),
mock.call.delete_unicast_to_tun(LV_ID, None),
mock.call.delete_arp_responder(LV_ID, None),
]
a = self._build_agent()
a.available_local_vlans = set()
a.vlan_manager.add(NET_UUID, *self.LVM_DATA)
a.reclaim_local_vlan(NET_UUID)
self.assertIn(self.LVM_DATA[0], a.available_local_vlans)
self._verify_mock_calls()
def test_reclaim_local_vlan_flat(self):
self.mock_map_tun_bridge_expected.append(
mock.call.reclaim_local_vlan(
port=self.MAP_TUN_PHY_OFPORT,
lvid=self.LVM_FLAT_DATA[0]))
self.mock_int_bridge_expected.append(
mock.call.reclaim_local_vlan(
port=self.INT_OFPORT,
segmentation_id=None))
a = self._build_agent()
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.available_local_vlans = set()
a.vlan_manager.add(NET_UUID, *self.LVM_FLAT_DATA)
a.reclaim_local_vlan(NET_UUID)
self.assertIn(self.LVM_FLAT_DATA[0], a.available_local_vlans)
self._verify_mock_calls()
def test_reclaim_local_vlan_vlan(self):
self.mock_map_tun_bridge_expected.append(
mock.call.reclaim_local_vlan(
port=self.MAP_TUN_PHY_OFPORT,
lvid=self.LVM_VLAN_DATA[0]))
self.mock_int_bridge_expected.append(
mock.call.reclaim_local_vlan(
port=self.INT_OFPORT,
segmentation_id=LS_ID))
a = self._build_agent()
a.phys_brs['net1'] = self.mock_map_tun_bridge
a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
a.int_ofports['net1'] = self.INT_OFPORT
a.available_local_vlans = set()
a.vlan_manager.add(NET_UUID, *self.LVM_VLAN_DATA)
a.reclaim_local_vlan(NET_UUID)
self.assertIn(self.LVM_VLAN_DATA[0], a.available_local_vlans)
self._verify_mock_calls()
def test_port_bound(self):
vlan_mapping = {'segmentation_id': str(LS_ID),
'physical_network': 'None',
'net_uuid': NET_UUID,
'network_type': 'gre'}
self.mock_int_bridge_expected += [
mock.call.db_get_val('Port', 'port', 'other_config'),
mock.call.set_db_attribute('Port', VIF_PORT.port_name,
'other_config',
vlan_mapping)]
a = self._build_agent()
a.vlan_manager.add(NET_UUID, *self.LVM_DATA)
a.local_dvr_map = {}
self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = {}
a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID,
FIXED_IPS, VM_DEVICE_OWNER, False)
self._verify_mock_calls()
def test_port_unbound(self):
with mock.patch.object(self.mod_agent.OVSNeutronAgent,
'reclaim_local_vlan') as reclaim_local_vlan:
a = self._build_agent()
a.vlan_manager.add(NET_UUID, *self.LVM_DATA)
a.port_unbound(VIF_ID, NET_UUID)
reclaim_local_vlan.assert_called_once_with(NET_UUID)
self._verify_mock_calls()
def test_port_dead(self):
self.mock_int_bridge_expected += [
mock.call.db_get_val('Port', VIF_PORT.port_name, 'tag',
log_errors=True),
mock.call.set_db_attribute(
'Port', VIF_PORT.port_name,
'tag', constants.DEAD_VLAN_TAG,
log_errors=True),
mock.call.drop_port(in_port=VIF_PORT.ofport),
]
a = self._build_agent()
a.available_local_vlans = set([LV_ID])
a.vlan_manager.add(NET_UUID, *self.LVM_DATA)
self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = mock.Mock()
a.port_dead(VIF_PORT)
self._verify_mock_calls()
def test_tunnel_update(self):
tunnel_port = '9999'
self.mock_tun_bridge.add_tunnel_port.return_value = tunnel_port
self.mock_tun_bridge_expected += [
mock.call.add_tunnel_port('gre-0a000a01', '10.0.10.1', '10.0.0.1',
'gre', 4789, True, False, None),
mock.call.setup_tunnel_port('gre', tunnel_port),
]
a = self._build_agent()
a.tunnel_update(
mock.sentinel.ctx, tunnel_ip='10.0.10.1',
tunnel_type=n_const.TYPE_GRE)
self._verify_mock_calls()
def test_tunnel_update_self(self):
a = self._build_agent()
a.tunnel_update(
mock.sentinel.ctx, tunnel_ip='10.0.0.1')
self._verify_mock_calls()
def test_daemon_loop(self):
reply_ge_1 = {'added': [{'name': 'tap0', 'ofport': 3,
'external_ids': {
'attached-mac': 'test_mac'}}],
'removed': []}
reply_ge_2 = {'added': [],
'removed': [{'name': 'tap0', 'ofport': 3,
'external_ids': {
'attached-mac': 'test_mac'}}]}
reply_pe_1 = {'current': set(['tap0']),
'added': set(['tap0']),
'removed': set([])}
reply_pe_2 = {'current': set([]),
'added': set([]),
'removed': set(['tap0'])}
reply_ancillary = {'current': set([]),
'added': set([]),
'removed': set([])}
self.mock_int_bridge_expected += [
mock.call.check_canary_table(),
mock.call.deferred(full_ordered=True, use_bundle=True),
mock.call.deferred().__enter__(),
mock.call.deferred().__exit__(None, None, None),
mock.call.cleanup_flows(),
mock.call.check_canary_table(),
mock.call.deferred(full_ordered=True, use_bundle=True),
mock.call.deferred().__enter__(),
mock.call.deferred().__exit__(None, None, None),
]
self.mock_map_tun_bridge_expected += [
mock.call.cleanup_flows(),
]
self.mock_tun_bridge_expected += [
mock.call.cleanup_flows()
]
# No cleanup is expected on ancillary bridge
self.ovs_bridges[self.INT_BRIDGE].check_canary_table.return_value = \
constants.OVS_NORMAL
with mock.patch.object(log.KeywordArgumentAdapter,
'exception') as log_exception,\
mock.patch.object(self.mod_agent.OVSNeutronAgent,
'process_ports_events') as process_p_events,\
mock.patch.object(
self.mod_agent.OVSNeutronAgent,
'process_network_ports') as process_network_ports,\
mock.patch.object(self.mod_agent.OVSNeutronAgent,
'tunnel_sync'),\
mock.patch.object(time, 'sleep'),\
mock.patch.object(
self.mod_agent.OVSNeutronAgent,
'update_stale_ofport_rules') as update_stale:
log_exception.side_effect = Exception(
'Fake exception to get out of the loop')
update_stale.return_value = []
devices_not_ready = set()
process_p_events.side_effect = [
(reply_pe_1, reply_ancillary, devices_not_ready),
(reply_pe_2, reply_ancillary, devices_not_ready)]
interface_polling = mock.Mock()
interface_polling.get_events.side_effect = [reply_ge_1, reply_ge_2]
failed_devices = {'removed': set([]), 'added': set([])}
failed_ancillary_devices = {'removed': set([]), 'added': set([])}
process_network_ports.side_effect = [
failed_devices,
Exception('Fake exception to get out of the loop')]
n_agent = self._build_agent()
            # Hack to test the loop:
            # we start the method and expect it to raise after the 2nd loop.
            # If something goes wrong, assert_has_calls below will catch it.
try:
n_agent.rpc_loop(interface_polling)
except Exception:
pass
# FIXME(salv-orlando): There should not be assertions on log
# messages
log_exception.assert_called_once_with(
"Error while processing VIF ports")
process_p_events.assert_has_calls([
mock.call(reply_ge_1, set(), set(), devices_not_ready,
failed_devices, failed_ancillary_devices, set()),
mock.call(reply_ge_2, set(['tap0']), set(), devices_not_ready,
failed_devices, failed_ancillary_devices,
set())
])
process_network_ports.assert_has_calls([
mock.call({'current': set(['tap0']),
'removed': set([]),
'added': set(['tap0'])}, False),
])
self.assertTrue(update_stale.called)
self._verify_mock_calls()
class TunnelTestOSKen(TunnelTest, ovs_test_base.OVSOSKenTestBase):
pass
|
|
# Copyright (c) 2012-2022, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
from . import AWSObject, AWSProperty, PropsDictType, Tags
from .validators import boolean, integer
class LoggingProperties(AWSProperty):
"""
`LoggingProperties <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-redshift-cluster-loggingproperties.html>`__
"""
props: PropsDictType = {
"BucketName": (str, True),
"S3KeyPrefix": (str, False),
}
class Cluster(AWSObject):
"""
`Cluster <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-cluster.html>`__
"""
resource_type = "AWS::Redshift::Cluster"
props: PropsDictType = {
"AllowVersionUpgrade": (boolean, False),
"AquaConfigurationStatus": (str, False),
"AutomatedSnapshotRetentionPeriod": (integer, False),
"AvailabilityZone": (str, False),
"AvailabilityZoneRelocation": (boolean, False),
"AvailabilityZoneRelocationStatus": (str, False),
"Classic": (boolean, False),
"ClusterIdentifier": (str, False),
"ClusterParameterGroupName": (str, False),
"ClusterSecurityGroups": ([str], False),
"ClusterSubnetGroupName": (str, False),
"ClusterType": (str, True),
"ClusterVersion": (str, False),
"DBName": (str, True),
"DeferMaintenance": (boolean, False),
"DeferMaintenanceDuration": (integer, False),
"DeferMaintenanceEndTime": (str, False),
"DeferMaintenanceStartTime": (str, False),
"DestinationRegion": (str, False),
"ElasticIp": (str, False),
"Encrypted": (boolean, False),
"EnhancedVpcRouting": (boolean, False),
"HsmClientCertificateIdentifier": (str, False),
"HsmConfigurationIdentifier": (str, False),
"IamRoles": ([str], False),
"KmsKeyId": (str, False),
"LoggingProperties": (LoggingProperties, False),
"MaintenanceTrackName": (str, False),
"ManualSnapshotRetentionPeriod": (integer, False),
"MasterUserPassword": (str, True),
"MasterUsername": (str, True),
"NodeType": (str, True),
"NumberOfNodes": (integer, False),
"OwnerAccount": (str, False),
"Port": (integer, False),
"PreferredMaintenanceWindow": (str, False),
"PubliclyAccessible": (boolean, False),
"ResourceAction": (str, False),
"RevisionTarget": (str, False),
"RotateEncryptionKey": (boolean, False),
"SnapshotClusterIdentifier": (str, False),
"SnapshotCopyGrantName": (str, False),
"SnapshotCopyManual": (boolean, False),
"SnapshotCopyRetentionPeriod": (integer, False),
"SnapshotIdentifier": (str, False),
"Tags": (Tags, False),
"VpcSecurityGroupIds": ([str], False),
}
class AmazonRedshiftParameter(AWSProperty):
"""
`AmazonRedshiftParameter <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-property-redshift-clusterparametergroup-parameter.html>`__
"""
props: PropsDictType = {
"ParameterName": (str, True),
"ParameterValue": (str, True),
}
class ClusterParameterGroup(AWSObject):
"""
`ClusterParameterGroup <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-clusterparametergroup.html>`__
"""
resource_type = "AWS::Redshift::ClusterParameterGroup"
props: PropsDictType = {
"Description": (str, True),
"ParameterGroupFamily": (str, True),
"Parameters": ([AmazonRedshiftParameter], False),
"Tags": (Tags, False),
}
class ClusterSecurityGroup(AWSObject):
"""
`ClusterSecurityGroup <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-clustersecuritygroup.html>`__
"""
resource_type = "AWS::Redshift::ClusterSecurityGroup"
props: PropsDictType = {
"Description": (str, True),
"Tags": (Tags, False),
}
class ClusterSecurityGroupIngress(AWSObject):
"""
`ClusterSecurityGroupIngress <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-clustersecuritygroupingress.html>`__
"""
resource_type = "AWS::Redshift::ClusterSecurityGroupIngress"
props: PropsDictType = {
"CIDRIP": (str, False),
"ClusterSecurityGroupName": (str, True),
"EC2SecurityGroupName": (str, False),
"EC2SecurityGroupOwnerId": (str, False),
}
class ClusterSubnetGroup(AWSObject):
"""
`ClusterSubnetGroup <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-clustersubnetgroup.html>`__
"""
resource_type = "AWS::Redshift::ClusterSubnetGroup"
props: PropsDictType = {
"Description": (str, True),
"SubnetIds": ([str], True),
"Tags": (Tags, False),
}
class EndpointAccess(AWSObject):
"""
`EndpointAccess <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-endpointaccess.html>`__
"""
resource_type = "AWS::Redshift::EndpointAccess"
props: PropsDictType = {
"ClusterIdentifier": (str, False),
"EndpointName": (str, True),
"ResourceOwner": (str, False),
"SubnetGroupName": (str, False),
"VpcSecurityGroupIds": ([str], True),
}
class EndpointAuthorization(AWSObject):
"""
`EndpointAuthorization <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-endpointauthorization.html>`__
"""
resource_type = "AWS::Redshift::EndpointAuthorization"
props: PropsDictType = {
"Account": (str, True),
"ClusterIdentifier": (str, True),
"Force": (boolean, False),
"VpcIds": ([str], False),
}
class EventSubscription(AWSObject):
"""
`EventSubscription <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-eventsubscription.html>`__
"""
resource_type = "AWS::Redshift::EventSubscription"
props: PropsDictType = {
"Enabled": (boolean, False),
"EventCategories": ([str], False),
"Severity": (str, False),
"SnsTopicArn": (str, False),
"SourceIds": ([str], False),
"SourceType": (str, False),
"SubscriptionName": (str, True),
"Tags": (Tags, False),
}
class PauseClusterMessage(AWSProperty):
"""
`PauseClusterMessage <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-redshift-scheduledaction-pauseclustermessage.html>`__
"""
props: PropsDictType = {
"ClusterIdentifier": (str, True),
}
class ResizeClusterMessage(AWSProperty):
"""
`ResizeClusterMessage <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-redshift-scheduledaction-resizeclustermessage.html>`__
"""
props: PropsDictType = {
"Classic": (boolean, False),
"ClusterIdentifier": (str, True),
"ClusterType": (str, False),
"NodeType": (str, False),
"NumberOfNodes": (integer, False),
}
class ResumeClusterMessage(AWSProperty):
"""
`ResumeClusterMessage <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-redshift-scheduledaction-resumeclustermessage.html>`__
"""
props: PropsDictType = {
"ClusterIdentifier": (str, True),
}
class ScheduledActionType(AWSProperty):
"""
`ScheduledActionType <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-redshift-scheduledaction-scheduledactiontype.html>`__
"""
props: PropsDictType = {
"PauseCluster": (PauseClusterMessage, False),
"ResizeCluster": (ResizeClusterMessage, False),
"ResumeCluster": (ResumeClusterMessage, False),
}
class ScheduledAction(AWSObject):
"""
`ScheduledAction <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-redshift-scheduledaction.html>`__
"""
resource_type = "AWS::Redshift::ScheduledAction"
props: PropsDictType = {
"Enable": (boolean, False),
"EndTime": (str, False),
"IamRole": (str, False),
"Schedule": (str, False),
"ScheduledActionDescription": (str, False),
"ScheduledActionName": (str, True),
"StartTime": (str, False),
"TargetAction": (ScheduledActionType, False),
}
class Endpoint(AWSProperty):
"""
`Endpoint <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-redshift-cluster-endpoint.html>`__
"""
props: PropsDictType = {
"Address": (str, False),
"Port": (str, False),
}
class VpcSecurityGroup(AWSProperty):
"""
`VpcSecurityGroup <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-redshift-endpointaccess-vpcsecuritygroup.html>`__
"""
props: PropsDictType = {
"Status": (str, False),
"VpcSecurityGroupId": (str, False),
}
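# Illustrative usage sketch, not part of the autogenerated module: these
# classes are normally consumed from a troposphere Template. The resource
# title and property values below are hypothetical; per the props above,
# ClusterType, DBName, MasterUsername, MasterUserPassword and NodeType are
# the required Cluster properties.
#
#     from troposphere import Template
#     from troposphere.redshift import Cluster
#
#     t = Template()
#     t.add_resource(Cluster(
#         "ExampleCluster",
#         ClusterType="single-node",
#         DBName="dev",
#         MasterUsername="admin",
#         MasterUserPassword="change-me",
#         NodeType="dc2.large",
#     ))
#     print(t.to_json())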
|
|
"""
A sudoku game solver.
(c) 2015 Brandon L. Reiss
"""
from collections import Counter
from copy import copy, deepcopy
import logging
from random import randint
LOGGER = logging.getLogger(__name__)
def format_board(board):
"""
Pretty format a board.
###########################################
#|---+---+---|#|---+---+---|#|---+---+---|#
#|123|12 |1 |#|123|12 |1 |#|123|12 |1 |#
#|456| 5 |4 |#|456| 5 |4 |#|456| 5 |4 |#
#|789|7 | 8 |#|789|7 | 8 |#|789|7 | 8 |#
#|---+---+---|#|---+---+---|#|---+---+---|#
...
#|---+---+---|#|---+---+---|#|---+---+---|#
###########################################
#|---+---+---|#|---+---+---|#|---+---+---|#
...
###########################################
Each 3x3 block is outlined by '#' and each value is outlined by '|-+'.
:return list: list of strings representing lines of the formatted board
"""
def outer_border_row():
return "###########################################"
def inner_border_row():
return "#|---+---+---|#|---+---+---|#|---+---+---|#"
def format_row(row):
fmt_rows = []
# Generate rows for grid values 123, 456, 789.
for val_div in xrange(3):
# Generate strings of either space or matched possible values for
# every cell in the row for this subset of possible values 1-9.
possible_vals = range((3 * val_div) + 1, (3 * val_div) + 4)
row_vals = [
''.join(str(pval) if pval in values else ' '
for pval in possible_vals)
for values in row]
# Loop over the 3 3x3 block sections.
by_block = []
for blk_idx in xrange(3):
block_expanded = '|'.join(row_vals[3 * blk_idx:(3 * blk_idx) + 3])
by_block.append('|{}|'.format(block_expanded))
fmt_rows.append('#{}#'.format('#'.join(by_block)))
return fmt_rows
rows = [outer_border_row()]
# Loop over the 3 3x3 block sections.
for block_div in xrange(3):
rows.append(inner_border_row())
for row in board[3 * block_div:(3 * block_div) + 3]:
rows.extend(format_row(row))
rows.append(inner_border_row())
rows.append(outer_border_row())
return rows
def solve(initial_board):
"""
Solve a Sudoku puzzle.
:param list initial_board: list of lists of either digits in [1, 9] or None
indicating known and vacant spaces of a Sudoku puzzle.
:return list solution: list of lists of digits in [1, 9] representing the
solved puzzle configuration; there are no vacant spaces in a solution.
:raise ValueError: if the board is unsolvable as determined by the Sudoku
game invariants.
"""
def _check_elim_row_col(select_pred, counts, iter_rc):
"""
        Shared helper for checking a row or column for a forced move. For each
        row/column, values that remain possible in exactly one cell and are
        not already provided by a selected space are collected into
        unique_not_selected; the first such value and its location are
        returned, or (None, None) if there is none.
"""
for i, counted in enumerate(counts):
selected_values = set(next(iter(board[sel_i][sel_j]))
for sel_i, sel_j in selected
if select_pred(sel_i, sel_j, i))
unique_values = set(val
for val, count in counted.iteritems()
if count == 1)
unique_not_selected = unique_values - selected_values
# Select the first unique-but-not-yet-selected value.
if len(unique_not_selected) > 0:
value = next(iter(unique_not_selected))
j = next(idx
for idx, values in enumerate(iter_rc(board, i))
if value in values)
return (i, j), value
return None, None
def check_elim_row(row_counts):
return _check_elim_row_col(lambda sel_i, _, i: i == sel_i,
row_counts,
lambda board, i: iter(board[i]))
def check_elim_col(col_counts):
return _check_elim_row_col(lambda _, sel_j, j: j == sel_j,
col_counts,
lambda board, j: (row[j] for row in board))
def board_is_valid(board, row_counts, col_counts):
"""
Look for
- any space that has zero possible values
- any row where at least one value in [1, 9] is impossible
- any column where at least one value in [1, 9] is impossible
:returns: True when the board is valid and False otherwise
"""
empty_space = next((True
for row in board
for values in row
if len(values) == 0), False)
if empty_space:
return False
empty_count = lambda counts: any(count == 0 for count in counts.itervalues())
empty_row_count = next((True
for counts in row_counts
if empty_count(counts)), False)
if empty_row_count:
return False
empty_col_count = next((True
for counts in col_counts
if empty_count(counts)), False)
if empty_col_count:
return False
return True
def set_value(board, value, i, j, row_counts, col_counts):
"""
        Set `value` at position (i, j) on the board, eliminating it as a
        candidate from the local 3x3 block and from the rest of the row and
        column, and updating the row/column counters accordingly.
"""
# Get 3x3 block address.
block_i = i / 3
block_j = j / 3
# Eliminate value from block.
for my_i, row in enumerate(board[3 * block_i:(3 * block_i) + 3], 3 * block_i):
for my_j, values in enumerate(row[3 * block_j:(3 * block_j) + 3], 3 * block_j):
if value in values:
values.remove(value)
row_counts[my_i][value] -= 1
col_counts[my_j][value] -= 1
# Eliminate value from row.
for my_j, values in enumerate(board[i]):
if value in values:
values.discard(value)
row_counts[i][value] -= 1
col_counts[my_j][value] -= 1
# Eliminate value from column.
for my_i, values in enumerate(row[j] for row in board):
if value in values:
values.discard(value)
row_counts[my_i][value] -= 1
col_counts[j][value] -= 1
# Set the value.
row_counts[i][value] = 1
col_counts[j][value] = 1
for discard_value in board[i][j]:
row_counts[i][discard_value] -= 1
col_counts[j][discard_value] -= 1
board[i][j] = set((value,))
# Move type flags.
move_type_elim, \
move_type_row_elim, \
move_type_col_elim, \
move_type_guess = range(4)
move_type_to_str = {
move_type_elim : "elimination",
move_type_row_elim : "row elimination",
move_type_col_elim : "col elimination",
move_type_guess : "guess",
}
board = [[set(xrange(1, 10)) for _ in xrange(9)] for _ in xrange(9)]
row_counts = [Counter(v for values in row for v in values)
for row in board]
col_counts = [Counter(v for row in board for v in row[j])
for j in xrange(9)]
selected = set()
for i, j, value in ((i, j, value)
for i, row in enumerate(initial_board)
for j, value in enumerate(row)
if value is not None):
#print "Setting ({}, {}) = {}".format(i, j, value)
set_value(board, value, i, j, row_counts, col_counts)
selected.add((i,j))
#print '\n'.join(format_board(board))
num_set_initially = len(selected)
#print 'Initial board:'
#print '\n'.join(format_board(board))
#raw_input("Continue...")
#print 'num_set_initially:', num_set_initially
failed_guesses = 0
longest_discarded_moves = 0
move_counter = 0
board_stack = []
while True:
# Check whether or not the board is still valid.
if not board_is_valid(board, row_counts, col_counts):
# If we didn't guess previously, then this board is no good.
if len(board_stack) == 0:
                raise ValueError("Puzzle is invalid and therefore unsolvable.\n"
                                 + '\n'.join(format_board(board)))
failed_guesses += 1
# Pop the board state and update the counts.
move_count, board, selected = board_stack.pop()
row_counts = [Counter(v for values in row for v in values)
for row in board]
col_counts = [Counter(v for row in board for v in row[j])
for j in xrange(9)]
discarded_moves = move_counter - move_count
longest_discarded_moves = max(discarded_moves, longest_discarded_moves)
#print "Guess after move {} failed after {} moves".format(
# move_count, discarded_moves)
#print '\n'.join(format_board(board))
#raw_input("Continue...")
# All done?
if len(selected) == 81:
break
next_move = None
# Check for naive unique value (only remaining choice).
try:
i, j = next(((i, j)
for i, row in enumerate(board)
for j, values in enumerate(row)
if len(values) == 1
and (i, j) not in selected))
next_move = (i, j), next(iter(board[i][j])), move_type_elim
except StopIteration:
pass
# Check for unique value within a row.
if next_move is None:
ij_pre, value = check_elim_row(row_counts)
if ij_pre is not None:
i, j = ij_pre
next_move = (i, j), value, move_type_row_elim
# Check for unique value within a column.
if next_move is None:
ji_pre, value = check_elim_col(col_counts)
if ji_pre is not None:
j, i = ji_pre
next_move = (i, j), value, move_type_col_elim
# See if we must guess. All forced moves are exhausted here.
if next_move is None:
# Find a good place to guess.
count_values = [(len(values),
i, j)
for i, row in enumerate(board)
for j, values in enumerate(row)
if len(values) > 1]
min_remaining, _, _ = min(count_values)
min_values = [(i, j)
for count, i, j in count_values
if count == min_remaining]
# Get a random guess location with the min possible values
# remaining.
i, j = min_values[randint(0, len(min_values) - 1)]
# Guess and push the world.
value = next(iter(board[i][j]))
board_copy, selected_copy = deepcopy(board), copy(selected)
board_copy[i][j].discard(value)
board_stack.append((move_counter, board_copy, selected_copy))
next_move = (i, j), value, move_type_guess
#print '\n'.join(format_board(board))
#raw_input("Continue...")
# We must have found a move by now since the board is valid and we can
# guess.
(i, j), value, move_type = next_move
move_counter += 1
#print '{:3d} : Picked ({},{}) = {} ({})'.format(
# move_counter, i, j, value, move_type_to_str[move_type])
set_value(board, value, i, j, row_counts, col_counts)
selected.add((i, j))
#print '\n'.join(format_board(board))
#raw_input("Continue...")
moves_to_solve_directly = 81 - num_set_initially
LOGGER.info("Solved after {} moves with {} discarded from {} failed guesses".format(
move_counter, move_counter - moves_to_solve_directly, failed_guesses))
LOGGER.info("Longest discarded move sequence: {}".format(longest_discarded_moves))
return [[next(iter(values)) for values in row] for row in board]
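# Minimal usage sketch, not part of the original module. The empty puzzle is
# only meant to demonstrate the call signature; solve() fills it entirely via
# forced moves and guessing. Any 9x9 list of lists mixing digits 1-9 and None
# is accepted the same way.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    empty_puzzle = [[None] * 9 for _ in range(9)]
    solution = solve(empty_puzzle)
    # format_board() expects iterables of candidate values, so wrap each
    # solved digit in a single-element set before formatting.
    print('\n'.join(format_board([[set([v]) for v in row]
                                  for row in solution])))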
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import io
import logging
import socket
from keystoneauth1 import adapter
from keystoneauth1 import exceptions as ksa_exc
import OpenSSL
from oslo_utils import importutils
from oslo_utils import netutils
import requests
import urllib.parse
try:
import json
except ImportError:
import simplejson as json
from oslo_utils import encodeutils
from glanceclient.common import utils
from glanceclient import exc
osprofiler_web = importutils.try_import("osprofiler.web")
LOG = logging.getLogger(__name__)
USER_AGENT = 'python-glanceclient'
CHUNKSIZE = 1024 * 64 # 64kB
REQ_ID_HEADER = 'X-OpenStack-Request-ID'
TOKEN_HEADERS = ['X-Auth-Token', 'X-Service-Token']
def encode_headers(headers):
"""Encodes headers.
Note: This should be used right before
sending anything out.
:param headers: Headers to encode
:returns: Dictionary with encoded headers'
names and values
"""
# NOTE(rosmaita): This function's rejection of any header name without a
# corresponding value is arguably justified by RFC 7230. In any case, that
# behavior was already here and there is an existing unit test for it.
# Bug #1766235: According to RFC 8187, headers must be encoded as ASCII.
# So we first %-encode them to get them into range < 128 and then turn
# them into ASCII.
encoded_dict = {}
for h, v in headers.items():
if v is not None:
# if the item is token, do not quote '+' as well.
# NOTE(imacdonn): urllib.parse.quote() is intended for quoting the
# path part of a URL, but headers like x-image-meta-location
# include an entire URL. We should avoid encoding the colon in
# this case (bug #1788942)
safe = '=+/' if h in TOKEN_HEADERS else '/:'
key = urllib.parse.quote(h, safe)
value = urllib.parse.quote(v, safe)
encoded_dict[key] = value
return dict((encodeutils.safe_encode(h, encoding='ascii'),
encodeutils.safe_encode(v, encoding='ascii'))
for h, v in encoded_dict.items())
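# Illustrative example, not from the original source: with the rules above, a
# plain header passes through unchanged apart from being coerced to ASCII
# byte strings, e.g. (hypothetical value):
#
#     encode_headers({'x-image-meta-location': 'http://host/img'})
#     -> {b'x-image-meta-location': b'http://host/img'}
#
# whereas non-ASCII characters are %-quoted first, so the final encode to
# ASCII cannot fail.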
class _BaseHTTPClient(object):
@staticmethod
def _chunk_body(body):
chunk = body
while chunk:
chunk = body.read(CHUNKSIZE)
if not chunk:
break
yield chunk
def _set_common_request_kwargs(self, headers, kwargs):
"""Handle the common parameters used to send the request."""
# Default Content-Type is octet-stream
content_type = headers.get('Content-Type', 'application/octet-stream')
# NOTE(jamielennox): remove this later. Managers should pass json= if
# they want to send json data.
data = kwargs.pop("data", None)
if data is not None and not isinstance(data, str):
try:
data = json.dumps(data)
content_type = 'application/json'
except TypeError:
# Here we assume it's
# a file-like object
# and we'll chunk it
data = self._chunk_body(data)
headers['Content-Type'] = content_type
kwargs['stream'] = content_type == 'application/octet-stream'
return data
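    # Note on _set_common_request_kwargs above (illustrative, not part of the
    # original source): data={'name': 'img'} is JSON-encoded and sent as
    # application/json, while a file-like object (json.dumps raises TypeError
    # on it) is streamed through _chunk_body() as application/octet-stream.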
def _handle_response(self, resp):
if not resp.ok:
LOG.debug("Request returned failure status %s.", resp.status_code)
raise exc.from_response(resp, resp.content)
elif (resp.status_code == requests.codes.MULTIPLE_CHOICES and
resp.request.path_url != '/versions'):
# NOTE(flaper87): Eventually, we'll remove the check on `versions`
# which is a bug (1491350) on the server.
raise exc.from_response(resp)
content_type = resp.headers.get('Content-Type')
# Read body into string if it isn't obviously image data
if content_type == 'application/octet-stream':
# Do not read all response in memory when downloading an image.
body_iter = _close_after_stream(resp, CHUNKSIZE)
else:
content = resp.text
if content_type and content_type.startswith('application/json'):
# Let's use requests json method, it should take care of
# response encoding
body_iter = resp.json()
else:
body_iter = io.StringIO(content)
try:
body_iter = json.loads(''.join([c for c in body_iter]))
except ValueError:
body_iter = None
return resp, body_iter
class HTTPClient(_BaseHTTPClient):
def __init__(self, endpoint, **kwargs):
self.endpoint = endpoint
self.identity_headers = kwargs.get('identity_headers')
self.auth_token = kwargs.get('token')
self.language_header = kwargs.get('language_header')
self.global_request_id = kwargs.get('global_request_id')
if self.identity_headers:
self.auth_token = self.identity_headers.pop('X-Auth-Token',
self.auth_token)
self.session = requests.Session()
self.session.headers["User-Agent"] = USER_AGENT
if self.language_header:
self.session.headers["Accept-Language"] = self.language_header
self.timeout = float(kwargs.get('timeout', 600))
if self.endpoint.startswith("https"):
if kwargs.get('insecure', False) is True:
self.session.verify = False
else:
if kwargs.get('cacert', None) != '':
self.session.verify = kwargs.get('cacert', True)
self.session.cert = (kwargs.get('cert_file'),
kwargs.get('key_file'))
def __del__(self):
if self.session:
try:
self.session.close()
except Exception as e:
LOG.exception(e)
finally:
self.session = None
@staticmethod
def parse_endpoint(endpoint):
return netutils.urlsplit(endpoint)
def log_curl_request(self, method, url, headers, data, kwargs):
curl = ['curl -g -i -X %s' % method]
headers = copy.deepcopy(headers)
headers.update(self.session.headers)
for (key, value) in headers.items():
header = '-H \'%s: %s\'' % utils.safe_header(key, value)
curl.append(header)
if not self.session.verify:
curl.append('-k')
else:
if isinstance(self.session.verify, str):
curl.append(' --cacert %s' % self.session.verify)
if self.session.cert:
curl.append(' --cert %s --key %s' % self.session.cert)
if data and isinstance(data, str):
curl.append('-d \'%s\'' % data)
curl.append(url)
msg = ' '.join([encodeutils.safe_decode(item, errors='ignore')
for item in curl])
LOG.debug(msg)
@staticmethod
def log_http_response(resp):
status = (resp.raw.version / 10.0, resp.status_code, resp.reason)
dump = ['\nHTTP/%.1f %s %s' % status]
headers = resp.headers.items()
dump.extend(['%s: %s' % utils.safe_header(k, v) for k, v in headers])
dump.append('')
content_type = resp.headers.get('Content-Type')
if content_type != 'application/octet-stream':
dump.extend([resp.text, ''])
LOG.debug('\n'.join([encodeutils.safe_decode(x, errors='ignore')
for x in dump]))
def _request(self, method, url, **kwargs):
"""Send an http request with the specified characteristics.
Wrapper around httplib.HTTP(S)Connection.request to handle tasks such
as setting headers and error handling.
"""
# Copy the kwargs so we can reuse the original in case of redirects
headers = copy.deepcopy(kwargs.pop('headers', {}))
if self.identity_headers:
for k, v in self.identity_headers.items():
headers.setdefault(k, v)
data = self._set_common_request_kwargs(headers, kwargs)
# add identity header to the request
if not headers.get('X-Auth-Token'):
headers['X-Auth-Token'] = self.auth_token
if self.global_request_id:
headers.setdefault(REQ_ID_HEADER, self.global_request_id)
if osprofiler_web:
headers.update(osprofiler_web.get_trace_id_headers())
# Note(flaper87): Before letting headers / url fly,
# they should be encoded otherwise httplib will
# complain.
headers = encode_headers(headers)
if self.endpoint.endswith("/") or url.startswith("/"):
conn_url = "%s%s" % (self.endpoint, url)
else:
conn_url = "%s/%s" % (self.endpoint, url)
self.log_curl_request(method, conn_url, headers, data, kwargs)
try:
resp = self.session.request(method,
conn_url,
data=data,
headers=headers,
timeout=self.timeout,
**kwargs)
except requests.exceptions.Timeout as e:
message = ("Error communicating with %(url)s: %(e)s" %
dict(url=conn_url, e=e))
raise exc.InvalidEndpoint(message=message)
except requests.exceptions.ConnectionError as e:
message = ("Error finding address for %(url)s: %(e)s" %
dict(url=conn_url, e=e))
raise exc.CommunicationError(message=message)
except socket.gaierror as e:
            message = "Error finding address for %s: %s" % (
                conn_url, e)
raise exc.InvalidEndpoint(message=message)
except (socket.error, socket.timeout, IOError) as e:
endpoint = self.endpoint
message = ("Error communicating with %(endpoint)s %(e)s" %
{'endpoint': endpoint, 'e': e})
raise exc.CommunicationError(message=message)
except OpenSSL.SSL.Error as e:
message = ("SSL Error communicating with %(url)s: %(e)s" %
{'url': conn_url, 'e': e})
raise exc.CommunicationError(message=message)
# log request-id for each api call
request_id = resp.headers.get('x-openstack-request-id')
if request_id:
LOG.debug('%(method)s call to image for '
'%(url)s used request id '
'%(response_request_id)s',
{'method': resp.request.method,
'url': resp.url,
'response_request_id': request_id})
resp, body_iter = self._handle_response(resp)
self.log_http_response(resp)
return resp, body_iter
def head(self, url, **kwargs):
return self._request('HEAD', url, **kwargs)
def get(self, url, **kwargs):
return self._request('GET', url, **kwargs)
def post(self, url, **kwargs):
return self._request('POST', url, **kwargs)
def put(self, url, **kwargs):
return self._request('PUT', url, **kwargs)
def patch(self, url, **kwargs):
return self._request('PATCH', url, **kwargs)
def delete(self, url, **kwargs):
return self._request('DELETE', url, **kwargs)
def _close_after_stream(response, chunk_size):
"""Iterate over the content and ensure the response is closed after."""
# Yield each chunk in the response body
for chunk in response.iter_content(chunk_size=chunk_size):
yield chunk
# Once we're done streaming the body, ensure everything is closed.
# This will return the connection to the HTTPConnectionPool in urllib3
# and ideally reduce the number of HTTPConnectionPool full warnings.
response.close()
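# Illustrative note, not part of the original source: _handle_response() wraps
# octet-stream downloads with this generator, so a caller can stream an image
# to disk roughly like this (URL and filename are hypothetical):
#
#     resp, body = client.get('/v2/images/<image-id>/file')
#     with open('image.raw', 'wb') as f:
#         for chunk in body:
#             f.write(chunk)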
class SessionClient(adapter.Adapter, _BaseHTTPClient):
def __init__(self, session, **kwargs):
kwargs.setdefault('user_agent', USER_AGENT)
kwargs.setdefault('service_type', 'image')
super(SessionClient, self).__init__(session, **kwargs)
def request(self, url, method, **kwargs):
headers = kwargs.pop('headers', {})
if self.global_request_id:
headers.setdefault(REQ_ID_HEADER, self.global_request_id)
kwargs['raise_exc'] = False
data = self._set_common_request_kwargs(headers, kwargs)
try:
# NOTE(pumaranikar): To avoid bug #1641239, no modification of
# headers should be allowed after encode_headers() is called.
resp = super(SessionClient,
self).request(url,
method,
headers=encode_headers(headers),
data=data,
**kwargs)
except ksa_exc.ConnectTimeout as e:
conn_url = self.get_endpoint(auth=kwargs.get('auth'))
conn_url = "%s/%s" % (conn_url.rstrip('/'), url.lstrip('/'))
message = ("Error communicating with %(url)s %(e)s" %
dict(url=conn_url, e=e))
raise exc.InvalidEndpoint(message=message)
except ksa_exc.ConnectFailure as e:
conn_url = self.get_endpoint(auth=kwargs.get('auth'))
conn_url = "%s/%s" % (conn_url.rstrip('/'), url.lstrip('/'))
message = ("Error finding address for %(url)s: %(e)s" %
dict(url=conn_url, e=e))
raise exc.CommunicationError(message=message)
return self._handle_response(resp)
def get_http_client(endpoint=None, session=None, **kwargs):
if session:
return SessionClient(session, **kwargs)
elif endpoint:
return HTTPClient(endpoint, **kwargs)
else:
raise AttributeError('Constructing a client must contain either an '
'endpoint or a session')
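# Illustrative usage sketch, not part of the original module; the endpoint URL
# and request path are hypothetical:
#
#     client = get_http_client(endpoint='http://glance.example.com:9292')
#     resp, body = client.get('/v2/images')
#
# Passing a keystoneauth1 session instead (get_http_client(session=session))
# returns a SessionClient, whose request() goes through
# keystoneauth1.adapter.Adapter.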
|
|
import imp
import re
import sys
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext_lazy as _
from djblets.util.filesystem import is_exe_in_path
from reviewboard.scmtools import sshutils
from reviewboard.scmtools.errors import AuthenticationError, \
BadHostKeyError, \
UnknownHostKeyError, \
UnverifiedCertificateError
from reviewboard.scmtools.models import Repository, Tool
from reviewboard.site.models import LocalSite
from reviewboard.site.validation import validate_review_groups, validate_users
class RepositoryForm(forms.ModelForm):
"""
A specialized form for RepositoryAdmin that makes the "password"
field use a PasswordInput widget.
"""
    # NOTE: For hosting services that have a matching bug tracker, the list
    # of fields must match that of the corresponding bug tracker (not
    # including the hosting_ and bug_tracker_ prefixes).
HOSTING_SERVICE_INFO = SortedDict([
('bitbucket', {
'label': _('Bitbucket'),
'fields': ['hosting_project_name', 'hosting_owner'],
'tools': {
'Mercurial': {
'path': 'http://bitbucket.org/%(hosting_owner)s/'
'%(hosting_project_name)s/',
'mirror_path': 'ssh://[email protected]/'
'%(hosting_owner)s/'
'%(hosting_project_name)s/'
},
},
}),
('fedorahosted', {
'label': _('Fedora Hosted'),
'fields': ['hosting_project_name'],
'hidden_fields': ['raw_file_url', 'username', 'password'],
'tools': {
'Git': {
'path': 'git://git.fedorahosted.org/git/'
'%(hosting_project_name)s.git',
'mirror_path': 'git://git.fedorahosted.org/git/'
'%(hosting_project_name)s.git',
'raw_file_url': 'http://git.fedorahosted.org/git/?p='
'%(hosting_project_name)s.git;'
'a=blob_plain;'
'f=<filename>;h=<revision>'
},
'Mercurial': {
'path': 'http://hg.fedorahosted.org/hg/'
'%(hosting_project_name)s/',
'mirror_path': 'https://hg.fedorahosted.org/hg/'
'%(hosting_project_name)s/'
},
'Subversion': {
'path': 'http://svn.fedorahosted.org/svn/'
'%(hosting_project_name)s/',
'mirror_path': 'https://svn.fedorahosted.org/svn/'
'%(hosting_project_name)s/',
},
},
}),
('github', {
'label': _('GitHub'),
'fields': ['hosting_project_name', 'hosting_owner'],
            'hidden_fields': ['raw_file_url', 'username', 'password'],
'tools': {
'Git': {
'path': 'git://github.com/%(hosting_owner)s/'
'%(hosting_project_name)s.git',
'mirror_path': '[email protected]:%(hosting_owner)s/'
'%(hosting_project_name)s.git',
'raw_file_url': 'http://github.com/api/v2/yaml/blob/show/'
'%(hosting_owner)s/'
'%(hosting_project_name)s/'
'<revision>'
},
},
}),
('github-private', {
'label': _('GitHub (Private)'),
'fields': ['hosting_project_name', 'hosting_owner', 'api_token'],
'hidden_fields': ['raw_file_url', 'username', 'password'],
'tools': {
'Git': {
'path': '[email protected]:%(hosting_owner)s/'
'%(hosting_project_name)s.git',
'mirror_path': '',
'raw_file_url': 'http://github.com/api/v2/yaml/blob/show/'
'%(hosting_owner)s/'
'%(hosting_project_name)s/'
'<revision>'
'?login=%(hosting_owner)s'
'&token=%(api_token)s'
},
},
}),
('github-private-org', {
'label': _('GitHub (Private Organization)'),
'fields': ['hosting_project_name', 'hosting_owner', 'api_token',
'username'],
'hidden_fields': ['raw_file_url', 'password'],
'tools': {
'Git': {
'path': '[email protected]:%(hosting_owner)s/'
'%(hosting_project_name)s.git',
'mirror_path': '',
'raw_file_url': 'http://github.com/api/v2/yaml/blob/show/'
'%(hosting_owner)s/'
'%(hosting_project_name)s/'
'<revision>'
'?login=%(username)s'
'&token=%(api_token)s'
},
},
}),
('gitorious', {
'label': _('Gitorious'),
'fields': ['project_slug', 'repository_name'],
            'hidden_fields': ['raw_file_url', 'username', 'password'],
'tools': {
'Git': {
'path': 'git://gitorious.org/%(project_slug)s/'
'%(repository_name)s.git',
'mirror_path': 'http://git.gitorious.org/%(project_slug)s/'
'%(repository_name)s.git',
'raw_file_url': 'http://git.gitorious.org/%(project_slug)s/'
'%(repository_name)s/blobs/raw/<revision>'
},
},
}),
('googlecode', {
'label': _('Google Code'),
'fields': ['hosting_project_name'],
'tools': {
'Mercurial': {
'path': 'http://%(hosting_project_name)s'
'.googlecode.com/hg',
'mirror_path': 'https://%(hosting_project_name)s'
'.googlecode.com/hg',
},
'Subversion': {
'path': 'http://%(hosting_project_name)s'
'.googlecode.com/svn',
'mirror_path': 'https://%(hosting_project_name)s'
'.googlecode.com/svn',
},
},
}),
('sourceforge', {
'label': _('SourceForge'),
'fields': ['hosting_project_name'],
'tools': {
'Bazaar': {
'path': 'bzr://%(hosting_project_name)s'
'.bzr.sourceforge.net/bzrroot/'
'%(hosting_project_name)s',
'mirror_path': 'bzr+ssh://%(hosting_project_name)s'
'.bzr.sourceforge.net/bzrroot/'
'%(hosting_project_name)s',
},
'CVS': {
'path': ':pserver:anonymous@%(hosting_project_name)s'
'.cvs.sourceforge.net:/cvsroot/'
'%(hosting_project_name)s',
'mirror_path': '%(hosting_project_name)s'
'.cvs.sourceforge.net/cvsroot/'
'%(hosting_project_name)s',
},
'Mercurial': {
'path': 'http://%(hosting_project_name)s'
'.hg.sourceforge.net:8000/hgroot/'
'%(hosting_project_name)s',
'mirror_path': 'ssh://%(hosting_project_name)s'
'.hg.sourceforge.net/hgroot/'
'%(hosting_project_name)s',
},
'Subversion': {
'path': 'http://%(hosting_project_name)s'
'.svn.sourceforge.net/svnroot/'
'%(hosting_project_name)s',
'mirror_path': 'https://%(hosting_project_name)s'
'.svn.sourceforge.net/svnroot/'
'%(hosting_project_name)s',
},
# TODO: Support Git
},
}),
('custom', {
'label': _('Custom'),
'fields': ['path', 'mirror_path'],
}),
])
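    # Illustrative note, not part of the original source: the 'path',
    # 'mirror_path' and 'raw_file_url' entries above are %-style templates
    # that _clean_hosting_info() expands with the form's field data, e.g.
    # with hypothetical values:
    #
    #     'git://github.com/%(hosting_owner)s/%(hosting_project_name)s.git' \
    #         % {'hosting_owner': 'alice', 'hosting_project_name': 'proj'}
    #     == 'git://github.com/alice/proj.git'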
BUG_TRACKER_INFO = SortedDict([
('none', {
'label': _('None'),
'fields': [],
'format': '',
}),
('bitbucket', {
'label': 'Bitbucket',
'fields': ['bug_tracker_project_name', 'bug_tracker_owner'],
'format': 'http://bitbucket.org/%(bug_tracker_owner)s/'
'%(bug_tracker_project_name)s/issue/%%s/',
}),
('bugzilla', {
'label': 'Bugzilla',
'fields': ['bug_tracker_base_url'],
'format': '%(bug_tracker_base_url)s/show_bug.cgi?id=%%s',
}),
('fedorahosted', {
'label': 'Fedora Hosted',
'fields': ['bug_tracker_project_name'],
'format': 'https://fedorahosted.org/%(bug_tracker_project_name)s'
'/ticket/%%s',
}),
('github', {
'label': 'GitHub',
'fields': ['bug_tracker_project_name', 'bug_tracker_owner'],
'format': 'http://github.com/%(bug_tracker_owner)s/'
'%(bug_tracker_project_name)s/issues#issue/%%s',
}),
('github-private', {
'label': 'GitHub (Private)',
'fields': ['bug_tracker_project_name', 'bug_tracker_owner'],
'format': 'http://github.com/%(bug_tracker_owner)s/'
'%(bug_tracker_project_name)s/issues#issue/%%s',
}),
('googlecode', {
'label': 'Google Code',
'fields': ['bug_tracker_project_name'],
'format': 'http://code.google.com/p/%(bug_tracker_project_name)s/'
'issues/detail?id=%%s',
}),
('redmine', {
'label': 'Redmine',
'fields': ['bug_tracker_base_url'],
'format': '%(bug_tracker_base_url)s/issues/%%s',
}),
('sourceforge', {
'label': 'SourceForge',
'fields': [],
'format': 'http://sourceforge.net/support/tracker.php?aid=%%s',
}),
('trac', {
'label': 'Trac',
'fields': ['bug_tracker_base_url'],
'format': '%(bug_tracker_base_url)s/ticket/%%s',
}),
('custom', {
'label': _('Custom'),
'fields': ['bug_tracker'],
'format': '%(bug_tracker)s',
}),
])
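    # Illustrative note, not part of the original source: each 'format' entry
    # escapes the bug-number placeholder as '%%s' so that it survives the
    # first %-expansion done by _clean_bug_tracker_info(), e.g. with
    # hypothetical values:
    #
    #     'http://bitbucket.org/%(bug_tracker_owner)s/'
    #     '%(bug_tracker_project_name)s/issue/%%s/' \
    #         % {'bug_tracker_owner': 'alice',
    #            'bug_tracker_project_name': 'proj'}
    #     == 'http://bitbucket.org/alice/proj/issue/%s/'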
HOSTING_FIELDS = [
"path", "mirror_path", "hosting_owner", "hosting_project_name",
"api_token", "project_slug", "repository_name",
]
BUG_TRACKER_FIELDS = [
"bug_tracker_base_url", "bug_tracker_owner",
"bug_tracker_project_name", "bug_tracker",
]
FORMAT_STR_RE = re.compile(r'%\(([A-Za-z0-9_-]+)\)s')
# Host trust state
reedit_repository = forms.BooleanField(
label=_("Re-edit repository"),
required=False)
trust_host = forms.BooleanField(
label=_("I trust this host"),
required=False)
# Fields
hosting_type = forms.ChoiceField(
label=_("Hosting service"),
required=True,
choices=[(service_id, info['label'])
for service_id, info in HOSTING_SERVICE_INFO.iteritems()],
initial="custom")
hosting_owner = forms.CharField(
label=_("Project's owner"),
max_length=256,
required=False,
widget=forms.TextInput(attrs={'size': '30'}))
hosting_project_name = forms.CharField(
label=_("Project name"),
max_length=256,
required=False,
widget=forms.TextInput(attrs={'size': '30'}))
project_slug = forms.CharField(
label=_("Project slug"),
max_length=256,
required=False,
widget=forms.TextInput(attrs={'size': '30'}))
repository_name = forms.CharField(
label=_("Repository name"),
max_length=256,
required=False,
widget=forms.TextInput(attrs={'size': '30'}))
api_token = forms.CharField(
label=_("API token"),
max_length=128,
required=False,
widget=forms.TextInput(attrs={'size': '60'}),
help_text=_('The API token provided by the hosting service. This is '
'needed in order to access files on this repository. '
'On GitHub, you can find this on your '
'<a href="http://github.com/account">Account</a> page '
'under "Account Admin."'))
tool = forms.ModelChoiceField(
label=_("Repository type"),
required=True,
empty_label=None,
queryset=Tool.objects.all())
bug_tracker_use_hosting = forms.BooleanField(
label=_("Use hosting service's bug tracker"),
required=False)
bug_tracker_type = forms.ChoiceField(
label=_("Type"),
required=True,
choices=[(tracker_id, info['label'])
for tracker_id, info in BUG_TRACKER_INFO.iteritems()],
initial="none")
bug_tracker_owner = forms.CharField(
label=_("Bug Tracker's owner"),
max_length=256,
required=False,
widget=forms.TextInput(attrs={'size': '30'}))
bug_tracker_project_name = forms.CharField(
label=_("Project name"),
max_length=256,
required=False,
widget=forms.TextInput(attrs={'size': '30'}))
bug_tracker_base_url = forms.CharField(
label=_("Bug tracker URL"),
max_length=256,
required=False,
widget=forms.TextInput(attrs={'size': '60'}),
help_text=_("This should be the path to the bug tracker for this "
"repository."))
def __init__(self, *args, **kwargs):
super(RepositoryForm, self).__init__(*args, **kwargs)
self.hostkeyerror = None
self.certerror = None
self.userkeyerror = None
local_site_name = None
if self.instance and self.instance.local_site:
local_site_name = self.instance.local_site.name
elif self.fields['local_site'].initial:
local_site_name = self.fields['local_site'].initial.name
self.public_key = \
sshutils.get_public_key(sshutils.get_user_key(local_site_name))
self._populate_hosting_service_fields()
self._populate_bug_tracker_fields()
def _populate_hosting_service_fields(self):
if (not self.instance or
not self.instance.path):
return
tool_name = self.instance.tool.name
for service_id, info in self.HOSTING_SERVICE_INFO.iteritems():
if (service_id == 'custom' or tool_name not in info['tools']):
continue
field_info = info['tools'][tool_name]
is_path_match, field_data = \
self._match_url(self.instance.path,
field_info['path'],
info['fields'])
if not is_path_match:
continue
if not self._match_url(self.instance.mirror_path,
field_info['mirror_path'], [])[0]:
continue
if 'raw_file_url' in field_info:
is_raw_match, raw_field_data = \
self._match_url(self.instance.raw_file_url,
field_info['raw_file_url'],
info['fields'])
if not is_raw_match:
continue
field_data.update(raw_field_data)
# It all matched.
self.fields['hosting_type'].initial = service_id
for key, value in field_data.iteritems():
self.fields[key].initial = value
break
def _populate_bug_tracker_fields(self):
if not self.instance or not self.instance.bug_tracker:
return
for tracker_id, info in self.BUG_TRACKER_INFO.iteritems():
if tracker_id == 'none':
continue
is_match, field_data = \
self._match_url(self.instance.bug_tracker,
info['format'], info['fields'])
if is_match:
self.fields['bug_tracker_type'].initial = tracker_id
for key, value in field_data.iteritems():
self.fields[key].initial = value
# Figure out whether this matches the hosting service.
if tracker_id == self.fields['hosting_type'].initial:
is_match = True
for field in info['fields']:
hosting_field = field.replace("bug_tracker_",
"hosting_")
if (self.fields[hosting_field].initial !=
self.fields[field].initial):
is_match = False
break
if is_match:
self.fields['bug_tracker_use_hosting'].initial = True
break
def _clean_hosting_info(self):
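# For non-custom hosting services, expand the service's URL templates
# (info['tools'][tool_name]) with the hosting_* field values and write the
# generated values (e.g. path, mirror_path, raw_file_url) into both
# cleaned_data and data.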
hosting_type = self.cleaned_data['hosting_type']
if hosting_type == 'custom':
return
# Should be caught during validation.
assert hosting_type in self.HOSTING_SERVICE_INFO
info = self.HOSTING_SERVICE_INFO[hosting_type]
tool_name = self.cleaned_data['tool'].name
assert tool_name in info['tools']
field_data = {}
for field in info['fields']:
field_data[field] = self.cleaned_data[field]
for field, value in info['tools'][tool_name].iteritems():
self.cleaned_data[field] = value % field_data
self.data[field] = value % field_data
def _clean_bug_tracker_info(self):
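# Builds the final bug tracker URL from either the selected tracker type
# or, when "use hosting service's bug tracker" is checked, the hosting
# fields, and stores it in cleaned_data and data. Illustrative example
# (hypothetical value): bug_tracker_type 'trac' with bug_tracker_base_url
# 'http://trac.example.com' produces 'http://trac.example.com/ticket/%s'.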
use_hosting = self.cleaned_data['bug_tracker_use_hosting']
bug_tracker_type = self.cleaned_data['bug_tracker_type']
if bug_tracker_type == 'none' and not use_hosting:
self.instance.bug_tracker = ""
return
if use_hosting:
match_type = self.cleaned_data['hosting_type']
else:
match_type = bug_tracker_type
assert match_type in self.BUG_TRACKER_INFO
info = self.BUG_TRACKER_INFO[match_type]
field_data = {}
for field in info['fields']:
src_field = field
if use_hosting:
src_field = src_field.replace("bug_tracker_", "hosting_")
field_data[field] = self.cleaned_data[src_field]
bug_tracker_url = info['format'] % field_data
self.cleaned_data['bug_tracker'] = bug_tracker_url
self.data['bug_tracker'] = bug_tracker_url
def full_clean(self):
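# 'path' is only required for the 'custom' hosting type (otherwise it is
# generated from the hosting fields), and 'bug_tracker_type' is only
# required when the hosting service's own bug tracker is not being used.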
if self.data:
hosting_type = (self['hosting_type'].data or
self.fields['hosting_type'].initial)
use_hosting = (self['bug_tracker_use_hosting'].data or
self.fields['bug_tracker_use_hosting'].initial)
self.fields['path'].required = (hosting_type == "custom")
self.fields['bug_tracker_type'].required = not use_hosting
return super(RepositoryForm, self).full_clean()
def clean(self):
"""
Performs validation on the form.
This will check the form fields for errors, calling out to the
various clean_* methods.
It will check the repository path to see if it represents
a valid repository and if an SSH key or HTTPS certificate needs
to be verified.
This will also build repository and bug tracker URLs based on other
fields set in the form.
"""
self._clean_hosting_info()
self._clean_bug_tracker_info()
validate_review_groups(self)
validate_users(self)
if not self.cleaned_data['reedit_repository']:
self._verify_repository_path()
return super(RepositoryForm, self).clean()
def clean_path(self):
return self.cleaned_data['path'].strip()
def clean_mirror_path(self):
return self.cleaned_data['mirror_path'].strip()
def clean_bug_tracker_base_url(self):
data = self.cleaned_data['bug_tracker_base_url']
return data.rstrip("/")
def clean_tool(self):
"""
Checks the SCMTool used for this repository for dependencies.
If one or more dependencies aren't found, they will be presented
as validation errors.
"""
tool = self.cleaned_data['tool']
scmtool_class = tool.get_scmtool_class()
errors = []
for dep in scmtool_class.dependencies.get('modules', []):
try:
imp.find_module(dep)
except ImportError:
errors.append('The Python module "%s" is not installed. '
'You may need to restart the server '
'after installing it.' % dep)
for dep in scmtool_class.dependencies.get('executables', []):
if not is_exe_in_path(dep):
if sys.platform == 'win32':
exe_name = '%s.exe' % dep
else:
exe_name = dep
errors.append('The executable "%s" is not in the path.' %
exe_name)
if errors:
raise forms.ValidationError(errors)
return tool
def is_valid(self):
"""
Returns whether or not the form is valid.
This will return True if the form fields are all valid, there is no
certificate, host key, or user key error, and the form isn't being
re-displayed after canceling an SSH key or HTTPS certificate
verification.
"""
return (super(RepositoryForm, self).is_valid() and
not self.hostkeyerror and
not self.certerror and
not self.userkeyerror and
not self.cleaned_data['reedit_repository'])
def _match_url(self, url, format, fields):
"""
Matches a URL against a format string.
This will determine if the URL can be represented by the format
string. If so, the URL will be parsed for the list of fields and
returned.
The result is in the form of (bool, field_dict).
"""
def replace_match_group(m):
name = m.group(1)
if name in found_groups:
return r'(?P=%s)' % name
else:
found_groups[name] = True
return r'(?P<%s>[A-Za-z0-9:/._-]+)' % name
# First, transform our Python format-style pattern to a regex.
pattern = format.replace("%%s", "%s")
pattern = pattern.replace("?", "\?")
pattern = pattern.replace("+", "\+")
# A list of match groups to replace that we've already found.
# re.sub will get angry if it sees two with the same name.
found_groups = {}
pattern = self.FORMAT_STR_RE.sub(replace_match_group, pattern)
m = re.match(pattern, url)
if not m:
return False, {}
field_data = {}
for field in fields:
try:
field_data[field] = m.group(field)
except IndexError:
pass
return True, field_data
def _verify_repository_path(self):
"""
Verifies the repository path to check if it's valid.
This will check if the repository exists and if an SSH key or
HTTPS certificate needs to be verified.
"""
tool = self.cleaned_data.get('tool', None)
if not tool:
# This failed validation earlier, so bail.
return
scmtool_class = tool.get_scmtool_class()
path = self.cleaned_data['path']
username = self.cleaned_data['username']
password = self.cleaned_data['password']
local_site_name = None
if self.cleaned_data['local_site']:
try:
local_site = self.cleaned_data['local_site']
local_site_name = local_site.name
except LocalSite.DoesNotExist, e:
raise forms.ValidationError(e)
while 1:
# Keep doing this until we have an error we don't want
# to ignore, or it's successful.
try:
scmtool_class.check_repository(path, username, password,
local_site_name)
# Success.
break
except BadHostKeyError, e:
if self.cleaned_data['trust_host']:
try:
sshutils.replace_host_key(e.hostname,
e.raw_expected_key,
e.raw_key,
local_site_name)
except IOError, e:
raise forms.ValidationError(e)
else:
self.hostkeyerror = e
break
except UnknownHostKeyError, e:
if self.cleaned_data['trust_host']:
try:
sshutils.add_host_key(e.hostname, e.raw_key,
local_site_name)
except IOError, e:
raise forms.ValidationError(e)
else:
self.hostkeyerror = e
break
except UnverifiedCertificateError, e:
if self.cleaned_data['trust_host']:
try:
scmtool_class.accept_certificate(path, local_site_name)
except IOError, e:
raise forms.ValidationError(e)
else:
self.certerror = e
break
except AuthenticationError, e:
if 'publickey' in e.allowed_types and e.user_key is None:
self.userkeyerror = e
break
raise forms.ValidationError(e)
except Exception, e:
raise forms.ValidationError(e)
class Meta:
model = Repository
widgets = {
'path': forms.TextInput(attrs={'size': '60'}),
'mirror_path': forms.TextInput(attrs={'size': '60'}),
'raw_file_url': forms.TextInput(attrs={'size': '60'}),
'bug_tracker': forms.TextInput(attrs={'size': '60'}),
'username': forms.TextInput(attrs={'size': '30',
'autocomplete': 'off'}),
'password': forms.PasswordInput(attrs={'size': '30',
'autocomplete': 'off'}),
'users': FilteredSelectMultiple(_('users with access'), False),
'review_groups': FilteredSelectMultiple(
_('review groups with access'), False),
}
|
|
#!/usr/bin/env python
'''
The reporter module is in charge of producing the HTML report as well as
providing plugins with common HTML rendering functions.
'''
import cgi
import codecs
from tornado.template import Template, Loader
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.dependency_management.interfaces import ReporterInterface
from framework.lib.general import *
from framework.interface.html.filter import sanitiser
class Reporter(BaseComponent, ReporterInterface):
COMPONENT_NAME = "reporter"
def __init__(self):
self.register_in_service_locator()
self.config = self.get_component("config")
self.resource = self.get_component("resource")
self.transaction = self.get_component("transaction")
self.plugin_handler = self.get_component("plugin_handler")
self.requester = None
self.Init = False
self.Sanitiser = sanitiser.HTMLSanitiser()
self.Loader = Loader(self.config.FrameworkConfigGet('POUTPUT_TEMPLATES_DIR'))
self.mNumLinesToShow = 15
self.CounterList = []
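# self.requester is set to None above and resolved later in init(),
# presumably because the requester component is not yet registered when
# the reporter is constructed.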
def init(self):
self.requester = self.get_component("requester")
def TransactionTableFromIDs(self, TransactionIDs, NumLinesReq=15, NumLinesRes=15):
""" Draws a table of HTTP Transactions """
# Note: NumLinesReq and NumLinesRes are accepted but not currently used
# by this method.
transactions = self.transaction.GetByIDs(TransactionIDs)
return self.TransactionTableForTransactions(transactions)
def TransactionTableForURL(self, UseCache, URL, Method=None, Data=None):
transaction = self.requester.GetTransaction(
UseCache, URL, method=Method, data=Data)
return self.TransactionTableForTransactions([transaction])
def TransactionTableForURLList(
self,
UseCache,
URLList,
Method=None,
Data=None):
transactions = self.requester.GetTransactions(
UseCache,
URLList,
method=Method,
data=Data)
return self.TransactionTableForTransactions(transactions)
def TransactionTableForTransactions(self, Transactions):
return self.Loader.load("transaction_table.html").generate(TransactionList=Transactions)
def unicode(self, *args):
try:
return unicode(*args)
except TypeError:
return args[0] # Input is already Unicode
def sanitize_html(self, RawHTML):
return self.Sanitiser.CleanThirdPartyHTML(RawHTML)
def reset_loader(self):
return self.Loader.reset()
#----------------------------------- Methods exported from plugin_helper.py ---------------------------------
def CommandTable(self, Command):
return self.Loader.load("command_table.html").generate(Command=Command)
def LinkList(self, LinkListName, Links):
"""
Wrapper that renders a plain list of links (no display names) as
resource links, using each link itself as its name.
"""
return self.Loader.load("link_list.html").generate(
LinkListName=LinkListName,
Links=Links)
def ResourceLinkList(self, ResourceListName, ResourceList):
"""
Draws a list of resource links for the given resource list.
"""
return self.Loader.load("resource_link_list.html").generate(
ResourceListName=ResourceListName,
ResourceList=ResourceList)
def TabbedResourceLinkList(self, ResourcesList):
"""
ResourcesList = [
["ResourceListName1", [["Name1", "Resource1"], ["Name2", "Resource2"]]],
...
]
"""
TabData = []
Resources = []
for ResourceListName, ResourceList in ResourcesList:
TabID = ResourceListName.replace(' ', '_')
TabData.append([ResourceListName, TabID])
Resources.append([TabID, ResourceList])
return self.Loader.load("tabbed_resource_link_list.html").generate(
TabData=TabData,
Resources=Resources)
def ListPostProcessing(self, ResourceListName, LinkList, HTMLLinkList):
return self.Loader.load("list_post_processing.html").generate(
ResourceListName=ResourceListName,
LinkList=LinkList,
HTMLLinkList=HTMLLinkList)
def RequestLinkList(self, ResourceListName, LinkList):
return self.Loader.load("request_link_list.html").generate(
ResourceListName=ResourceListName,
LinkList=LinkList)
def VulnerabilitySearchBox(self, SearchStr):
"""
Draws an HTML Search box for defined Vuln Search resources
"""
VulnSearchResources = self.resource.GetResources('VulnSearch')
return self.Loader.load("vulnerability_search_box.html").generate(
SearchStr=SearchStr,
VulnSearchResources=VulnSearchResources)
def SuggestedCommandBox(
self,
PluginOutputDir,
CommandCategoryList,
Header=''):
"""
Draws HTML tabs for a list of TabName => Resource Group (i.e. how to run hydra, etc)
"""
TitleList = []
CommandList = []
for item in CommandCategoryList:
TitleList.append(item[0])
CommandList.append(self.resource.GetResources(item[1]))
return self.Loader.load("suggested_command_box.html").generate(
Header=Header,
TitleList=TitleList,
CommandList=CommandList) # TODO: Fix up the plugin
def CommandDump(
self,
Name,
CommandIntro,
ModifiedCommand,
RelativeFilePath,
OutputIntro,
TimeStr):
AbsPath = self.plugin_handler.RetrieveAbsPath(RelativeFilePath)
# Read the command output and truncate it to the first
# mNumLinesToShow lines when it is too long.
with open(AbsPath, "r") as output_file:
OutputLines = output_file.readlines()
longOutput = (len(OutputLines) > self.mNumLinesToShow)
if longOutput:
OutputLines = ''.join(OutputLines[0:self.mNumLinesToShow])
else:
OutputLines = ''.join(OutputLines)
table_vars = {
"Name": Name,
"CommandIntro": CommandIntro,
"ModifiedCommand": ModifiedCommand,
"FilePath": RelativeFilePath,
"OutputIntro": OutputIntro,
"OutputLines": OutputLines,
"TimeStr": TimeStr,
"mNumLinesToShow": self.mNumLinesToShow,
"longOutput": longOutput
}
return self.Loader.load("command_dump.html").generate(**table_vars)
def URLsFromStr(self, TimeStr, VisitURLs, URLList, NumFound):
html_content = self.Loader.load("urls_from_str.html").generate(
TimeStr=TimeStr,
VisitURLs=VisitURLs,
NumURLs=len(URLList),
NumFound=NumFound)
if URLList:
html_content += self.LinkList("URLs Scraped", URLList)
return html_content
def Robots(
self,
NotStr,
NumLines,
NumAllow,
NumDisallow,
NumSitemap,
SavePath,
EntriesList,
NumAddedURLs):
vars = {
"robots_found": NotStr,
"num_lines": NumLines,
"num_allow": NumAllow,
"num_disallow": NumDisallow,
"num_sitemap": NumSitemap,
"save_path": SavePath,
}
TestResult = self.Loader.load("robots.html").generate(**vars)
# robots.txt contains some entries, show browsable list! :)
if NumDisallow > 0 or NumAllow > 0 or NumSitemap > 0:
for Display, Links in EntriesList:
if Links: # Filters empty lists
TestResult += self.ResourceLinkList(Display, Links)
return TestResult
def HtmlString(self, String):
return String
# ---------------------- Grep Plugin Outputs -------------------- #
def ResponseBodyMatches(self, ResponseRegexpName):
RegexpName, GrepOutputs, TransactionIDS, match_percent = self.transaction.SearchByRegexName(ResponseRegexpName, stats=True)
variables = {
"name": RegexpName.replace("RESPONSE_REGEXP_FOR_", "").replace(
'_', ' '),
"matches": GrepOutputs,
"transaction_ids": TransactionIDS,
"match_percent": match_percent
}
return self.Loader.load("response_matches.html").generate(**variables)
def ResponseHeaderMatches(self, HeaderRegexpName):
return self.ResearchHeaders(HeaderRegexpName)[0]
def ResearchHeaders(self, RegexName):
regex_name, grep_outputs, transaction_ids, match_percent = self.transaction.SearchByRegexName(RegexName, stats=True)
# [[unique_matches, matched_transactions, matched_percentage]]
return [self.Loader.load("header_searches.html").generate(
match_percent=match_percent,
matches=grep_outputs,
transaction_ids=transaction_ids), grep_outputs]
def FingerprintData(self):
HeaderTable, matches = self.ResearchHeaders('HEADERS_FOR_FINGERPRINT')
for item in matches:
# Add Vulnerability search boxes after table
HeaderTable += self.VulnerabilitySearchBox(item[1])
return HeaderTable
def TopTransactionsBySpeed(self, Order):
transactions = self.transaction.GetTopTransactionsBySpeed(Order)
return self.TransactionTableForTransactions(transactions)
def CookieAttributeAnalysis(self, CookieValueList, Header2TransacDict):
# Template variables prepared for a template-based rendering of this
# method; they are currently unused and the legacy self.Render table
# helper below is still the active code path.
vars = {
"Cookies": [{
"Name": Cookie.split('=')[0],
"Link": Header2TransacDict[self.config.Get('HEADERS_FOR_COOKIES').lower() + Cookie],
"Attribs": Cookie.replace(Cookie.split('=')[0] + "=", "").replace("; ", ";").split(";"),
} for Cookie in CookieValueList],
}
Table = self.Render.CreateTable({'class': 'report_intro'})
SetCookie = self.config.Get('HEADERS_FOR_COOKIES').lower()
PossibleCookieAttributes = self.config.Get('COOKIE_ATTRIBUTES').split(',')
for Cookie in CookieValueList:
CookieName = Cookie.split('=')[0]
CookieLink = self.Render.DrawButtonLink(cgi.escape(CookieName), Header2TransacDict[SetCookie + Cookie])
CookieAttribs = Cookie.replace(CookieName + "=", "").replace("; ", ";").split(";")
Table.CreateCustomRow('<tr><th colspan="2">Cookie: ' + CookieLink + '</th></tr>')
Table.CreateRow(['Attribute', 'Value'], True)
NotFoundStr = "<b>Not Found</b>"
if CookieAttribs[0]:
CookieValue = CookieAttribs[0]
else:
CookieValue = NotFoundStr
Table.CreateRow(['Value', CookieValue])
for Attrib in PossibleCookieAttributes:
DisplayAttribute = NotFoundStr
for PresentAttrib in CookieAttribs:
# Match on the attribute name prefix to avoid false positives
# caused by the cookie contents.
if PresentAttrib.lower().startswith(Attrib.lower()):
DisplayAttribute = PresentAttrib
break
Table.CreateRow([Attrib, DisplayAttribute])
if Table.GetNumRows() == 0:
return ""  # No attributes found
return "<h3>Cookie Attribute Analysis</h3>" + Table.Render()
|
|
#! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.descriptor_pool."""
__author__ = '[email protected] (Matt Toia)'
import os
import sys
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_import_public_pb2
from google.protobuf import unittest_pb2
from google.protobuf import descriptor_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import descriptor_pool_test1_pb2
from google.protobuf.internal import descriptor_pool_test2_pb2
from google.protobuf.internal import factory_test1_pb2
from google.protobuf.internal import factory_test2_pb2
from google.protobuf.internal import file_options_test_pb2
from google.protobuf.internal import more_messages_pb2
from google.protobuf import descriptor
from google.protobuf import descriptor_database
from google.protobuf import descriptor_pool
from google.protobuf import message_factory
from google.protobuf import symbol_database
class DescriptorPoolTest(unittest.TestCase):
def setUp(self):
# TODO(jieluo): Should make the pool which is created by
# serialized_pb the same as the generated pool.
# TODO(jieluo): More test coverage for the generated pool.
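# setUp builds a standalone (non-default) DescriptorPool and populates it
# with FileDescriptorProto messages parsed from the serialized_pb of the
# generated modules, so the lookups below exercise the explicitly built
# pool rather than the generated default pool.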
self.pool = descriptor_pool.DescriptorPool()
self.factory_test1_fd = descriptor_pb2.FileDescriptorProto.FromString(
factory_test1_pb2.DESCRIPTOR.serialized_pb)
self.factory_test2_fd = descriptor_pb2.FileDescriptorProto.FromString(
factory_test2_pb2.DESCRIPTOR.serialized_pb)
self.pool.Add(self.factory_test1_fd)
self.pool.Add(self.factory_test2_fd)
self.pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_import_public_pb2.DESCRIPTOR.serialized_pb))
self.pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_import_pb2.DESCRIPTOR.serialized_pb))
self.pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_pb2.DESCRIPTOR.serialized_pb))
def testFindFileByName(self):
name1 = 'google/protobuf/internal/factory_test1.proto'
file_desc1 = self.pool.FindFileByName(name1)
self.assertIsInstance(file_desc1, descriptor.FileDescriptor)
self.assertEqual(name1, file_desc1.name)
self.assertEqual('google.protobuf.python.internal', file_desc1.package)
self.assertIn('Factory1Message', file_desc1.message_types_by_name)
name2 = 'google/protobuf/internal/factory_test2.proto'
file_desc2 = self.pool.FindFileByName(name2)
self.assertIsInstance(file_desc2, descriptor.FileDescriptor)
self.assertEqual(name2, file_desc2.name)
self.assertEqual('google.protobuf.python.internal', file_desc2.package)
self.assertIn('Factory2Message', file_desc2.message_types_by_name)
def testFindFileByNameFailure(self):
with self.assertRaises(KeyError):
self.pool.FindFileByName('Does not exist')
def testFindFileContainingSymbol(self):
file_desc1 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory1Message')
self.assertIsInstance(file_desc1, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test1.proto',
file_desc1.name)
self.assertEqual('google.protobuf.python.internal', file_desc1.package)
self.assertIn('Factory1Message', file_desc1.message_types_by_name)
file_desc2 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message')
self.assertIsInstance(file_desc2, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test2.proto',
file_desc2.name)
self.assertEqual('google.protobuf.python.internal', file_desc2.package)
self.assertIn('Factory2Message', file_desc2.message_types_by_name)
# Tests top level extension.
file_desc3 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.another_field')
self.assertIsInstance(file_desc3, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test2.proto',
file_desc3.name)
# Tests nested extension inside a message.
file_desc4 = self.pool.FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message.one_more_field')
self.assertIsInstance(file_desc4, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/internal/factory_test2.proto',
file_desc4.name)
file_desc5 = self.pool.FindFileContainingSymbol(
'protobuf_unittest.TestService')
self.assertIsInstance(file_desc5, descriptor.FileDescriptor)
self.assertEqual('google/protobuf/unittest.proto',
file_desc5.name)
# Tests the generated pool.
assert descriptor_pool.Default().FindFileContainingSymbol(
'google.protobuf.python.internal.Factory2Message.one_more_field')
assert descriptor_pool.Default().FindFileContainingSymbol(
'google.protobuf.python.internal.another_field')
assert descriptor_pool.Default().FindFileContainingSymbol(
'protobuf_unittest.TestService')
def testFindFileContainingSymbolFailure(self):
with self.assertRaises(KeyError):
self.pool.FindFileContainingSymbol('Does not exist')
def testFindMessageTypeByName(self):
msg1 = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory1Message')
self.assertIsInstance(msg1, descriptor.Descriptor)
self.assertEqual('Factory1Message', msg1.name)
self.assertEqual('google.protobuf.python.internal.Factory1Message',
msg1.full_name)
self.assertEqual(None, msg1.containing_type)
self.assertFalse(msg1.has_options)
nested_msg1 = msg1.nested_types[0]
self.assertEqual('NestedFactory1Message', nested_msg1.name)
self.assertEqual(msg1, nested_msg1.containing_type)
nested_enum1 = msg1.enum_types[0]
self.assertEqual('NestedFactory1Enum', nested_enum1.name)
self.assertEqual(msg1, nested_enum1.containing_type)
self.assertEqual(nested_msg1, msg1.fields_by_name[
'nested_factory_1_message'].message_type)
self.assertEqual(nested_enum1, msg1.fields_by_name[
'nested_factory_1_enum'].enum_type)
msg2 = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message')
self.assertIsInstance(msg2, descriptor.Descriptor)
self.assertEqual('Factory2Message', msg2.name)
self.assertEqual('google.protobuf.python.internal.Factory2Message',
msg2.full_name)
self.assertIsNone(msg2.containing_type)
nested_msg2 = msg2.nested_types[0]
self.assertEqual('NestedFactory2Message', nested_msg2.name)
self.assertEqual(msg2, nested_msg2.containing_type)
nested_enum2 = msg2.enum_types[0]
self.assertEqual('NestedFactory2Enum', nested_enum2.name)
self.assertEqual(msg2, nested_enum2.containing_type)
self.assertEqual(nested_msg2, msg2.fields_by_name[
'nested_factory_2_message'].message_type)
self.assertEqual(nested_enum2, msg2.fields_by_name[
'nested_factory_2_enum'].enum_type)
self.assertTrue(msg2.fields_by_name['int_with_default'].has_default_value)
self.assertEqual(
1776, msg2.fields_by_name['int_with_default'].default_value)
self.assertTrue(
msg2.fields_by_name['double_with_default'].has_default_value)
self.assertEqual(
9.99, msg2.fields_by_name['double_with_default'].default_value)
self.assertTrue(
msg2.fields_by_name['string_with_default'].has_default_value)
self.assertEqual(
'hello world', msg2.fields_by_name['string_with_default'].default_value)
self.assertTrue(msg2.fields_by_name['bool_with_default'].has_default_value)
self.assertFalse(msg2.fields_by_name['bool_with_default'].default_value)
self.assertTrue(msg2.fields_by_name['enum_with_default'].has_default_value)
self.assertEqual(
1, msg2.fields_by_name['enum_with_default'].default_value)
msg3 = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message.NestedFactory2Message')
self.assertEqual(nested_msg2, msg3)
self.assertTrue(msg2.fields_by_name['bytes_with_default'].has_default_value)
self.assertEqual(
b'a\xfb\x00c',
msg2.fields_by_name['bytes_with_default'].default_value)
self.assertEqual(1, len(msg2.oneofs))
self.assertEqual(1, len(msg2.oneofs_by_name))
self.assertEqual(2, len(msg2.oneofs[0].fields))
for name in ['oneof_int', 'oneof_string']:
self.assertEqual(msg2.oneofs[0],
msg2.fields_by_name[name].containing_oneof)
self.assertIn(msg2.fields_by_name[name], msg2.oneofs[0].fields)
def testFindMessageTypeByNameFailure(self):
with self.assertRaises(KeyError):
self.pool.FindMessageTypeByName('Does not exist')
def testFindEnumTypeByName(self):
enum1 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory1Enum')
self.assertIsInstance(enum1, descriptor.EnumDescriptor)
self.assertEqual(0, enum1.values_by_name['FACTORY_1_VALUE_0'].number)
self.assertEqual(1, enum1.values_by_name['FACTORY_1_VALUE_1'].number)
self.assertFalse(enum1.has_options)
nested_enum1 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory1Message.NestedFactory1Enum')
self.assertIsInstance(nested_enum1, descriptor.EnumDescriptor)
self.assertEqual(
0, nested_enum1.values_by_name['NESTED_FACTORY_1_VALUE_0'].number)
self.assertEqual(
1, nested_enum1.values_by_name['NESTED_FACTORY_1_VALUE_1'].number)
enum2 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory2Enum')
self.assertIsInstance(enum2, descriptor.EnumDescriptor)
self.assertEqual(0, enum2.values_by_name['FACTORY_2_VALUE_0'].number)
self.assertEqual(1, enum2.values_by_name['FACTORY_2_VALUE_1'].number)
nested_enum2 = self.pool.FindEnumTypeByName(
'google.protobuf.python.internal.Factory2Message.NestedFactory2Enum')
self.assertIsInstance(nested_enum2, descriptor.EnumDescriptor)
self.assertEqual(
0, nested_enum2.values_by_name['NESTED_FACTORY_2_VALUE_0'].number)
self.assertEqual(
1, nested_enum2.values_by_name['NESTED_FACTORY_2_VALUE_1'].number)
def testFindEnumTypeByNameFailure(self):
with self.assertRaises(KeyError):
self.pool.FindEnumTypeByName('Does not exist')
def testFindFieldByName(self):
field = self.pool.FindFieldByName(
'google.protobuf.python.internal.Factory1Message.list_value')
self.assertEqual(field.name, 'list_value')
self.assertEqual(field.label, field.LABEL_REPEATED)
self.assertFalse(field.has_options)
with self.assertRaises(KeyError):
self.pool.FindFieldByName('Does not exist')
def testFindExtensionByName(self):
# An extension defined in a message.
extension = self.pool.FindExtensionByName(
'google.protobuf.python.internal.Factory2Message.one_more_field')
self.assertEqual(extension.name, 'one_more_field')
# An extension defined at file scope.
extension = self.pool.FindExtensionByName(
'google.protobuf.python.internal.another_field')
self.assertEqual(extension.name, 'another_field')
self.assertEqual(extension.number, 1002)
with self.assertRaises(KeyError):
self.pool.FindFieldByName('Does not exist')
def testFindAllExtensions(self):
factory1_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory1Message')
factory2_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message')
# An extension defined in a message.
one_more_field = factory2_message.extensions_by_name['one_more_field']
self.pool.AddExtensionDescriptor(one_more_field)
# An extension defined at file scope.
factory_test2 = self.pool.FindFileByName(
'google/protobuf/internal/factory_test2.proto')
another_field = factory_test2.extensions_by_name['another_field']
self.pool.AddExtensionDescriptor(another_field)
extensions = self.pool.FindAllExtensions(factory1_message)
expected_extension_numbers = set([one_more_field, another_field])
self.assertEqual(expected_extension_numbers, set(extensions))
# Verify that mutating the returned list does not affect the pool.
extensions.append('unexpected_element')
# Get the extensions again; the returned list does not contain the
# 'unexpected_element'.
extensions = self.pool.FindAllExtensions(factory1_message)
self.assertEqual(expected_extension_numbers, set(extensions))
def testFindExtensionByNumber(self):
factory1_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory1Message')
factory2_message = self.pool.FindMessageTypeByName(
'google.protobuf.python.internal.Factory2Message')
# An extension defined in a message.
one_more_field = factory2_message.extensions_by_name['one_more_field']
self.pool.AddExtensionDescriptor(one_more_field)
# An extension defined at file scope.
factory_test2 = self.pool.FindFileByName(
'google/protobuf/internal/factory_test2.proto')
another_field = factory_test2.extensions_by_name['another_field']
self.pool.AddExtensionDescriptor(another_field)
# An extension defined in a message.
extension = self.pool.FindExtensionByNumber(factory1_message, 1001)
self.assertEqual(extension.name, 'one_more_field')
# An extension defined at file scope.
extension = self.pool.FindExtensionByNumber(factory1_message, 1002)
self.assertEqual(extension.name, 'another_field')
with self.assertRaises(KeyError):
extension = self.pool.FindExtensionByNumber(factory1_message, 1234567)
def testExtensionsAreNotFields(self):
with self.assertRaises(KeyError):
self.pool.FindFieldByName('google.protobuf.python.internal.another_field')
with self.assertRaises(KeyError):
self.pool.FindFieldByName(
'google.protobuf.python.internal.Factory2Message.one_more_field')
with self.assertRaises(KeyError):
self.pool.FindExtensionByName(
'google.protobuf.python.internal.Factory1Message.list_value')
def testFindService(self):
service = self.pool.FindServiceByName('protobuf_unittest.TestService')
self.assertEqual(service.full_name, 'protobuf_unittest.TestService')
def testUserDefinedDB(self):
db = descriptor_database.DescriptorDatabase()
self.pool = descriptor_pool.DescriptorPool(db)
db.Add(self.factory_test1_fd)
db.Add(self.factory_test2_fd)
self.testFindMessageTypeByName()
def testAddSerializedFile(self):
self.pool = descriptor_pool.DescriptorPool()
self.pool.AddSerializedFile(self.factory_test1_fd.SerializeToString())
self.pool.AddSerializedFile(self.factory_test2_fd.SerializeToString())
self.testFindMessageTypeByName()
def testComplexNesting(self):
more_messages_desc = descriptor_pb2.FileDescriptorProto.FromString(
more_messages_pb2.DESCRIPTOR.serialized_pb)
test1_desc = descriptor_pb2.FileDescriptorProto.FromString(
descriptor_pool_test1_pb2.DESCRIPTOR.serialized_pb)
test2_desc = descriptor_pb2.FileDescriptorProto.FromString(
descriptor_pool_test2_pb2.DESCRIPTOR.serialized_pb)
self.pool.Add(more_messages_desc)
self.pool.Add(test1_desc)
self.pool.Add(test2_desc)
TEST1_FILE.CheckFile(self, self.pool)
TEST2_FILE.CheckFile(self, self.pool)
def testEnumDefaultValue(self):
"""Test the default value of enums which don't start at zero."""
def _CheckDefaultValue(file_descriptor):
default_value = (file_descriptor
.message_types_by_name['DescriptorPoolTest1']
.fields_by_name['nested_enum']
.default_value)
self.assertEqual(default_value,
descriptor_pool_test1_pb2.DescriptorPoolTest1.BETA)
# First check what the generated descriptor contains.
_CheckDefaultValue(descriptor_pool_test1_pb2.DESCRIPTOR)
# Then check the generated pool. Normally this is the same descriptor.
file_descriptor = symbol_database.Default().pool.FindFileByName(
'google/protobuf/internal/descriptor_pool_test1.proto')
self.assertIs(file_descriptor, descriptor_pool_test1_pb2.DESCRIPTOR)
_CheckDefaultValue(file_descriptor)
# Then check the dynamic pool and its internal DescriptorDatabase.
descriptor_proto = descriptor_pb2.FileDescriptorProto.FromString(
descriptor_pool_test1_pb2.DESCRIPTOR.serialized_pb)
self.pool.Add(descriptor_proto)
# And do the same check as above
file_descriptor = self.pool.FindFileByName(
'google/protobuf/internal/descriptor_pool_test1.proto')
_CheckDefaultValue(file_descriptor)
def testDefaultValueForCustomMessages(self):
"""Check the value returned by non-existent fields."""
def _CheckValueAndType(value, expected_value, expected_type):
self.assertEqual(value, expected_value)
self.assertIsInstance(value, expected_type)
def _CheckDefaultValues(msg):
try:
int64 = long
except NameError: # Python3
int64 = int
try:
unicode_type = unicode
except NameError: # Python3
unicode_type = str
_CheckValueAndType(msg.optional_int32, 0, int)
_CheckValueAndType(msg.optional_uint64, 0, (int64, int))
_CheckValueAndType(msg.optional_float, 0, (float, int))
_CheckValueAndType(msg.optional_double, 0, (float, int))
_CheckValueAndType(msg.optional_bool, False, bool)
_CheckValueAndType(msg.optional_string, u'', unicode_type)
_CheckValueAndType(msg.optional_bytes, b'', bytes)
_CheckValueAndType(msg.optional_nested_enum, msg.FOO, int)
# First for the generated message
_CheckDefaultValues(unittest_pb2.TestAllTypes())
# Then for a message built from the DescriptorPool.
pool = descriptor_pool.DescriptorPool()
pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_import_public_pb2.DESCRIPTOR.serialized_pb))
pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_import_pb2.DESCRIPTOR.serialized_pb))
pool.Add(descriptor_pb2.FileDescriptorProto.FromString(
unittest_pb2.DESCRIPTOR.serialized_pb))
message_class = message_factory.MessageFactory(pool).GetPrototype(
pool.FindMessageTypeByName(
unittest_pb2.TestAllTypes.DESCRIPTOR.full_name))
_CheckDefaultValues(message_class())
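# The helper classes below (ProtoFile, MessageType, EnumType, EnumField,
# MessageField, StringField, ExtensionField) form a small declarative
# description of a .proto file; their CheckFile/CheckType/CheckField
# methods walk a DescriptorPool and assert that its descriptors match.
# TEST1_FILE and TEST2_FILE at the bottom of the module are the instances
# used by testComplexNesting above.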
class ProtoFile(object):
def __init__(self, name, package, messages, dependencies=None,
public_dependencies=None):
self.name = name
self.package = package
self.messages = messages
self.dependencies = dependencies or []
self.public_dependencies = public_dependencies or []
def CheckFile(self, test, pool):
file_desc = pool.FindFileByName(self.name)
test.assertEqual(self.name, file_desc.name)
test.assertEqual(self.package, file_desc.package)
dependencies_names = [f.name for f in file_desc.dependencies]
test.assertEqual(self.dependencies, dependencies_names)
public_dependencies_names = [f.name for f in file_desc.public_dependencies]
test.assertEqual(self.public_dependencies, public_dependencies_names)
for name, msg_type in self.messages.items():
msg_type.CheckType(test, None, name, file_desc)
class EnumType(object):
def __init__(self, values):
self.values = values
def CheckType(self, test, msg_desc, name, file_desc):
enum_desc = msg_desc.enum_types_by_name[name]
test.assertEqual(name, enum_desc.name)
expected_enum_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_enum_full_name, enum_desc.full_name)
test.assertEqual(msg_desc, enum_desc.containing_type)
test.assertEqual(file_desc, enum_desc.file)
for index, (value, number) in enumerate(self.values):
value_desc = enum_desc.values_by_name[value]
test.assertEqual(value, value_desc.name)
test.assertEqual(index, value_desc.index)
test.assertEqual(number, value_desc.number)
test.assertEqual(enum_desc, value_desc.type)
test.assertIn(value, msg_desc.enum_values_by_name)
class MessageType(object):
def __init__(self, type_dict, field_list, is_extendable=False,
extensions=None):
self.type_dict = type_dict
self.field_list = field_list
self.is_extendable = is_extendable
self.extensions = extensions or []
def CheckType(self, test, containing_type_desc, name, file_desc):
if containing_type_desc is None:
desc = file_desc.message_types_by_name[name]
expected_full_name = '.'.join([file_desc.package, name])
else:
desc = containing_type_desc.nested_types_by_name[name]
expected_full_name = '.'.join([containing_type_desc.full_name, name])
test.assertEqual(name, desc.name)
test.assertEqual(expected_full_name, desc.full_name)
test.assertEqual(containing_type_desc, desc.containing_type)
test.assertEqual(desc.file, file_desc)
test.assertEqual(self.is_extendable, desc.is_extendable)
for name, subtype in self.type_dict.items():
subtype.CheckType(test, desc, name, file_desc)
for index, (name, field) in enumerate(self.field_list):
field.CheckField(test, desc, name, index, file_desc)
for index, (name, field) in enumerate(self.extensions):
field.CheckField(test, desc, name, index, file_desc)
class EnumField(object):
def __init__(self, number, type_name, default_value):
self.number = number
self.type_name = type_name
self.default_value = default_value
def CheckField(self, test, msg_desc, name, index, file_desc):
field_desc = msg_desc.fields_by_name[name]
enum_desc = msg_desc.enum_types_by_name[self.type_name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(index, field_desc.index)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(descriptor.FieldDescriptor.TYPE_ENUM, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_ENUM,
field_desc.cpp_type)
test.assertTrue(field_desc.has_default_value)
test.assertEqual(enum_desc.values_by_name[self.default_value].number,
field_desc.default_value)
test.assertFalse(enum_desc.values_by_name[self.default_value].has_options)
test.assertEqual(msg_desc, field_desc.containing_type)
test.assertEqual(enum_desc, field_desc.enum_type)
test.assertEqual(file_desc, enum_desc.file)
class MessageField(object):
def __init__(self, number, type_name):
self.number = number
self.type_name = type_name
def CheckField(self, test, msg_desc, name, index, file_desc):
field_desc = msg_desc.fields_by_name[name]
field_type_desc = msg_desc.nested_types_by_name[self.type_name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(index, field_desc.index)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(descriptor.FieldDescriptor.TYPE_MESSAGE, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_MESSAGE,
field_desc.cpp_type)
test.assertFalse(field_desc.has_default_value)
test.assertEqual(msg_desc, field_desc.containing_type)
test.assertEqual(field_type_desc, field_desc.message_type)
test.assertEqual(file_desc, field_desc.file)
class StringField(object):
def __init__(self, number, default_value):
self.number = number
self.default_value = default_value
def CheckField(self, test, msg_desc, name, index, file_desc):
field_desc = msg_desc.fields_by_name[name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(index, field_desc.index)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(descriptor.FieldDescriptor.TYPE_STRING, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_STRING,
field_desc.cpp_type)
test.assertTrue(field_desc.has_default_value)
test.assertEqual(self.default_value, field_desc.default_value)
test.assertEqual(file_desc, field_desc.file)
class ExtensionField(object):
def __init__(self, number, extended_type):
self.number = number
self.extended_type = extended_type
def CheckField(self, test, msg_desc, name, index, file_desc):
field_desc = msg_desc.extensions_by_name[name]
test.assertEqual(name, field_desc.name)
expected_field_full_name = '.'.join([msg_desc.full_name, name])
test.assertEqual(expected_field_full_name, field_desc.full_name)
test.assertEqual(self.number, field_desc.number)
test.assertEqual(index, field_desc.index)
test.assertEqual(descriptor.FieldDescriptor.TYPE_MESSAGE, field_desc.type)
test.assertEqual(descriptor.FieldDescriptor.CPPTYPE_MESSAGE,
field_desc.cpp_type)
test.assertFalse(field_desc.has_default_value)
test.assertTrue(field_desc.is_extension)
test.assertEqual(msg_desc, field_desc.extension_scope)
test.assertEqual(msg_desc, field_desc.message_type)
test.assertEqual(self.extended_type, field_desc.containing_type.name)
test.assertEqual(file_desc, field_desc.file)
class AddDescriptorTest(unittest.TestCase):
def _TestMessage(self, prefix):
pool = descriptor_pool.DescriptorPool()
pool.AddDescriptor(unittest_pb2.TestAllTypes.DESCRIPTOR)
self.assertEqual(
'protobuf_unittest.TestAllTypes',
pool.FindMessageTypeByName(
prefix + 'protobuf_unittest.TestAllTypes').full_name)
# AddDescriptor is not recursive.
with self.assertRaises(KeyError):
pool.FindMessageTypeByName(
prefix + 'protobuf_unittest.TestAllTypes.NestedMessage')
pool.AddDescriptor(unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR)
self.assertEqual(
'protobuf_unittest.TestAllTypes.NestedMessage',
pool.FindMessageTypeByName(
prefix + 'protobuf_unittest.TestAllTypes.NestedMessage').full_name)
# Files are implicitly also indexed when messages are added.
self.assertEqual(
'google/protobuf/unittest.proto',
pool.FindFileByName(
'google/protobuf/unittest.proto').name)
self.assertEqual(
'google/protobuf/unittest.proto',
pool.FindFileContainingSymbol(
prefix + 'protobuf_unittest.TestAllTypes.NestedMessage').name)
@unittest.skipIf(api_implementation.Type() == 'cpp',
'With the cpp implementation, Add() must be called first')
def testMessage(self):
self._TestMessage('')
self._TestMessage('.')
def _TestEnum(self, prefix):
pool = descriptor_pool.DescriptorPool()
pool.AddEnumDescriptor(unittest_pb2.ForeignEnum.DESCRIPTOR)
self.assertEqual(
'protobuf_unittest.ForeignEnum',
pool.FindEnumTypeByName(
prefix + 'protobuf_unittest.ForeignEnum').full_name)
# AddEnumDescriptor is not recursive.
with self.assertRaises(KeyError):
pool.FindEnumTypeByName(
prefix + 'protobuf_unittest.ForeignEnum.NestedEnum')
pool.AddEnumDescriptor(unittest_pb2.TestAllTypes.NestedEnum.DESCRIPTOR)
self.assertEqual(
'protobuf_unittest.TestAllTypes.NestedEnum',
pool.FindEnumTypeByName(
prefix + 'protobuf_unittest.TestAllTypes.NestedEnum').full_name)
# Files are implicitly also indexed when enums are added.
self.assertEqual(
'google/protobuf/unittest.proto',
pool.FindFileByName(
'google/protobuf/unittest.proto').name)
self.assertEqual(
'google/protobuf/unittest.proto',
pool.FindFileContainingSymbol(
prefix + 'protobuf_unittest.TestAllTypes.NestedEnum').name)
@unittest.skipIf(api_implementation.Type() == 'cpp',
'With the cpp implementation, Add() must be called first')
def testEnum(self):
self._TestEnum('')
self._TestEnum('.')
@unittest.skipIf(api_implementation.Type() == 'cpp',
'With the cpp implementation, Add() must be called first')
def testService(self):
pool = descriptor_pool.DescriptorPool()
with self.assertRaises(KeyError):
pool.FindServiceByName('protobuf_unittest.TestService')
pool.AddServiceDescriptor(unittest_pb2._TESTSERVICE)
self.assertEqual(
'protobuf_unittest.TestService',
pool.FindServiceByName('protobuf_unittest.TestService').full_name)
@unittest.skipIf(api_implementation.Type() == 'cpp',
'With the cpp implementation, Add() must be called first')
def testFile(self):
pool = descriptor_pool.DescriptorPool()
pool.AddFileDescriptor(unittest_pb2.DESCRIPTOR)
self.assertEqual(
'google/protobuf/unittest.proto',
pool.FindFileByName(
'google/protobuf/unittest.proto').name)
# AddFileDescriptor is not recursive; messages and enums within files must
# be explicitly registered.
with self.assertRaises(KeyError):
pool.FindFileContainingSymbol(
'protobuf_unittest.TestAllTypes')
def testEmptyDescriptorPool(self):
# Check that an empty DescriptorPool() contains no messages.
pool = descriptor_pool.DescriptorPool()
proto_file_name = descriptor_pb2.DESCRIPTOR.name
self.assertRaises(KeyError, pool.FindFileByName, proto_file_name)
# Add the above file to the pool
file_descriptor = descriptor_pb2.FileDescriptorProto()
descriptor_pb2.DESCRIPTOR.CopyToProto(file_descriptor)
pool.Add(file_descriptor)
# Now it exists.
self.assertTrue(pool.FindFileByName(proto_file_name))
def testCustomDescriptorPool(self):
# Create a new pool, and add a file descriptor.
pool = descriptor_pool.DescriptorPool()
file_desc = descriptor_pb2.FileDescriptorProto(
name='some/file.proto', package='package')
file_desc.message_type.add(name='Message')
pool.Add(file_desc)
self.assertEqual(pool.FindFileByName('some/file.proto').name,
'some/file.proto')
self.assertEqual(pool.FindMessageTypeByName('package.Message').name,
'Message')
def testFileDescriptorOptionsWithCustomDescriptorPool(self):
# Create a descriptor pool, and add a new FileDescriptorProto to it.
pool = descriptor_pool.DescriptorPool()
file_name = 'file_descriptor_options_with_custom_descriptor_pool.proto'
file_descriptor_proto = descriptor_pb2.FileDescriptorProto(name=file_name)
extension_id = file_options_test_pb2.foo_options
file_descriptor_proto.options.Extensions[extension_id].foo_name = 'foo'
pool.Add(file_descriptor_proto)
# The options set on the FileDescriptorProto should be available in the
# descriptor even if they contain extensions that cannot be deserialized
# using the pool.
file_descriptor = pool.FindFileByName(file_name)
options = file_descriptor.GetOptions()
self.assertEqual('foo', options.Extensions[extension_id].foo_name)
# The object returned by GetOptions() is cached.
self.assertIs(options, file_descriptor.GetOptions())
class DefaultPoolTest(unittest.TestCase):
def testFindMethods(self):
pool = descriptor_pool.Default()
self.assertIs(
pool.FindFileByName('google/protobuf/unittest.proto'),
unittest_pb2.DESCRIPTOR)
self.assertIs(
pool.FindMessageTypeByName('protobuf_unittest.TestAllTypes'),
unittest_pb2.TestAllTypes.DESCRIPTOR)
self.assertIs(
pool.FindFieldByName('protobuf_unittest.TestAllTypes.optional_int32'),
unittest_pb2.TestAllTypes.DESCRIPTOR.fields_by_name['optional_int32'])
self.assertIs(
pool.FindEnumTypeByName('protobuf_unittest.ForeignEnum'),
unittest_pb2.ForeignEnum.DESCRIPTOR)
if api_implementation.Type() != 'cpp':
self.skipTest('Only the C++ implementation correctly indexes all types')
self.assertIs(
pool.FindExtensionByName('protobuf_unittest.optional_int32_extension'),
unittest_pb2.DESCRIPTOR.extensions_by_name['optional_int32_extension'])
self.assertIs(
pool.FindOneofByName('protobuf_unittest.TestAllTypes.oneof_field'),
unittest_pb2.TestAllTypes.DESCRIPTOR.oneofs_by_name['oneof_field'])
self.assertIs(
pool.FindServiceByName('protobuf_unittest.TestService'),
unittest_pb2.DESCRIPTOR.services_by_name['TestService'])
def testAddFileDescriptor(self):
pool = descriptor_pool.Default()
file_desc = descriptor_pb2.FileDescriptorProto(name='some/file.proto')
pool.Add(file_desc)
pool.AddSerializedFile(file_desc.SerializeToString())
TEST1_FILE = ProtoFile(
'google/protobuf/internal/descriptor_pool_test1.proto',
'google.protobuf.python.internal',
{
'DescriptorPoolTest1': MessageType({
'NestedEnum': EnumType([('ALPHA', 1), ('BETA', 2)]),
'NestedMessage': MessageType({
'NestedEnum': EnumType([('EPSILON', 5), ('ZETA', 6)]),
'DeepNestedMessage': MessageType({
'NestedEnum': EnumType([('ETA', 7), ('THETA', 8)]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'ETA')),
('nested_field', StringField(2, 'theta')),
]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'ZETA')),
('nested_field', StringField(2, 'beta')),
('deep_nested_message', MessageField(3, 'DeepNestedMessage')),
])
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'BETA')),
('nested_message', MessageField(2, 'NestedMessage')),
], is_extendable=True),
'DescriptorPoolTest2': MessageType({
'NestedEnum': EnumType([('GAMMA', 3), ('DELTA', 4)]),
'NestedMessage': MessageType({
'NestedEnum': EnumType([('IOTA', 9), ('KAPPA', 10)]),
'DeepNestedMessage': MessageType({
'NestedEnum': EnumType([('LAMBDA', 11), ('MU', 12)]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'MU')),
('nested_field', StringField(2, 'lambda')),
]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'IOTA')),
('nested_field', StringField(2, 'delta')),
('deep_nested_message', MessageField(3, 'DeepNestedMessage')),
])
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'GAMMA')),
('nested_message', MessageField(2, 'NestedMessage')),
]),
})
TEST2_FILE = ProtoFile(
'google/protobuf/internal/descriptor_pool_test2.proto',
'google.protobuf.python.internal',
{
'DescriptorPoolTest3': MessageType({
'NestedEnum': EnumType([('NU', 13), ('XI', 14)]),
'NestedMessage': MessageType({
'NestedEnum': EnumType([('OMICRON', 15), ('PI', 16)]),
'DeepNestedMessage': MessageType({
'NestedEnum': EnumType([('RHO', 17), ('SIGMA', 18)]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'RHO')),
('nested_field', StringField(2, 'sigma')),
]),
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'PI')),
('nested_field', StringField(2, 'nu')),
('deep_nested_message', MessageField(3, 'DeepNestedMessage')),
])
}, [
('nested_enum', EnumField(1, 'NestedEnum', 'XI')),
('nested_message', MessageField(2, 'NestedMessage')),
], extensions=[
('descriptor_pool_test',
ExtensionField(1001, 'DescriptorPoolTest1')),
]),
},
dependencies=['google/protobuf/internal/descriptor_pool_test1.proto',
'google/protobuf/internal/more_messages.proto'],
public_dependencies=['google/protobuf/internal/more_messages.proto'])
if __name__ == '__main__':
unittest.main()
|
|
import unittest
from the_ark.actions import Actions, ActionException
from the_ark.field_handlers import STRING_FIELD, EMAIL_FIELD, PHONE_FIELD, ZIP_CODE_FIELD, DATE_FIELD
from the_ark.resources.action_constants import *
from the_ark.selenium_helpers import SeleniumHelpers, SeleniumHelperExceptions
from mock import patch
class ActionTestCase(unittest.TestCase):
def setUp(self):
self.instantiate_actions_class()
@patch("the_ark.selenium_helpers.SeleniumHelpers")
def instantiate_actions_class(self, selenium_helper):
self.ac = Actions(selenium_helper)
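# Each test below exercises an Actions instance wrapping a mocked
# SeleniumHelpers, so assertions are made against the helper calls the
# action methods are expected to dispatch.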
# - Dispatch Methods
@patch("the_ark.actions.Actions.dispatch_action")
def test_dispatch_list(self, mock_dispatch):
action_list = ["pickles", "pineapples"]
mock_dispatch.return_value = True
self.ac.dispatch_list_of_actions(action_list)
self.assertEqual(len(mock_dispatch.mock_calls), 2)
mock_dispatch.assert_called_with(action_list[1], None)
@patch("the_ark.actions.Actions.dispatch_action")
def test_dispatch_list_without_list(self, mock_dispatch):
action_list = False
with self.assertRaises(ActionException) as type_error:
self.ac.dispatch_list_of_actions(action_list)
self.assertIn("list type", type_error.exception.msg)
@patch("the_ark.actions.Actions.load_url")
def test_dispatch_action(self, mock_load):
action = {ACTION_KEY: LOAD_URL_ACTION}
mock_load.return_value = True
self.ac.dispatch_action(action)
mock_load.assert_called_with(action, None)
@patch("the_ark.actions.Actions.load_url")
def test_dispatch_action_selenium_error(self, mock_load):
action = {ACTION_KEY: LOAD_URL_ACTION}
mock_load.side_effect = SeleniumHelperExceptions("Boom!", "stacktrace", "www.meltmedia.com")
with self.assertRaises(ActionException) as selenium_error:
self.ac.dispatch_action(action)
self.assertIn("www.meltmedia.com", selenium_error.exception.msg)
@patch("the_ark.actions.Actions.load_url")
def test_dispatch_action_key_error(self, mock_load):
action = {ACTION_KEY: LOAD_URL_ACTION}
mock_load.side_effect = KeyError("Boom!")
with self.assertRaises(ActionException) as key_error:
self.ac.dispatch_action(action)
self.assertIn("key named 'Boom!'", key_error.exception.msg)
@patch("the_ark.actions.Actions.load_url")
def test_dispatch_action_attribute_error(self, mock_load):
action = {ACTION_KEY: LOAD_URL_ACTION}
mock_load.side_effect = AttributeError("Boom!")
with self.assertRaises(ActionException) as attr_error:
self.ac.dispatch_action(action)
self.assertIn("AttributeError", attr_error.exception.msg)
@patch("the_ark.actions.Actions.load_url")
def test_dispatch_action_general_error(self, mock_load):
action = {ACTION_KEY: LOAD_URL_ACTION}
mock_load.side_effect = Exception("Boom!")
with self.assertRaises(ActionException) as e:
self.ac.dispatch_action(action)
self.assertIn("error", e.exception.msg)
# - Load URL Action
@patch("the_ark.selenium_helpers.SeleniumHelpers.load_url")
@patch("the_ark.selenium_helpers.SeleniumHelpers.get_current_url")
def test_load_url_action_with_just_path(self, mock_url, sh_load):
sh = SeleniumHelpers()
ac = Actions(sh)
action = {
ACTION_KEY: LOAD_URL_ACTION,
PATH_KEY: "/pickles"
}
test_url = "http://www.meltmedia.com"
mock_url.return_value = test_url
ac.load_url(action)
sh_load.assert_called_with(test_url + "/pickles", None)
@patch("the_ark.selenium_helpers.SeleniumHelpers.load_url")
def test_load_url_action_with_url_and_path(self, sh_load):
sh = SeleniumHelpers()
ac = Actions(sh)
test_url = "http://www.meltmedia.com"
action = {
ACTION_KEY: LOAD_URL_ACTION,
PATH_KEY: "/pickles",
URL_KEY: test_url
}
ac.load_url(action)
sh_load.assert_called_with(test_url + "/pickles", None)
@patch("the_ark.selenium_helpers.SeleniumHelpers.load_url")
def test_load_url_action_with_just_url_with_bypass(self, sh_load):
sh = SeleniumHelpers()
ac = Actions(sh)
test_url = "http://www.meltmedia.com"
action = {
ACTION_KEY: LOAD_URL_ACTION,
URL_KEY: test_url,
BYPASS_404_KEY: True
}
ac.load_url(action)
sh_load.assert_called_with(test_url, True)
# - Click Action
def test_click(self):
action = {
ACTION_KEY: CLICK_ACTION,
CSS_SELECTOR_KEY: ".pickles"
}
self.ac.click(action, None)
self.ac.sh.click_an_element.assert_called_with(action[CSS_SELECTOR_KEY])
def test_click_with_element(self):
action = {
ACTION_KEY: CLICK_ACTION,
ELEMENT_KEY: True
}
element = "element"
self.ac.click(action, element)
self.ac.sh.click_an_element.assert_called_with(web_element=element)
# - Hover Action
def test_hover(self):
action = {
ACTION_KEY: HOVER_ACTION,
CSS_SELECTOR_KEY: ".pickles"
}
self.ac.hover(action, None)
self.ac.sh.hover_on_element.assert_called_with(action[CSS_SELECTOR_KEY])
def test_hover_with_element(self):
action = {
ACTION_KEY: HOVER_ACTION,
ELEMENT_KEY: True
}
element = "element"
self.ac.hover(action, element)
self.ac.sh.hover_on_element.assert_called_with(web_element=element)
# - Enter Text Action
def test_enter_text(self):
action = {
ACTION_KEY: ENTER_TEXT_ACTION,
CSS_SELECTOR_KEY: ".enter",
INPUT_KEY: "input text"
}
self.ac.enter_text(action, None)
self.ac.sh.fill_an_element.assert_called_with(action[INPUT_KEY], action[CSS_SELECTOR_KEY])
def test_enter_text_with_element(self):
action = {
ACTION_KEY: ENTER_TEXT_ACTION,
ELEMENT_KEY: True,
INPUT_KEY: "input text"
}
element = "element"
self.ac.enter_text(action, element)
self.ac.sh.fill_an_element.assert_called_with(action[INPUT_KEY], web_element=element)
@patch('the_ark.input_generator.generate_string')
def test_enter_text_as_string_with_element(self, mock_string):
test_string = "Test String"
mock_string.return_value = test_string
action = {
ACTION_KEY: ENTER_TEXT_ACTION,
ELEMENT_KEY: True,
INPUT_TYPE_KEY: STRING_FIELD
}
element = "element"
self.ac.enter_text(action, element)
self.ac.sh.fill_an_element.assert_called_with(test_string, web_element=element)
@patch('the_ark.input_generator.generate_email')
def test_enter_text_as_email(self, mock_email):
test_email = "[email protected]"
mock_email.return_value = test_email
action = {
ACTION_KEY: ENTER_TEXT_ACTION,
CSS_SELECTOR_KEY: ".enter",
INPUT_TYPE_KEY: EMAIL_FIELD
}
self.ac.enter_text(action, None)
self.ac.sh.fill_an_element.assert_called_with(test_email, action[CSS_SELECTOR_KEY])
@patch('the_ark.input_generator.generate_zip_code')
def test_enter_text_as_zip_code(self, mock_zip):
test_zip = "12345"
mock_zip.return_value = test_zip
action = {
ACTION_KEY: ENTER_TEXT_ACTION,
CSS_SELECTOR_KEY: ".enter",
INPUT_TYPE_KEY: ZIP_CODE_FIELD
}
self.ac.enter_text(action, None)
self.ac.sh.fill_an_element.assert_called_with(test_zip, action[CSS_SELECTOR_KEY])
@patch('the_ark.input_generator.generate_phone')
def test_enter_text_as_phone_number(self, mock_phone):
test_phone = "5557891011"
mock_phone.return_value = test_phone
action = {
ACTION_KEY: ENTER_TEXT_ACTION,
CSS_SELECTOR_KEY: ".enter",
INPUT_TYPE_KEY: PHONE_FIELD
}
self.ac.enter_text(action, None)
self.ac.sh.fill_an_element.assert_called_with(test_phone, action[CSS_SELECTOR_KEY])
@patch('the_ark.input_generator.generate_date')
def test_enter_text_as_date(self, mock_date):
test_date = "01/27/1987"
mock_date.return_value = test_date
action = {
ACTION_KEY: ENTER_TEXT_ACTION,
CSS_SELECTOR_KEY: ".enter",
INPUT_TYPE_KEY: DATE_FIELD
}
self.ac.enter_text(action, None)
self.ac.sh.fill_an_element.assert_called_with(test_date, action[CSS_SELECTOR_KEY])
@patch('the_ark.input_generator.generate_date')
def test_enter_text_as_unknown_type(self, mock_date):
test_date = "01/27/1987"
mock_date.return_value = test_date
action = {
ACTION_KEY: ENTER_TEXT_ACTION,
CSS_SELECTOR_KEY: ".enter",
INPUT_TYPE_KEY: "Pickles"
}
with self.assertRaises(ActionException) as unknown_input_type:
self.ac.enter_text(action, None)
self.assertIn(action[INPUT_TYPE_KEY], unknown_input_type.exception.msg)
# - Scroll Actions
def test_scroll_window_to_position(self):
action = {
ACTION_KEY: SCROLL_WINDOW_TO_POSITION_ACTION,
POSITION_TOP_KEY: "1",
POSITION_BOTTOM_KEY: "2",
X_POSITION_KEY: 300,
Y_POSITION_KEY: 4000
}
self.ac.scroll_window_to_position(action, None)
self.ac.sh.scroll_window_to_position.assert_called_with(action[Y_POSITION_KEY], action[X_POSITION_KEY],
action[POSITION_TOP_KEY], action[POSITION_BOTTOM_KEY])
def test_scroll_window_to_position_with_defaults(self):
action = {
ACTION_KEY: SCROLL_WINDOW_TO_POSITION_ACTION,
POSITION_TOP_KEY: "1",
X_POSITION_KEY: 300,
}
self.ac.scroll_window_to_position(action)
self.ac.sh.scroll_window_to_position.assert_called_with(0, action[X_POSITION_KEY],
action[POSITION_TOP_KEY], 0)
def test_scroll_window_to_element_defaults(self):
action = {
ACTION_KEY: SCROLL_WINDOW_TO_ELEMENT_ACTION,
CSS_SELECTOR_KEY: ".scroll"
}
self.ac.scroll_window_to_element(action, None)
self.ac.sh.scroll_to_element.assert_called_with(action[CSS_SELECTOR_KEY], position_bottom=None,
position_middle=None)
def test_scroll_window_to_element_with_top_and_bottom(self):
action = {
ACTION_KEY: SCROLL_WINDOW_TO_ELEMENT_ACTION,
CSS_SELECTOR_KEY: ".scroll",
POSITION_BOTTOM_KEY: True,
POSITION_MIDDLE_KEY: True
}
self.ac.scroll_window_to_element(action, None)
self.ac.sh.scroll_to_element.assert_called_with(action[CSS_SELECTOR_KEY], position_bottom=True,
position_middle=True)
def test_scroll_window_to_element_with_element(self):
action = {
ACTION_KEY: SCROLL_WINDOW_TO_ELEMENT_ACTION,
ELEMENT_KEY: True
}
element = "element"
self.ac.scroll_window_to_element(action, element)
self.ac.sh.scroll_to_element.assert_called_with(web_element=element, position_bottom=None,
position_middle=None)
def test_scroll_window_to_element_with_element_and_top_and_bottom(self):
action = {
ACTION_KEY: SCROLL_WINDOW_TO_ELEMENT_ACTION,
ELEMENT_KEY: True,
POSITION_BOTTOM_KEY: True,
POSITION_MIDDLE_KEY: True
}
element = "element"
self.ac.scroll_window_to_element(action, element)
self.ac.sh.scroll_to_element.assert_called_with(position_bottom=True, position_middle=True,
web_element=element)
def test_scroll_an_element_defaults(self):
action = {
ACTION_KEY: SCROLL_AN_ELEMENT_ACTION,
CSS_SELECTOR_KEY: ".scrollable"
}
self.ac.scroll_an_element(action, None)
self.ac.sh.scroll_an_element.assert_called_with(css_selector=action[CSS_SELECTOR_KEY], scroll_bottom=None,
scroll_padding=None, scroll_top=None, x_position=None,
y_position=None)
def test_scroll_an_element_with_values(self):
action = {
ACTION_KEY: SCROLL_AN_ELEMENT_ACTION,
CSS_SELECTOR_KEY: ".scrollable",
POSITION_BOTTOM_KEY: True,
X_POSITION_KEY: 100,
Y_POSITION_KEY: 1200,
SCROLL_PADDING_KEY: 45,
POSITION_TOP_KEY: False
}
self.ac.scroll_an_element(action, None)
self.ac.sh.scroll_an_element.assert_called_with(css_selector=action[CSS_SELECTOR_KEY],
scroll_bottom=action[POSITION_BOTTOM_KEY],
scroll_padding=action[SCROLL_PADDING_KEY],
scroll_top=action[POSITION_TOP_KEY],
x_position=action[X_POSITION_KEY],
y_position=action[Y_POSITION_KEY])
def test_scroll_an_element_with_element_and_defaults(self):
action = {
ACTION_KEY: SCROLL_AN_ELEMENT_ACTION,
ELEMENT_KEY: True
}
element = "element"
self.ac.scroll_an_element(action, element)
self.ac.sh.scroll_an_element.assert_called_with(web_element=element, scroll_bottom=None, scroll_padding=None,
scroll_top=None, x_position=None, y_position=None)
def test_scroll_an_element_with_element_and_values(self):
action = {
ACTION_KEY: SCROLL_AN_ELEMENT_ACTION,
ELEMENT_KEY: True,
POSITION_BOTTOM_KEY: True,
X_POSITION_KEY: 100,
Y_POSITION_KEY: 1200,
SCROLL_PADDING_KEY: 45,
POSITION_TOP_KEY: False
}
element = "element"
self.ac.scroll_an_element(action, element)
self.ac.sh.scroll_an_element.assert_called_with(web_element=element, scroll_bottom=action[POSITION_BOTTOM_KEY],
scroll_padding=action[SCROLL_PADDING_KEY],
scroll_top=action[POSITION_TOP_KEY],
x_position=action[X_POSITION_KEY],
y_position=action[Y_POSITION_KEY])
# - Refresh Action
def test_refresh(self):
self.ac.refresh("action")
self.assertEqual(self.ac.sh.refresh.call_count, 1)
# - Sleep Action
@patch('time.sleep')
def test_sleep(self, mock_sleep):
action = {
ACTION_KEY: SLEEP_ACTION,
DURATION_KEY: 10
}
self.ac.sleep(action)
mock_sleep.assert_called_with(action[DURATION_KEY])
# - Wait for Element Action
def test_wait_for_element_defaults(self):
action = {
ACTION_KEY: WAIT_FOR_ELEMENT_ACTION,
CSS_SELECTOR_KEY: ".wait"
}
self.ac.wait_for_element(action)
self.ac.sh.wait_for_element.assert_called_with(action[CSS_SELECTOR_KEY], 15)
def test_wait_for_element_with_duration(self):
action = {
ACTION_KEY: WAIT_FOR_ELEMENT_ACTION,
CSS_SELECTOR_KEY: ".wait",
DURATION_KEY: 10
}
self.ac.wait_for_element(action)
self.ac.sh.wait_for_element.assert_called_with(action[CSS_SELECTOR_KEY], action[DURATION_KEY])
# - Special Key Action
def test_send_special_key(self):
action = {
ACTION_KEY: SEND_SPECIAL_KEY_ACTION,
SPECIAL_KEY_KEY: "ENTER"
}
self.ac.send_special_key(action)
self.ac.sh.send_special_key.assert_called_with(action[SPECIAL_KEY_KEY])
# - Show Element Action
def test_show_element(self):
action = {
ACTION_KEY: SHOW_ELEMENT_ACTION,
CSS_SELECTOR_KEY: ".pickles"
}
self.ac.show_element(action, None)
self.ac.sh.show_element.assert_called_with(action[CSS_SELECTOR_KEY])
def test_show_element_with_element(self):
action = {
ACTION_KEY: SHOW_ELEMENT_ACTION,
ELEMENT_KEY: True
}
element = "element"
self.ac.show_element(action, element)
self.ac.sh.show_element.assert_called_with(web_element=element)
# - Hide Element Action
def test_hide_element(self):
action = {
ACTION_KEY: HIDE_ELEMENT_ACTION,
CSS_SELECTOR_KEY: ".pickles"
}
self.ac.hide_element(action, None)
self.ac.sh.hide_element.assert_called_with(action[CSS_SELECTOR_KEY])
def test_hide_element_with_element(self):
action = {
ACTION_KEY: HIDE_ELEMENT_ACTION,
ELEMENT_KEY: True
}
element = "element"
self.ac.hide_element(action, element)
self.ac.sh.hide_element.assert_called_with(web_element=element)
# - Execute Script Action
def test_execute_script(self):
action = {
ACTION_KEY: EXECUTE_SCRIPT_ACTION,
SCRIPT_KEY: "script text"
}
self.ac.execute_script(action, None)
self.ac.sh.execute_script.assert_called_with(action[SCRIPT_KEY])
def test_execute_script_with_element(self):
action = {
ACTION_KEY: EXECUTE_SCRIPT_ACTION,
ELEMENT_KEY: True,
SCRIPT_KEY: "script text"
}
element = "element"
self.ac.execute_script(action, element)
self.ac.sh.execute_script.assert_called_with(action[SCRIPT_KEY], element)
# - Switch Window Handle Action
def test_switch_window_handle(self):
action = {
ACTION_KEY: "switch_window_handle"
}
self.ac.switch_window_handle(action)
self.ac.sh.switch_window_handle.assert_called_with()
def test_switch_window_handle_with_index(self):
fake_handles = ["handle1", "handle2"]
action = {
ACTION_KEY: "switch_window_handle",
INDEX_KEY: 1
}
self.ac.sh.get_window_handles.return_value = fake_handles
self.ac.switch_window_handle(action)
self.ac.sh.switch_window_handle.assert_called_with(fake_handles[action[INDEX_KEY]])
# - Close Window Action
def test_close_window(self):
action = {
ACTION_KEY: CLOSE_WINDOW_ACTION
}
self.ac.close_window(action)
self.ac.sh.close_window.assert_called_with()
# - For Each Action
@patch('the_ark.actions.Actions.dispatch_list_of_actions')
def test_for_each(self, mock_dispatch):
action = {
ACTION_KEY: FOR_EACH_ACTION,
CSS_SELECTOR_KEY: ".for-each",
ALLOW_EMPTY_KEY: False,
ACTION_LIST_KEY: [
{
ACTION_KEY: CLICK_ACTION,
ELEMENT_KEY: True
},
]
}
self.ac.sh.element_exists.return_value = True
self.ac.sh.get_list_of_elements.return_value = ["element1", "element2"]
self.ac.for_each(action)
self.assertEqual(len(mock_dispatch.mock_calls), 2)
@patch('the_ark.actions.Actions.dispatch_list_of_actions')
def test_for_each_without_elements(self, mock_dispatch):
action = {
ACTION_KEY: FOR_EACH_ACTION,
CSS_SELECTOR_KEY: ".for-each",
ALLOW_EMPTY_KEY: False,
ACTION_LIST_KEY: [
{
ACTION_KEY: CLICK_ACTION,
ELEMENT_KEY: True
},
]
}
self.ac.sh.element_exists.return_value = False
with self.assertRaises(ActionException) as type_error:
self.ac.for_each(action)
self.assertIn("no elements", type_error.exception.msg)
@patch('the_ark.actions.Actions.dispatch_list_of_actions')
def test_for_each_child(self, mock_dispatch):
action = {
ACTION_KEY: FOR_EACH_ACTION,
CSS_SELECTOR_KEY: ".for-each",
ALLOW_EMPTY_KEY: False,
CHILD_KEY: True,
ACTION_LIST_KEY: [
{
ACTION_KEY: CLICK_ACTION,
ELEMENT_KEY: True
},
]
}
self.ac.sh.element_exists.return_value = ["element1", "element2"]
self.ac.for_each(action)
@patch('the_ark.actions.Actions.dispatch_list_of_actions')
def test_for_each_allow_empty(self, mock_dispatch):
action = {
ACTION_KEY: FOR_EACH_ACTION,
CSS_SELECTOR_KEY: ".for-each",
ALLOW_EMPTY_KEY: True,
ACTION_LIST_KEY: [
{
ACTION_KEY: CLICK_ACTION,
ELEMENT_KEY: True
},
]
}
self.ac.sh.element_exists.return_value = False
self.ac.for_each(action)
@patch('the_ark.actions.Actions.dispatch_list_of_actions')
def test_for_each_do_not_increment(self, mock_dispatch):
action = {
ACTION_KEY: FOR_EACH_ACTION,
CSS_SELECTOR_KEY: ".for-each",
ALLOW_EMPTY_KEY: True,
DO_NOT_INCREMENT_KEY: True,
ACTION_LIST_KEY: [
{
ACTION_KEY: CLICK_ACTION,
ELEMENT_KEY: True
},
]
}
self.ac.sh.element_exists.return_value = True
self.ac.sh.get_list_of_elements.return_value = ["element1", "element2"]
self.ac.for_each(action)
# - ActionException
def test_exception_text(self):
ac = ActionException("test")
self.assertIn("test", str(ac))
def test_exception_with_stacktrace(self):
ac = ActionException("test", "stacktrace testing")
self.assertIn("stacktrace testing", str(ac))
def test_exception_with_details(self):
ac = ActionException("test", details={"meltQA": "Rocks"})
self.assertIn("Rocks", str(ac))
self.assertIn("Exception Details", str(ac))
|
|
# -*- coding: utf-8 -*-
"""
Missing Person Registry
@author: nursix
"""
module = request.controller
prefix = request.controller
resourcename = request.function
if prefix not in deployment_settings.modules:
raise HTTP(404, body="Module disabled: %s" % prefix)
MISSING = str(T("Missing"))
FOUND = str(T("Found"))
DETAILS = str(T("Details"))
action = lambda l, u: dict(label=str(l), url=str(u), _class="action-btn")
s3_menu(module)
# -----------------------------------------------------------------------------
def index():
""" Home Page """
try:
module_name = deployment_settings.modules[prefix].name_nice
except:
module_name = T("Missing Persons Registry")
prefix = "pr"
resourcename = "person"
tablename = "%s_%s" % (prefix, resourcename)
table = db[tablename]
report_url = URL(c="mpr", f=resourcename,
args=["[id]", "note"],
vars=dict(status="missing"))
s3mgr.configure(tablename,
create_next=report_url,
list_fields=["id",
"first_name",
"middle_name",
"last_name",
"picture",
"gender",
"age_group",
"missing"])
def prep(r):
if r.representation == "html":
if not r.id and not r.method:
r.method = "search"
else:
redirect(URL(resourcename, args=request.args))
return True
response.s3.prep = prep
def postp(r, output):
response.s3.actions = []
if not r.component:
open_button_label = DETAILS
if auth.s3_logged_in():
mreport = URL(resourcename,
args=["[id]", "note", "create"],
vars=dict(status="missing"))
freport = URL(resourcename,
args=["[id]", "note", "create"],
vars=dict(status="found"))
response.s3.actions = [action(MISSING, mreport),
action(FOUND, freport)]
# Is the current user reported missing?
if isinstance(output, dict):
person = s3_logged_in_person()
if person and db.pr_person[person].missing:
myself = URL(resourcename,
args=[person, "note", "create"],
vars=dict(status="found"))
output.update(myself=myself)
else:
open_button_label = UPDATE
#linkto = r.resource.crud._linkto(r, update=True)("[id]")
linkto = URL(resourcename,
args=["[id]", "note"])
response.s3.actions.append(action(open_button_label, linkto))
return output
response.s3.postp = postp
output = s3_rest_controller(prefix, resourcename,
module_name=module_name)
response.view = "mpr/index.html"
response.title = module_name
s3_menu(module)
return output
# -----------------------------------------------------------------------------
def person():
""" Missing Persons List """
prefix = "pr"
tablename = "%s_%s" % (prefix, resourcename)
table = db[tablename]
s3.crud_strings[tablename].update(
title_display = T("Missing Person Details"),
title_list = T("Missing Persons Registry"),
subtitle_list = T("Missing Persons"),
label_list_button = T("List Missing Persons"),
msg_list_empty = T("No Persons currently reported missing"))
s3mgr.configure("pr_group_membership",
list_fields=["id",
"group_id",
"group_head",
"description"
])
s3mgr.configure(tablename,
create_next = URL(c="mpr", f="person",
args=["[id]", "note", "create"],
vars=dict(status="missing")),
list_fields=["id",
"first_name",
"middle_name",
"last_name",
"picture",
"gender",
"age_group",
"missing"
])
def prep(r):
if r.interactive and not r.id:
r.resource.add_filter(db.pr_person.missing == True)
if r.component_name == "config":
_config = db.gis_config
defaults = db(_config.id == 1).select(limitby=(0, 1)).first()
for key in defaults.keys():
if key not in ["id",
"uuid",
"mci",
"update_record",
"delete_record"]:
_config[key].default = defaults[key]
elif r.component_name == "note":
ntable = db.pr_note
status = r.vars.get("status", None)
if status:
if status == "missing":
ntable.status.default = 1
ntable.status.writable = False
ntable.timestmp.label = T("Date/Time when last seen")
ntable.note_text.label = T("Circumstances of disappearance")
s3.crud_strings[str(ntable)].update(
title_create = "Add Missing Report",
subtitle_create = "Add Missing Report")
elif status == "found":
ntable.status.default = 2
ntable.status.writable = False
ntable.timestmp.label = T("Date/Time when found")
ntable.note_text.label = T("Comments")
s3.crud_strings[str(ntable)].update(
title_create = "Add Find Report",
subtitle_create = "Add Find Report")
else:
ntable.status.default = 99
ntable.status.writable = True
return True
response.s3.prep = prep
def postp(r, output):
if r.interactive:
if not r.component:
label = READ
linkto = URL(f="person",
args=("[id]", "note"))
else:
label = UPDATE
linkto = r.resource.crud._linkto(r)("[id]")
response.s3.actions = [action(label, linkto)]
if not r.component:
label = FOUND
linkto = URL(f="person",
args=("[id]", "note", "create"),
vars=dict(status="found"))
response.s3.actions.append(action(label, linkto))
return output
response.s3.postp = postp
ptable = db.pr_person
ptable.missing.default = True
ptable.missing.readable = False
ptable.missing.writable = False
ptable.pe_label.readable = False
ptable.pe_label.writable = False
ptable.occupation.readable = False
ptable.occupation.writable = False
mpr_tabs = [(T("Person Details"), None),
(T("Physical Description"), "physical_description"),
(T("Images"), "pimage"),
(T("Identity"), "identity"),
(T("Address"), "address"),
(T("Contact Data"), "contact"),
(T("Journal"), "note")]
rheader = lambda r: pr_rheader(r, tabs=mpr_tabs)
output = s3_rest_controller("pr", resourcename, rheader=rheader)
s3_menu(module)
return output
# -----------------------------------------------------------------------------
|
|
# Simple Arp Handler v2
# Jack Zhao
# [email protected]
from operator import attrgetter
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import arp
from ryu.lib.packet.packet import Packet
from ryu.lib.packet.ethernet import ethernet
from ryu.lib.packet.arp import arp
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ether
from ryu.ofproto import inet
import time
import os
from ryu.lib.packet.lldp import LLDP_MAC_NEAREST_BRIDGE
# config logging
# LOG = logging.getLogger('SimpleArp')
# LOG.setLevel(logging.DEBUG)
# logging.basicConfig()
OFP_SWITCHES_LIST_PREVIOUS = \
'./network-data2/ofp_switches_list_prev.db'
# OFP_SWITCHES_LIST_SCRIPT = \
# './scripts/remote_ovs_operation/get_switch_ofpbr_datapath_id.sh'
class MySimpleArp(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(MySimpleArp, self).__init__(*args, **kwargs)
self.mac_to_port = {}
self.arp_learning = {} # self.arp_learning = {srcMAC:[dst_ip,in_port,time]}
self.packetToport = {}
self.hostname_list = {}
self.dpset = kwargs['dpset']
def _get_hwaddr(self, dpid, port_no):
return self.dpset.get_port(dpid, port_no).hw_addr
def _hostname_Check(self, datapath):
# Given decimal datapath ID, return hostname
if os.path.exists(os.path.abspath(OFP_SWITCHES_LIST_PREVIOUS)):
f = os.path.abspath(OFP_SWITCHES_LIST_PREVIOUS)
else:
f = os.path.abspath(OFP_SWITCHES_LIST)
with open(f, 'r') as iff:
for line in iff:
hostname, dpid = line.split()
self.hostname_list[int(dpid, 16)] = hostname
# print self.hostname_list
# NEED add some datapath check later
if datapath not in self.hostname_list.keys():
return datapath
else:
return self.hostname_list[datapath]
# @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
# def switch_features_handler(self, ev):
# """ install table-miss flow entry """
# self.logger.debug("my_arp: switch_features_handler:")
# datapath = ev.msg.datapath
# ofproto = datapath.ofproto
# parser = datapath.ofproto_parser
# self.logger.info("################### datapath in decimal %s", datapath.id)
# self.logger.info("################### datapath in hex %s", hex(int(datapath.id)))
# #
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
# truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
# match = parser.OFPMatch()
# actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
# ofproto.OFPCML_NO_BUFFER)]
# self.add_flow(datapath, 0, match, actions)
# def add_flow(self, datapath, priority, match, actions, buffer_id=None):
# self.logger.debug("my_arp:add_flow")
# ofproto = datapath.ofproto
# parser = datapath.ofproto_parser
# inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
# actions)]
# if buffer_id:
# mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
# priority=priority, match=match,
# instructions=inst)
# else:
# mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
# match=match, instructions=inst)
# datapath.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
self.logger.debug("my_arp: _packet_in_handler:")
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes",
ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
datapath = msg.datapath
# ofproto = datapath.ofproto
inPort = msg.match['in_port']
packets = Packet(msg.data)
dpid = datapath.id
eth = packets.get_protocols(ethernet)[0]
src = eth.src
dst = eth.dst
self.mac_to_port.setdefault(hex(dpid), {})
self.arp_learning.setdefault(dpid, [])
self.packetToport.setdefault(dpid, {})
if dst == LLDP_MAC_NEAREST_BRIDGE:
return
if src in self.mac_to_port[hex(dpid)].keys():
pass
else:
self.mac_to_port[hex(dpid)][src] = inPort
data = msg.data
etherFrame = packets.get_protocol(ethernet)
# if dst == LLDP_MAC_NEAREST_BRIDGE:
# return
# print "packets: ", packets
# print "packets.get_protocols(ethernet): ", packets.get_protocols(ethernet)
# print "etherFrame######", etherFrame
# etherFrame = packets.get_protocol(ethernet)
etherFrame = packets.get_protocol(ethernet)
# print etherFrame
# print ether
# print hex(etherFrame.ethertype)
# print hex(ether.ETH_TYPE_ARP)
if etherFrame.ethertype == ether.ETH_TYPE_ARP:
self.logger.debug("\n:")
arpPacket = packets.get_protocol(arp)
arpArriveTime = time.time()
srcMac = etherFrame.src
arp_dstIP = arpPacket.dst_ip
dst = eth.dst
# print "ARP"
# if dst == "ff:ff:ff:ff:ff:ff":
# self.packetToport[datapath.id][(srcMac, arp_dstIP, inPort)] = arpArriveTime
# print "arp"
# print "packets: ", packets
# print "packets.get_protocols(ethernet): ", packets.get_protocols(ethernet)
# print "ARP: %s" % arpPacket.opcode
# self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
# if arpPacket.opcode == 1:
# print "ARP Requst"
# self.logger.info("packet in %s %s %s %s", self._hostname_Check(datapath.id), srcMac, dst, inPort)
# elif arpPacket.opcode == 2:
# print "ARP Reply"
# self.logger.info("packet in %s %s %s %s", self._hostname_Check(datapath.id), srcMac, dst, inPort)
self.receive_arp(datapath, packets, etherFrame, inPort, data)
return 0
else:
self.logger.debug("Drop packet")
return 1
def receive_arp(self, datapath, packets, etherFrame, inPort, data):
self.logger.info("MySimpleArp: receive_arp: ")
arpPacket = packets.get_protocol(arp)
arp_dstIP = arpPacket.dst_ip
arp_srcIP = arpPacket.src_ip
# self.logger.info("packet in %s %s %s %s", self._hostname_Check(datapath.id), etherFrame.src, etherFrame.dst, inPort)
self.logger.info("\t %s: receive ARP PACKET %s => %s (in_port=%d) From %s to %s"
% (self._hostname_Check(datapath.id),
etherFrame.src, etherFrame.dst, inPort, arp_srcIP, arp_dstIP))
if arpPacket.opcode == 1:
print "\t ARP Request: Test if it is a repeated Broadcast"
if self.anti_arp_broadcast(datapath, etherFrame, inPort, arp_dstIP):
# self.logger.info("%s: receive new ARP request %s => %s (port%d) src_ip=%s dst_ip=%s"
# % (self._hostname_Check(datapath.id), etherFrame.src, etherFrame.dst, inPort, arp_srcIP, arp_dstIP))
# print "-----packetToport: ", self.packetToport
# print "-----arp_learning: ", self.arp_learning
self.reply_arp(datapath, etherFrame, arpPacket, arp_dstIP, inPort, data)
else:
self.logger.info("\t Repeated ARP Broadcast: no action taken")
# self.reply_arp(datapath, etherFrame, arpPacket, arp_dstIP, inPort, data)
elif arpPacket.opcode == 2:
self.logger.info("\t ARP_reply: then Forwarding this ARP reply packet !!!!!!!!!!!!!!")
self.logger.info("\t packet in %s %s %s %s", self._hostname_Check(
datapath.id), etherFrame.src, etherFrame.dst, inPort)
self.reply_arp(datapath, etherFrame, arpPacket, arp_dstIP, inPort, data)
def anti_arp_broadcast(self, datapath, etherFrame, inPort, arp_dstIP):
test = False
self.logger.info("MySimpleArp: anti_arp_broadcast:")
if etherFrame.dst == "ff:ff:ff:ff:ff:ff":
# self.logger.info("self.packetToport:", self.packetToport)
# self.logger.info(self.packetToport[datapath.id].keys())
# self.logger.info(self.packetToport[datapath.id])
if not self.packetToport[datapath.id]:
# self.logger.info("Another muticast packet form %s at %i port in %s " % (
# etherFrame.src, inPort, self._hostname_Check(datapath.id)))
# self.logger.info("packetToport: ", self.packetToport)
# self.logger.info("arp_learning: ", self.arp_learning
self.packetToport[datapath.id][(etherFrame.src, arp_dstIP, inPort)] = time.time()
self.logger.info("\t1 Added (%s %s %s): %s to self.packetToport and Forwarding ARP Broadcasting" %
(etherFrame.src, arp_dstIP, inPort, time.time()))
test = True
return test
elif ((etherFrame.src, arp_dstIP, inPort) in self.packetToport[datapath.id].keys()):
self.logger.info("\t2 ARP BLOCKING, No Further transfer")
# self.logger.info("Another muticast packet form %s at %i port in %s " % (
# etherFrame.src, inPort, self._hostname_Check(datapath.id)))
# self.logger.info("{DPID: { (src_mac, dst_ip, in_port): arpArriveTime, ():time }")
self.logger.info("\t %s %s" % (self._hostname_Check(datapath.id), self.packetToport[datapath.id].keys()))
test = False
return test
else:
# same ARP broadcast but the in_port number differs from the original port number: block
for keys in self.packetToport[datapath.id].keys():
if ((etherFrame.src, arp_dstIP) == keys[0:2]) and (inPort != keys[2]):
self.logger.info("\t 4 Same ARP packet arriving from a different port, so not forwarding the ARP Broadcast. Detail: (%s %s %s): %s" %
(etherFrame.src, arp_dstIP, inPort, time.time()))
# add this entry, avoid if else checking next time
self.packetToport[datapath.id][(etherFrame.src, arp_dstIP, inPort)] = time.time()
test = False
return test
if ((etherFrame.src, arp_dstIP, inPort) not in self.packetToport[datapath.id].keys()):
self.packetToport[datapath.id][(etherFrame.src, arp_dstIP, inPort)] = time.time()
self.logger.info("\t 3 New ARP Broadcast. Added (%s %s %s): %s to self.packetToport and Forwarding ARP Broadcast" %
(etherFrame.src, arp_dstIP, inPort, time.time()))
test = True
return test
return test
def reply_arp(self, datapath, etherFrame, arpPacket, arp_dstIp, inPort, data):
# self.logger.info("flood")
dst = etherFrame.dst
dpid = hex(datapath.id)
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
self.logger.info("\t reply_arp: Reply To Port %s !!!!!!!!!!!!!!!!!" % out_port)
else:
out_port = datapath.ofproto.OFPP_FLOOD
self.logger.info("\t reply_arp: Flooding...")
actions = [datapath.ofproto_parser.OFPActionOutput(out_port)]
out = datapath.ofproto_parser.OFPPacketOut(datapath=datapath, buffer_id=0xffffffff,
in_port=inPort, actions=actions, data=data)
datapath.send_msg(out)
# print mac_to_port for verification
self.logger.info("MySimpleArp: self.mac_to_port")
for key, value in self.mac_to_port.items():
print "\t", self._hostname_Check(int(str(key), 16)), value
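# Illustrative sketch only (not part of the controller above): the blocking
# rule in anti_arp_broadcast boils down to a per-switch table keyed by
# (src_mac, requested_ip, in_port). A broadcast is forwarded the first time
# that exact key is seen; it is dropped on an exact repeat, and also when the
# same (src_mac, requested_ip) pair reappears on a different port, which
# indicates the request has looped back through the topology.
def _should_forward_broadcast(seen, dpid, src_mac, dst_ip, in_port, now):
    table = seen.setdefault(dpid, {})
    key = (src_mac, dst_ip, in_port)
    if key in table:
        return False                      # exact repeat: block
    for (mac, ip, port) in list(table):
        if (mac, ip) == (src_mac, dst_ip) and port != in_port:
            table[key] = now              # remember it, but block
            return False
    table[key] = now                      # first sighting: forward
    return True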
|
|
"""
voxel.py
-----------
Convert meshes to a simple voxel data structure and back again.
"""
import numpy as np
from . import ops
from . import transforms
from . import morphology
from . import encoding as enc
from .. import bounds as bounds_module
from .. import caching
from .. import transformations as tr
from ..parent import Geometry
from ..constants import log
class VoxelGrid(Geometry):
"""
Store 3D voxels.
"""
def __init__(self, encoding, transform=None, metadata=None):
if transform is None:
transform = np.eye(4)
if isinstance(encoding, np.ndarray):
encoding = enc.DenseEncoding(encoding.astype(bool))
if encoding.dtype != bool:
raise ValueError('encoding must have dtype bool')
self._data = caching.DataStore()
self.encoding = encoding
self._data['transform'] = transforms.Transform(transform)
self._cache = caching.Cache(id_function=self._data.crc)
self.metadata = dict()
# update the mesh metadata with passed metadata
if isinstance(metadata, dict):
self.metadata.update(metadata)
elif metadata is not None:
raise ValueError(
'metadata should be a dict or None, got %s' % str(metadata))
def md5(self):
return self._data.md5()
def crc(self):
return self._data.crc()
@property
def encoding(self):
"""
`Encoding` object providing the occupancy grid.
See `trimesh.voxel.encoding` for implementations.
"""
return self._data['encoding']
@encoding.setter
def encoding(self, encoding):
if isinstance(encoding, np.ndarray):
encoding = enc.DenseEncoding(encoding)
elif not isinstance(encoding, enc.Encoding):
raise ValueError(
'encoding must be an Encoding, got %s' % str(encoding))
if len(encoding.shape) != 3:
raise ValueError(
'encoding must be rank 3, got shape %s' % str(encoding.shape))
if encoding.dtype != bool:
raise ValueError(
'encoding must be binary, got %s' % encoding.dtype)
self._data['encoding'] = encoding
@property
def _transform(self):
return self._data['transform']
@property
def transform(self):
"""4x4 homogeneous transformation matrix."""
return self._transform.matrix
@transform.setter
def transform(self, matrix):
"""4x4 homogeneous transformation matrix."""
self._transform.matrix = matrix
@property
def translation(self):
"""Location of voxel at [0, 0, 0]."""
return self._transform.translation
@property
def origin(self):
"""Deprecated. Use `self.translation`."""
# DEPRECATED. Use translation instead
return self.translation
@property
def scale(self):
"""
3-element float representing per-axis scale.
Raises a `RuntimeError` if `self.transform` has rotation or
shear components.
"""
return self._transform.scale
@property
def pitch(self):
"""
Uniform scaling factor representing the side length of
each voxel.
Returns
-----------
pitch : float
Pitch of the voxels.
Raises
------------
`RuntimeError`
If `self.transform` has rotation or shear
components or has non-uniform scaling.
"""
return self._transform.pitch
@property
def element_volume(self):
return self._transform.unit_volume
def apply_transform(self, matrix):
self._transform.apply_transform(matrix)
return self
def strip(self):
"""
Mutate self by stripping leading/trailing planes of zeros.
Returns
--------
self after mutation occurs in-place
"""
encoding, padding = self.encoding.stripped
self.encoding = encoding
self._transform.matrix[:3, 3] = self.indices_to_points(padding[:, 0])
return self
@caching.cache_decorator
def bounds(self):
indices = self.sparse_indices
# get all 8 corners of the AABB
corners = bounds_module.corners([indices.min(axis=0) - 0.5,
indices.max(axis=0) + 0.5])
# transform these corners to a new frame
corners = self._transform.transform_points(corners)
# get the AABB of corners in-frame
bounds = np.array([corners.min(axis=0), corners.max(axis=0)])
bounds.flags.writeable = False
return bounds
@caching.cache_decorator
def extents(self):
bounds = self.bounds
extents = bounds[1] - bounds[0]
extents.flags.writeable = False
return extents
@caching.cache_decorator
def is_empty(self):
return self.encoding.is_empty
@property
def shape(self):
"""3-tuple of ints denoting shape of occupancy grid."""
return self.encoding.shape
@caching.cache_decorator
def filled_count(self):
"""int, number of occupied voxels in the grid."""
return self.encoding.sum.item()
def is_filled(self, point):
"""
Query points to see if the voxel cells they lie in are
filled or not.
Parameters
----------
point : (n, 3) float
Points in space
Returns
---------
is_filled : (n,) bool
Is cell occupied or not for each point
"""
point = np.asanyarray(point)
indices = self.points_to_indices(point)
in_range = np.logical_and(
np.all(indices < np.array(self.shape), axis=-1),
np.all(indices >= 0, axis=-1))
is_filled = np.zeros_like(in_range)
is_filled[in_range] = self.encoding.gather_nd(indices[in_range])
return is_filled
def fill(self, method='holes', **kwargs):
"""
Mutates self by filling in the encoding according to `morphology.fill`.
Parameters
----------
method: implementation key, one of
`trimesh.voxel.morphology.fill.fillers` keys
**kwargs: additional kwargs passed to the keyed implementation
Returns
----------
self after replacing encoding with a filled version.
"""
self.encoding = morphology.fill(self.encoding, method=method, **kwargs)
return self
def hollow(self, structure=None):
"""
Mutates self by removing internal voxels leaving only surface elements.
Surviving elements are those in encoding that are adjacent to an empty
voxel, where adjacency is controlled by `structure`.
Parameters
----------
structure: adjacency structure. If None, square connectivity is used.
Returns
----------
self after replacing encoding with a surface version.
"""
self.encoding = morphology.surface(self.encoding)
return self
@caching.cache_decorator
def marching_cubes(self):
"""
A marching cubes Trimesh representation of the voxels.
No effort was made to clean or smooth the result in any way;
it is merely the result of applying the scikit-image
measure.marching_cubes function to self.encoding.dense.
Returns
---------
meshed: Trimesh object representing the current voxel
object, as returned by marching cubes algorithm.
"""
meshed = ops.matrix_to_marching_cubes(matrix=self.matrix)
return meshed
@property
def matrix(self):
"""
Return a DENSE matrix of the current voxel encoding
Returns
-------------
dense : (a, b, c) bool
Numpy array of dense matrix
Shortcut to voxel.encoding.dense
"""
return self.encoding.dense
@caching.cache_decorator
def volume(self):
"""
The volume of the filled cells in the current voxel object.
Returns
---------
volume: float, volume of filled cells
"""
return self.filled_count * self.element_volume
@caching.cache_decorator
def points(self):
"""
The center of each filled cell as a list of points.
Returns
----------
points: (self.filled, 3) float, list of points
"""
return self._transform.transform_points(
self.sparse_indices.astype(float))
@property
def sparse_indices(self):
"""(n, 3) int array of sparse indices of occupied voxels."""
return self.encoding.sparse_indices
def as_boxes(self, colors=None, **kwargs):
"""
A rough Trimesh representation of the voxels with a box
for each filled voxel.
Parameters
----------
colors : (3,) or (4,) float or uint8
(X, Y, Z, 3) or (X, Y, Z, 4) float or uint8
Where matrix.shape == (X, Y, Z)
Returns
---------
mesh : trimesh.Trimesh
Mesh with one box per filled cell.
"""
if colors is not None:
colors = np.asanyarray(colors)
if colors.ndim == 4:
encoding = self.encoding
if colors.shape[:3] == encoding.shape:
# TODO jackd: more efficient implementation?
# encoding.as_mask?
colors = colors[encoding.dense]
else:
log.warning('colors incorrect shape!')
colors = None
elif colors.shape not in ((3,), (4,)):
log.warning('colors incorrect shape!')
colors = None
mesh = ops.multibox(
centers=self.sparse_indices.astype(float), colors=colors)
mesh = mesh.apply_transform(self.transform)
return mesh
def points_to_indices(self, points):
"""
Convert points to indices in the matrix array.
Parameters
----------
points: (n, 3) float, point in space
Returns
---------
indices: (n, 3) int array of indices into self.encoding
"""
points = self._transform.inverse_transform_points(points)
return np.round(points).astype(int)
def indices_to_points(self, indices):
return self._transform.transform_points(indices.astype(float))
def show(self, *args, **kwargs):
"""
Convert the current set of voxels into a trimesh for visualization
and show that via its built-in preview method.
"""
return self.as_boxes(kwargs.pop(
'colors', None)).show(*args, **kwargs)
def copy(self):
return VoxelGrid(self.encoding.copy(),
self._transform.matrix.copy())
def revoxelized(self, shape):
"""
Create a new VoxelGrid without rotations, reflections or shearing.
Parameters
----------
shape: 3-tuple of ints denoting the shape of the returned VoxelGrid.
Returns
----------
VoxelGrid of the given shape with (possibly non-uniform) scale and
translation transformation matrix.
"""
from .. import util
shape = tuple(shape)
bounds = self.bounds.copy()
extents = self.extents
points = util.grid_linspace(bounds, shape).reshape(shape + (3,))
dense = self.is_filled(points)
scale = extents / np.asanyarray(shape)
translate = bounds[0]
return VoxelGrid(
dense,
transform=tr.scale_and_translate(scale, translate))
def __add__(self, other):
raise NotImplementedError("TODO : implement voxel concatenation")
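# Minimal usage sketch (illustrative only, not part of the trimesh API): build
# a VoxelGrid from a dense boolean occupancy array with a uniform 0.5-unit
# pitch, then round-trip the filled-cell centers through points_to_indices.
# Relies only on `np` imported above and the VoxelGrid class defined here.
def _example_voxelgrid_usage():
    dense = np.zeros((4, 4, 4), dtype=bool)
    dense[1:3, 1:3, 1:3] = True  # a 2x2x2 block of filled cells
    pitch = 0.5
    grid = VoxelGrid(dense, transform=np.diag([pitch, pitch, pitch, 1.0]))
    assert grid.filled_count == 8
    # centers of the filled cells, expressed in world coordinates
    centers = grid.points
    # mapping the centers back recovers the sparse voxel indices
    assert np.array_equal(grid.points_to_indices(centers), grid.sparse_indices)
    return grid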
|
|
"""
Converts a "TCGA format" tab-delimited ASCII feature matrix into
the binary format consumed by the pairwise executable.
Basically this just carries out a number of inspections, validations,
and conversions on each row of the data that would otherwise
1. be done redundantly in the C executable
2. and would be more error-prone to implement in C.
The most important aspects of this format are:
1. Every value in the original matrix is converted to a 4-byte
float or integer value in the result
2. Every row in the binary result is prefixed by a 32-bit word
that conveys attributes of that row to the pairwise
executable (the compiled C code, that is).
Row prefixes have the following format:
From the least to the most significant bit we have:
- The low-order 16 bits are the count of NAs
- The third byte is the category count for categorical variables.
- The high byte is unused
Data with a category count of 'one' is implicitly numeric.
Pictorially...
---------------------------------------
3322 2222 2222 1111 1111 1100 0000 0000
1098 7654 3210 9876 5432 1098 7654 3210
uuuu uuuD cccc cccC nnnn nnnn nnnn nnnn
---------------------------------------
u unused
D discrete flag
1 => binary/categorical variable
0 => numeric/scalar variable
c|C count of categories OR pseudo-count of numeric values
if D==0, then
C==0 => "OK"
C==1 => numeric variable is a constant vector (excluding NA's)
if D==1, then 'cccc cccC' is category count
n count of missing values
Notice that an intentional implication of the above definitions is
that if the 'cccc cccC' field == 1, the feature is degenerate
regardless of its type.
Note that the column count refers to DATA columns, excluding the
(currently) 32-bit row prefixes. The row headers are excluded
because they might change size in the future in which case
treating the row headers as a column would invalidate calculations
in associated C code since byte widths might not agree.
The overall format of the resulting file is:
1) HEADER
2) MATRIX
3) STRINGS
4) ROWMAP
...and STRINGS and ROWMAP are optional
Just FYI, in the outer 'with' clause at the bottom, the overall
call structure of the program is:
_verifyAbsence
_process
_processNumeric
_processCategorical
_appendStringTable (optionally)
"""
import sys
from struct import pack
import os.path
import subprocess
import optparse
assert sys.version_info.major >= 3
_SIG = "PAIRWISE"
_NAN_AS_UINT = 0x7FC00000
_NAN_AS_FLOAT = float('nan')
_NAN_STRINGS = frozenset(['na','nA','Na','NA'])
_SECTOR_SIZE = 512
_MAX_LEVELS = 32
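# Illustrative sketch only: how a consumer could unpack one 32-bit row prefix
# described in the module docstring above. Field layout follows the diagram:
# bits 0-15 hold the NA count, bits 16-23 the category count (C == 1 marks a
# degenerate numeric row), and bit 24 the discrete flag D.
def _decode_row_prefix(word):
    """Return (is_discrete, category_count, na_count) for one prefix word."""
    na_count = word & 0x0000FFFF
    category_count = (word >> 16) & 0xFF
    is_discrete = bool(word & 0x01000000)
    return is_discrete, category_count, na_count
# For example, the prefix written below for a genuinely boolean categorical
# row with 3 missing values decodes as:
#   _decode_row_prefix(0x01020003) == (True, 2, 3)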
parser = optparse.OptionParser( usage="usage: %prog [options]")
parser.add_option("-n","--no-names", action="store_false", dest="include_string_table",
default=True,
help="""Do not include row names in the binary file.
Excluding row names precludes feature lookup by name.
""")
parser.add_option("-H","--noheader", action="store_true", dest="noheader",
default=False,
help="""Do not expect a header line in the input file.
""")
parser.add_option("-O","--overwrite", action="store_true", dest="overwrite",
default=False,
help="""Overwrite previous existing matrix/map files (if any).
""")
parser.add_option("-C","--cat2num", action="store_true", dest="implicitNumeric",
default=False,
help="""Implicitly convert categorical features with >{} levels to numeric.
""".format( _MAX_LEVELS ) )
def _verifyAbsence( fname ):
if os.path.exists( fname ):
print( "{} exists, and I won't overwrite it!".format( fname ) )
sys.exit(-1)
def _processNumeric( fields, NC, fp, fnum, met ):
FORMAT = "I{}f".format(NC)
# Analyze the content of fields as strings before transforming it.
# Regarding the non-missing values...
PRESENT = [ float(v) for v in fields if v.upper() != "NA" ]
# ...are they all identical, meaning degenerate? And if there's only
# 1 non-missing, yes, that is considered degenerate, too!
isdegen = len(PRESENT) < 2 or all( [ PRESENT[0]==v for v in PRESENT[1:]] )
NA = len(fields) - len(PRESENT)
print( "NA", fnum, NA, sep="\t", file=met )
f = [ _NAN_AS_FLOAT if v.upper()=="NA" else float(v) for v in fields ]
b = pack( FORMAT, ( (0x00010000 if isdegen else 0) | (0x0000FFFF & NA) ), *f )
if fp.write( b ) != (NC+1)*4:
sys.exit( b )
return len(f)
def _processCategorical( fields, NC, fp, fnum, met, convertCatToNum=False ):
"""
Be aware of categorical features that actually are not!...
"""
FORMAT = "I{}I".format(NC)
NA = len([ 1 for v in fields if v.upper()=="NA" ])
print( "NA", fnum, NA, sep="\t", file=met )
S = set(fields).difference( _NAN_STRINGS )
# Special-case genuinely boolean categories (without labels)...
if S == frozenset(['0','1']):
f = [ _NAN_AS_UINT if v.upper()=="NA" else int(v) for v in fields ]
b = pack( FORMAT, ( 0x01020000 | (0x0000FFFF & NA) ), *f )
if fp.write( b ) != (NC+1)*4:
sys.exit( b )
NF = len(f)
elif len(S) <= _MAX_LEVELS:
S = sorted(list(S))
f = [ _NAN_AS_UINT if v.upper()=="NA" else S.index(v) for v in fields ]
b = pack( FORMAT,
( 0x01000000 | (0x00FF0000 & (len(S)<<16)) | (0x0000FFFF & NA) ), *f )
if fp.write( b ) != (NC+1)*4:
sys.exit( b )
# Write out the encoding
print( "FE\t{}\t{}".format( fnum, len(S) ), file=met )
for i in range(len(S)):
print( "CA\t{}\t{}".format(i,S[i]), file=met )
NF = len(f)
elif convertCatToNum:
# There are too many categories, and implicit conversion has been
# requested, so delegate this to _processNumeric with the data...
if all([ "NA"==v.upper() or v.isdigit() for v in fields ]):
# ...unchanged, if the labels are all integers (or NA), or...
NF = _processNumeric( fields, NC, fp, fnum, met )
conversionType = "natural"
else:
# ...converted to ARBITRARY integers if ANY of the labels
# are not already integers.
S = sorted(list(S))
NF = _processNumeric(
[ "NA" if v.upper()=="NA" else str(S.index(v)) for v in fields ],
NC, fp, fnum, met )
conversionType = "arbitrary"
# ...and warn in metadata that implicit conversion has occurred.
print( "IC\t{}\tcategorical -> numeric ({} order)".format( fnum, conversionType ), file=met )
else:
raise RuntimeError(
"error: factor with too many levels ({}).\n\tMaybe try the --cat2num option?".format(len(S)) )
return NF
def _process( idat, odat, ometa, expectheader=True, implicitNumeric=False ):
"""
Binarized matrix is written to odat.
Textual category encodings are written to fp_enc.
Returns number of DATA rows and DATA columns.
"""
loff = 0 # Feature offset and line offset need not correspond...
feat = 0 # ...because of comment lines.
rows = []
NC = 0
# Assume the first line is a header, but check for expected content.
if expectheader:
line = idat.readline()
fields = line.strip().split('\t') # both leading AND trailing ws.
NC = len(fields)-1
loff += 1
# Remaining lines are data.
line = idat.readline()
while len(line) > 0:
fields = line.strip().split('\t') # both leading AND trailing ws.
ty = line[0].upper()
rowname = fields[0] # Strip the TCGA label off the front...
datcols = fields[1:] # ...so NC remainder are data.
nc = len(datcols)
if NC == 0:
NC = nc # if the header is missing, the 1st data line sets the column count.
elif NC != nc: # if the column count doesn't match that from the header.
raise RuntimeError(
"{} data columns on line {}, expected {}".format(
nc, loff+1, NC ) )
if 'N' == ty: # check most common first!
_processNumeric( datcols, nc, odat, feat, ometa )
elif 'B' == ty or 'C' == ty:
try:
_processCategorical( datcols, nc, odat, feat, ometa, implicitNumeric )
except Exception as x:
raise RuntimeError( "{} in row {}".format( x, rowname ) )
else:
raise RuntimeError("unexpected type at line "+str(loff+1) )
rows.append( [ rowname, feat ] )
feat += 1
loff += 1
line = idat.readline()
return ( NC, rows ) # NC is well-defined even when the file has no data rows
def _appendStringTable( strings, fp ):
"""
This appends
1) a packed string table (ragged array) sorted by string value and
2) a map of (string_offset,matrix_row_offset) pairs
...to the end of the file.
"""
# Pad the file to a 16-byte boundary (just to simplify debugging in
# hexdump).
NUL = bytes(1)
while fp.tell() % 16:
fp.write( NUL )
# This is where the string section will reside, so note the offset...
OFF_STR = fp.tell()
# Store the strings, noting their offsets FROM THE BASE of the section
# and after each is stored, REPLACE it in the pair with its offset.
ordered = sorted( strings, key=lambda x:x[0] )
for pair in ordered:
OFF = fp.tell() - OFF_STR
s = pair[0].encode('ascii')
fp.write( pack( "{}sc".format(len(s)), s, NUL ) )
pair[0] = OFF # REPLACE the string with its offset.
# ...pad to a disk sector size boundary. (See notes at top for why.)
while fp.tell() % _SECTOR_SIZE:
fp.write( NUL )
# This is where the row map will reside, so note its file offset.
OFF_MAP = fp.tell()
for pair in ordered:
fp.write( pack( "II", pair[0], pair[1] ) ) # string offset, matrix row
return ( OFF_STR, OFF_MAP )
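# Illustrative sketch only (not used by this converter): given the offsets
# returned above plus the row count stored in the header, a reader could
# resolve a feature name to its matrix row. It assumes exactly the layout
# written by _appendStringTable: NUL-terminated names starting at off_str
# (sorted by name), then (string_offset, row_offset) uint32 pairs at off_map.
def _example_lookup_row(fp, off_str, off_map, n_rows, wanted_name):
    from struct import unpack
    fp.seek(off_map)
    pairs = [unpack("II", fp.read(8)) for _ in range(n_rows)]
    for str_off, row in pairs:  # linear scan; sorted names would also allow bisect
        fp.seek(off_str + str_off)
        raw = bytearray()
        b = fp.read(1)
        while b and b != b'\0':
            raw.extend(b)
            b = fp.read(1)
        if raw.decode('ascii') == wanted_name:
            return row
    return None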
############################################################################
opts,args = parser.parse_args()
INPUT = args[0]
OUTPUT = os.path.splitext(INPUT)[0] + ".bin"
CATEGS = os.path.splitext(INPUT)[0] + ".cat"
if not opts.overwrite:
_verifyAbsence( OUTPUT )
_verifyAbsence( CATEGS )
with open( INPUT ) as fp_in:
# Calculate the input matrix' MD5 hash to provide a hard tie to the
# resulting output matrix' source.
p = subprocess.Popen( [ "md5sum", INPUT ], stdout=subprocess.PIPE )
if p:
md5 = p.communicate()[0][:32].decode()
else:
raise RuntimeError("Is md5sum on your system?")
try:
with open( OUTPUT, "wb" ) as fp_out:
# Write out the header used by the C allpairs. Most of the values
# are unknown at this point, so we write 0's as placeholders then
# backup and fill them in at the very end when values are known.
SIGNATURE_BYTES = _SIG.encode('ascii')
HDR_SIZE = fp_out.write( pack(
"8sIIIIII32s",
SIGNATURE_BYTES,
0, # header size
0, # rows
0, # columns (of data)
0, # Offset to strings (0 => no row map/string table)
0, # Offset to row map (0 => no row map/string table)
0, # Unused
md5.encode('ascii') ) )
assert HDR_SIZE == 64
# Open another file to receive (textual) metadata.
with open( CATEGS, "w" ) as fp_cat:
print( "# for matrix {} with MD5 {}".format( INPUT, md5 ), file=fp_cat )
# ...and here begins the processing in earnest.
data_column_count,row_names = \
_process( fp_in, fp_out, fp_cat, not opts.noheader, opts.implicitNumeric )
if opts.include_string_table:
off_str, off_map = _appendStringTable( row_names, fp_out )
else:
off_str, off_map = 0,0
if off_str > 0xFFFFFFFF or off_map > 0xFFFFFFFF:
raise RuntimeError( "File too large" )
# Now back up to header and fill in header size, rows, and columns.
fp_out.seek( len(SIGNATURE_BYTES) )
fp_out.write( pack( "IIIII",
HDR_SIZE,
len(row_names),
data_column_count,
off_str,
off_map ) )
except Exception as x:
print( "Failed processing file: {}".format(INPUT), file=sys.stderr )
print( sys.exc_info() )
# In the event of failure, don't leave ANYTHING behind that might
# allow a parent script to continue.
if os.path.isfile( OUTPUT ):
print( "Removing", OUTPUT, file=sys.stderr )
os.remove( OUTPUT )
if os.path.isfile( CATEGS ):
print( "Removing", CATEGS, file=sys.stderr )
os.remove( CATEGS )
if __debug__:
print( HDR_SIZE, "header bytes" ) # just informational/sanity check
|
|
""" Testing DKI microstructure """
import numpy as np
import random
import dipy.reconst.dki_micro as dki_micro
from numpy.testing import (assert_array_almost_equal, assert_almost_equal,
assert_, assert_raises)
from dipy.sims.voxel import (multi_tensor_dki, _check_directions, multi_tensor)
from dipy.io.gradients import read_bvals_bvecs
from dipy.core.gradients import gradient_table
from dipy.data import get_fnames
from dipy.reconst.dti import (eig_from_lo_tri)
from dipy.data import default_sphere, get_sphere
fimg, fbvals, fbvecs = get_fnames('small_64D')
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
gtab = gradient_table(bvals, bvecs)
# 2 shells for techniques that requires multishell data
bvals_2s = np.concatenate((bvals, bvals * 2), axis=0)
bvecs_2s = np.concatenate((bvecs, bvecs), axis=0)
gtab_2s = gradient_table(bvals_2s, bvecs_2s)
# single fiber simulate (which is the assumption of our model)
FIE = np.array([[[0.30, 0.32], [0.74, 0.51]],
[[0.47, 0.21], [0.80, 0.63]]])
RDI = np.zeros((2, 2, 2))
ADI = np.array([[[1e-3, 1.3e-3], [0.8e-3, 1e-3]],
[[0.9e-3, 0.99e-3], [0.89e-3, 1.1e-3]]])
ADE = np.array([[[2.2e-3, 2.3e-3], [2.8e-3, 2.1e-3]],
[[1.9e-3, 2.5e-3], [1.89e-3, 2.1e-3]]])
Tor = np.array([[[2.6, 2.4], [2.8, 2.1]],
[[2.9, 2.5], [2.7, 2.3]]])
RDE = ADE / Tor
# prepare simulation:
DWIsim = np.zeros((2, 2, 2, gtab_2s.bvals.size))
# The diffusion microstructural model assumes that the signal has no Taylor
# approximation components of order higher than four. Thus the parameter
# estimates only equal the ground truth values of the simulation if the
# signal's Taylor components above the fourth order are removed. Signals
# without these Taylor components can be generated with the multi_tensor_dki
# simulations, so that function is used to test the expected estimates of
# the model.
DWIsim_all_taylor = np.zeros((2, 2, 2, gtab_2s.bvals.size))
# Signals with all Taylor components can be simulated using the function
# multi_tensor. Generating these signals is useful for testing the prediction
# procedures of the DKI-based microstructural model.
for i in range(2):
for j in range(2):
for k in range(2):
ADi = ADI[i, j, k]
RDi = RDI[i, j, k]
ADe = ADE[i, j, k]
RDe = RDE[i, j, k]
fie = FIE[i, j, k]
mevals = np.array([[ADi, RDi, RDi], [ADe, RDe, RDe]])
frac = [fie*100, (1 - fie)*100]
theta = random.uniform(0, 180)
phi = random.uniform(0, 320)
angles = [(theta, phi), (theta, phi)]
signal, dt, kt = multi_tensor_dki(gtab_2s, mevals,
angles=angles,
fractions=frac, snr=None)
DWIsim[i, j, k, :] = signal
signal, sticks = multi_tensor(gtab_2s, mevals, angles=angles,
fractions=frac, snr=None)
DWIsim_all_taylor[i, j, k, :] = signal
def test_single_fiber_model():
# single fiber simulate (which is the assumption of our model)
fie = 0.49
ADi = 0.00099
ADe = 0.00226
RDi = 0
RDe = 0.00087
# prepare simulation:
theta = random.uniform(0, 180)
phi = random.uniform(0, 320)
angles = [(theta, phi), (theta, phi)]
mevals = np.array([[ADi, RDi, RDi], [ADe, RDe, RDe]])
frac = [fie*100, (1 - fie)*100]
signal, dt, kt = multi_tensor_dki(gtab_2s, mevals, angles=angles,
fractions=frac, snr=None)
# DKI fit
dkiM = dki_micro.DiffusionKurtosisModel(gtab_2s, fit_method="WLS")
dkiF = dkiM.fit(signal)
# Axonal Water Fraction
AWF = dki_micro.axonal_water_fraction(dkiF.model_params, default_sphere,
mask=None, gtol=1e-5)
assert_almost_equal(AWF, fie)
# Extra-cellular and intra-cellular components
edt, idt = dki_micro.diffusion_components(dkiF.model_params,
default_sphere)
EDT = eig_from_lo_tri(edt)
IDT = eig_from_lo_tri(idt)
# check eigenvalues
assert_array_almost_equal(EDT[0:3], np.array([ADe, RDe, RDe]))
assert_array_almost_equal(IDT[0:3], np.array([ADi, RDi, RDi]))
# first eigenvalue should be the direction of the fibers
fiber_direction = _check_directions([(theta, phi)])
f_norm = abs(np.dot(fiber_direction, np.array((EDT[3], EDT[6], EDT[9]))))
assert_almost_equal(f_norm, 1.)
f_norm = abs(np.dot(fiber_direction, np.array((IDT[3], IDT[6], IDT[9]))))
assert_almost_equal(f_norm, 1.)
# Test model and fit objects
wmtiM = dki_micro.KurtosisMicrostructureModel(gtab_2s, fit_method="WLS")
wmtiF = wmtiM.fit(signal)
assert_almost_equal(wmtiF.awf, AWF)
assert_array_almost_equal(wmtiF.hindered_evals,
np.array([ADe, RDe, RDe]))
assert_array_almost_equal(wmtiF.restricted_evals,
np.array([ADi, RDi, RDi]))
assert_almost_equal(wmtiF.hindered_ad, ADe)
assert_almost_equal(wmtiF.hindered_rd, RDe)
assert_almost_equal(wmtiF.axonal_diffusivity, ADi)
assert_almost_equal(wmtiF.tortuosity, ADe/RDe, decimal=4)
# Test diffusion_components when a kurtosis tensor is associated with
# negative kurtosis values. An example of such a case is given below:
dkiparams = np.array([1.67135726e-03, 5.03651205e-04, 9.35365328e-05,
-7.11167583e-01, 6.23186820e-01, -3.25390313e-01,
-1.75247376e-02, -4.78415563e-01, -8.77958674e-01,
7.02804064e-01, 6.18673368e-01, -3.51154825e-01,
2.18384153, -2.76378153e-02, 2.22893297,
-2.68306546e-01, -1.28411610, -1.56557645e-01,
-1.80850619e-01, -8.33152110e-01, -3.62410766e-01,
1.57775442e-01, 8.73775381e-01, 2.77188975e-01,
-3.67415502e-02, -1.56330984e-01, -1.62295407e-02])
edt, idt = dki_micro.diffusion_components(dkiparams)
assert_(np.all(np.isfinite(edt)))
def test_wmti_model_multi_voxel():
# DKI fit
dkiM = dki_micro.DiffusionKurtosisModel(gtab_2s, fit_method="WLS")
dkiF = dkiM.fit(DWIsim)
# Axonal Water Fraction
sphere = get_sphere()
AWF = dki_micro.axonal_water_fraction(dkiF.model_params, sphere, mask=None,
gtol=1e-5)
assert_almost_equal(AWF, FIE)
# Extra-cellular and intra-cellular components
edt, idt = dki_micro.diffusion_components(dkiF.model_params, sphere)
EDT = eig_from_lo_tri(edt)
IDT = eig_from_lo_tri(idt)
# check eigenvalues
assert_array_almost_equal(EDT[..., 0], ADE, decimal=3)
assert_array_almost_equal(EDT[..., 1], RDE, decimal=3)
assert_array_almost_equal(EDT[..., 2], RDE, decimal=3)
assert_array_almost_equal(IDT[..., 0], ADI, decimal=3)
assert_array_almost_equal(IDT[..., 1], RDI, decimal=3)
assert_array_almost_equal(IDT[..., 2], RDI, decimal=3)
# Test methods performance when a signal with all zeros is present
FIEc = FIE.copy()
RDIc = RDI.copy()
ADIc = ADI.copy()
ADEc = ADE.copy()
Torc = Tor.copy()
RDEc = RDE.copy()
DWIsimc = DWIsim.copy()
FIEc[0, 0, 0] = 0
RDIc[0, 0, 0] = 0
ADIc[0, 0, 0] = 0
ADEc[0, 0, 0] = 0
Torc[0, 0, 0] = 0
RDEc[0, 0, 0] = 0
DWIsimc[0, 0, 0, :] = 0
mask = np.ones((2, 2, 2))
mask[0, 0, 0] = 0
dkiF = dkiM.fit(DWIsimc)
awf = dki_micro.axonal_water_fraction(dkiF.model_params, sphere,
gtol=1e-5)
assert_almost_equal(awf, FIEc)
# Extra-cellular and intra-cellular components
edt, idt = dki_micro.diffusion_components(dkiF.model_params, sphere,
awf=awf)
EDT = eig_from_lo_tri(edt)
IDT = eig_from_lo_tri(idt)
assert_array_almost_equal(EDT[..., 0], ADEc, decimal=3)
assert_array_almost_equal(EDT[..., 1], RDEc, decimal=3)
assert_array_almost_equal(EDT[..., 2], RDEc, decimal=3)
assert_array_almost_equal(IDT[..., 0], ADIc, decimal=3)
assert_array_almost_equal(IDT[..., 1], RDIc, decimal=3)
assert_array_almost_equal(IDT[..., 2], RDIc, decimal=3)
# Check when mask is given
dkiF = dkiM.fit(DWIsim)
awf = dki_micro.axonal_water_fraction(dkiF.model_params, sphere,
gtol=1e-5, mask=mask)
assert_almost_equal(awf, FIEc, decimal=3)
# Extra-cellular and intra-cellular components
edt, idt = dki_micro.diffusion_components(dkiF.model_params, sphere,
awf=awf, mask=mask)
EDT = eig_from_lo_tri(edt)
IDT = eig_from_lo_tri(idt)
assert_array_almost_equal(EDT[..., 0], ADEc, decimal=3)
assert_array_almost_equal(EDT[..., 1], RDEc, decimal=3)
assert_array_almost_equal(EDT[..., 2], RDEc, decimal=3)
assert_array_almost_equal(IDT[..., 0], ADIc, decimal=3)
assert_array_almost_equal(IDT[..., 1], RDIc, decimal=3)
assert_array_almost_equal(IDT[..., 2], RDIc, decimal=3)
# Check class object
wmtiM = dki_micro.KurtosisMicrostructureModel(gtab_2s, fit_method="WLS")
wmtiF = wmtiM.fit(DWIsim, mask=mask)
assert_almost_equal(wmtiF.awf, FIEc, decimal=3)
assert_almost_equal(wmtiF.axonal_diffusivity, ADIc, decimal=3)
assert_almost_equal(wmtiF.hindered_ad, ADEc, decimal=3)
assert_almost_equal(wmtiF.hindered_rd, RDEc, decimal=3)
assert_almost_equal(wmtiF.tortuosity, Torc, decimal=3)
def test_dki_micro_predict_single_voxel():
# single fiber simulate (which is the assumption of our model)
fie = 0.49
ADi = 0.00099
ADe = 0.00226
RDi = 0
RDe = 0.00087
# prepare simulation:
theta = random.uniform(0, 180)
phi = random.uniform(0, 320)
angles = [(theta, phi), (theta, phi)]
mevals = np.array([[ADi, RDi, RDi], [ADe, RDe, RDe]])
frac = [fie*100, (1 - fie)*100]
signal, dt, kt = multi_tensor_dki(gtab_2s, mevals, angles=angles,
fractions=frac, snr=None)
signal_gt, da = multi_tensor(gtab_2s, mevals, angles=angles,
fractions=frac, snr=None)
# Define the DKI microstructural model
dkiM = dki_micro.KurtosisMicrostructureModel(gtab_2s)
# Fit single voxel signal
dkiF = dkiM.fit(signal)
# Check predict of KurtosisMicrostructureModel
pred = dkiM.predict(dkiF.model_params)
assert_array_almost_equal(pred, signal_gt, decimal=4)
pred = dkiM.predict(dkiF.model_params, S0=100)
assert_array_almost_equal(pred, signal_gt * 100, decimal=4)
# Check predict of KurtosisMicrostructuralFit
pred = dkiF.predict(gtab_2s, S0=100)
assert_array_almost_equal(pred, signal_gt * 100, decimal=4)
def test_dki_micro_predict_multi_voxel():
dkiM = dki_micro.KurtosisMicrostructureModel(gtab_2s)
dkiF = dkiM.fit(DWIsim)
# Check predict of KurtosisMicrostructureModel
pred = dkiM.predict(dkiF.model_params)
assert_array_almost_equal(pred, DWIsim_all_taylor, decimal=3)
pred = dkiM.predict(dkiF.model_params, S0=100)
assert_array_almost_equal(pred, DWIsim_all_taylor * 100, decimal=3)
# Check predict of KurtosisMicrostructuralFit
pred = dkiF.predict(gtab_2s, S0=100)
assert_array_almost_equal(pred, DWIsim_all_taylor * 100, decimal=3)
def _help_test_awf_only(dkimicrofit, string):
exec(string)
def test_dki_micro_awf_only():
dkiM = dki_micro.KurtosisMicrostructureModel(gtab_2s)
dkiF = dkiM.fit(DWIsim, awf_only=True)
awf = dkiF.awf
assert_almost_equal(awf, FIE, decimal=3)
# assert_raises(dkiF.hindered_evals)
assert_raises(ValueError, _help_test_awf_only, dkiF,
'dkimicrofit.hindered_evals')
assert_raises(ValueError, _help_test_awf_only, dkiF,
'dkimicrofit.restricted_evals')
assert_raises(ValueError, _help_test_awf_only, dkiF,
'dkimicrofit.axonal_diffusivity')
assert_raises(ValueError, _help_test_awf_only, dkiF,
'dkimicrofit.hindered_ad')
assert_raises(ValueError, _help_test_awf_only, dkiF,
'dkimicrofit.hindered_rd')
assert_raises(ValueError, _help_test_awf_only, dkiF,
'dkimicrofit.tortuosity')
def test_tortuosity():
# Test tortuosity when rd is zero
# single voxel
t = dki_micro.tortuosity(1.7e-3, 0.0)
assert_almost_equal(t, 0.0)
# multi-voxel
RDEc = RDE.copy()
Torc = Tor.copy()
RDEc[1, 1, 1] = 0.0
Torc[1, 1, 1] = 0.0
t = dki_micro.tortuosity(ADE, RDEc)
assert_almost_equal(Torc, t)
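# Editor's illustrative note (not part of the test suite): in this model the
# tortuosity is the ratio of the hindered (extra-cellular) axial to radial
# diffusivity, with a guard against division by zero, roughly:
#
#   ad, rd = 2.26e-3, 0.87e-3
#   tor = ad / rd if rd > 0 else 0.0   # ~2.6 here; 0.0 when rd == 0
#
# which is the behavior the assertions above exercise via dki_micro.tortuosity.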
|
|
"""
Django settings for gge_storage project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
ROOT_DIR = os.path.abspath(os.path.join(BASE_DIR, '..'))
from ConfigParser import RawConfigParser
config = RawConfigParser()
config_file = os.path.expanduser('~/.gge_storage/settings.ini')
if os.path.exists(config_file):
config.read(config_file)
else:
config.read(os.path.join(ROOT_DIR, 'settings-example.ini'))
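# Illustrative layout of the INI file read above (partial, inferred from the
# config.get()/getboolean() calls in this module; all values are placeholders):
#
#   [debug]
#   DEBUG = true
#   INTERNAL_IPS = 127.0.0.1
#
#   [secrets]
#   SECRET_KEY = change-me
#
#   [database]
#   DATABASE_ENGINE = django.db.backends.postgresql_psycopg2
#   DATABASE_NAME = gge_storage
#   DATABASE_USER = gge
#   DATABASE_PASSWORD = change-me
#   DATABASE_HOST = localhost
#
#   [common]
#   STATIC_URL = /static/
#
#   [error mail]
#   admin = [email protected]
#
#   [404 mail]
#   manager = [email protected]
#
# Further sections used below: [email], [pushover] and [login service].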
ADMINS = tuple(config.items('error mail'))
MANAGERS = tuple(config.items('404 mail'))
DEBUG = config.getboolean('debug', 'DEBUG')
SECRET_KEY = config.get('secrets', 'SECRET_KEY')
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
SITE_ID = 1
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.sites',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
#'djangocms_file',
#'djangocms_flash',
#'djangocms_googlemap',
#'djangocms_inherit',
#'djangocms_picture',
#'djangocms_teaser',
#'djangocms_video',
# 'djangocms_link',
#'djangocms_snippet',
#'djangocms_grid',
#'djangocms_column',
'djangocms_text_ckeditor',
'cms',
'menus',
'reversion',
'djangocms_admin_style',
'mptt',
'easy_thumbnails',
'filer',
'djcelery',
'lib.socket',
'lib.proxy',
'lib.messaging',
'pushover',
'gge_proxy_manager',
'frontend',
'authentication',
'intern',
'npc',
'templated_forms',
'sekizai',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.middleware.common.CommonMiddleware',
'intern.middlewares.player.EnrichPlayerMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS += (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.i18n',
'django.core.context_processors.request',
'django.core.context_processors.media',
'django.core.context_processors.static',
'sekizai.context_processors.sekizai',
'cms.context_processors.cms_settings',
)
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
#'easy_thumbnails.processors.scale_and_crop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters',
)
MIGRATION_MODULES = {
'filer': 'filer.migrations_django',
'cmsplugin_filer_file': 'cmsplugin_filer_file.migrations_django',
'cmsplugin_filer_folder': 'cmsplugin_filer_folder.migrations_django',
'cmsplugin_filer_image': 'cmsplugin_filer_image.migrations_django',
'cmsplugin_filer_teaser': 'cmsplugin_filer_teaser.migrations_django',
'cmsplugin_filer_video': 'cmsplugin_filer_video.migrations_django',
'cms': 'cms.migrations_django',
'menus': 'menus.migrations_django',
# Add also the following modules if you're using these plugins:
'djangocms_file': 'djangocms_file.migrations_django',
'djangocms_flash': 'djangocms_flash.migrations_django',
'djangocms_googlemap': 'djangocms_googlemap.migrations_django',
'djangocms_inherit': 'djangocms_inherit.migrations_django',
'djangocms_link': 'djangocms_link.migrations_django',
'djangocms_picture': 'djangocms_picture.migrations_django',
'djangocms_snippet': 'djangocms_snippet.migrations_django',
'djangocms_teaser': 'djangocms_teaser.migrations_django',
'djangocms_video': 'djangocms_video.migrations_django',
'djangocms_text_ckeditor': 'djangocms_text_ckeditor.migrations_django',
}
ROOT_URLCONF = 'gge_storage.urls'
WSGI_APPLICATION = 'gge_storage.wsgi.application'
CMS_TEMPLATES = (
('_base.html', 'Base Layout'),
)
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': config.get('database', 'DATABASE_ENGINE'),
'NAME': config.get('database', 'DATABASE_NAME'),
'USER': config.get('database', 'DATABASE_USER'),
'PASSWORD': config.get('database', 'DATABASE_PASSWORD'),
'HOST': config.get('database', 'DATABASE_HOST')
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGES = (
('de', 'German'),
)
LANGUAGE_CODE = 'de'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
INTERNAL_IPS = tuple(config.get('debug', 'INTERNAL_IPS').split())
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.abspath(os.path.join(BASE_DIR, '../static'))
STATIC_URL = config.get('common', 'STATIC_URL')
#STATICFILES_DIRS = (
# os.path.join(ROOT_DIR, "static_basic"),
#)
MEDIA_ROOT = os.path.abspath(os.path.join(BASE_DIR, '../media'))
MEDIA_URL = '/media/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, "../templates"),
)
LOGIN_URL = '/auth/login'
LOGIN_REDIRECT_URL = '/intern'
SOCKET_MIDDLEWARE_CLASSES = (
'lib.socket.middleware.auth.AuthMiddleware',
'lib.socket.middleware.game_recognition.GameRecognitionMiddleware',
'lib.socket.middleware.global_data.GlobalDataMiddleware',
'lib.socket.middleware.collector.CollectorMiddleware',
'lib.socket.middleware.goods_count.GoodsCollectedMiddleware',
'lib.socket.middleware.resource_count.ResourceCountMiddleware',
'lib.socket.middleware.logistic_job.LogisticJobMiddleware',
'lib.socket.middleware.ping.PingMiddleware',
'lib.socket.middleware.resource_key.ResourceKeyMiddleware',
'lib.socket.middleware.production.ProductionMiddleware',
'lib.socket.middleware.geotracking.GeotrackingMiddleware',
'lib.socket.middleware.afk.AfkMiddleware',
'lib.socket.middleware.alliance.AllianceMiddleware',
'lib.socket.middleware.attack.AttackLogMiddleware',
'lib.socket.middleware.map_explorer.MapExplorerMiddleware',
'lib.socket.middleware.castle_economy.CastleEconomyMiddleware',
'lib.socket.middleware.log.LogMiddleware',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
#'django': {
# 'handlers': ['null'],
# 'propagate': True,
# 'level': 'INFO',
#},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'gge_proxy_manager': {
'handlers': ['console'],
'level': 'DEBUG',
#'filters': ['special']
},
'lib': {
'handlers': ['console'],
'level': 'DEBUG',
#'filters': ['special']
}
}
}
# DEFAULT_FROM_EMAIL = '[email protected]'
# SERVER_EMAIL = DEFAULT_FROM_EMAIL
# EMAIL_HOST = 'post.bam.st'
# EMAIL_HOST_PASSWORD = 'Buwuta81Godafo66'
# EMAIL_HOST_USER = '[email protected]'
# EMAIL_SUBJECT_PREFIX = '[GGE] '
# EMAIL_USE_TLS = True
# EMAIL_USE_SSL = False
DEFAULT_FROM_EMAIL = config.get('email', 'FROM_EMAIL')
SERVER_EMAIL = config.get('email', 'SERVER_EMAIL')
EMAIL_HOST = config.get('email', 'EMAIL_HOST')
EMAIL_HOST_PASSWORD = config.get('email', 'EMAIL_PASSWORD')
EMAIL_HOST_USER = config.get('email', 'EMAIL_USER')
EMAIL_SUBJECT_PREFIX = config.get('email', 'SUBJECT_PREFIX')
EMAIL_USE_TLS = config.getboolean('email', 'USE_TLS')
EMAIL_USE_SSL = config.getboolean('email', 'USE_SSL')
# Celery
import djcelery
djcelery.setup_loader()
BROKER_URL = 'amqp://guest:guest@localhost:5672//'
CELERY_RESULT_BACKEND = BROKER_URL # 'database'
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
NOTIFY_NEW_RUIN = {
"EmpireEx_2": (
),
"EmpirefourkingdomsExGG": (
),
}
PUSHOVER_APP_TOKEN = config.get('pushover', 'APP_TOKEN')
PUSHOVER_NEW_RUIN_TOKEN = config.get('pushover', 'NEW_RUIN_TOKEN')
LOGIN_SERVICE_URL = config.get('login service', 'URL')
LOGIN_SERVICE_SECRET = config.get('login service', 'SECRET')
DEPLOYMENT_ID = '@dev'
|
|
from __future__ import unicode_literals
import requests
import time
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
from decimal import Decimal
from .exceptions import (
PageError, DisambiguationError, RedirectError, HTTPTimeoutError,
WikipediaException, ODD_ERROR_MESSAGE)
from .util import cache, stdout_encode, debug
import re
API_URL = 'http://en.wikipedia.org/w/api.php'
RATE_LIMIT = False
RATE_LIMIT_MIN_WAIT = None
RATE_LIMIT_LAST_CALL = None
USER_AGENT = 'wikipedia (https://github.com/goldsmith/Wikipedia/)'
def set_lang(prefix):
'''
Change the language of the API being requested.
Set `prefix` to one of the two letter prefixes found on the `list of all Wikipedias <http://meta.wikimedia.org/wiki/List_of_Wikipedias>`_.
After setting the language, the cache for ``search``, ``suggest``, and ``summary`` will be cleared.
.. note:: Make sure you search for page titles in the language that you have set.
'''
global API_URL
API_URL = 'http://' + prefix.lower() + '.wikipedia.org/w/api.php'
for cached_func in (search, suggest, summary):
cached_func.clear_cache()
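# Illustrative usage (editor's sketch; assumes this package is imported as
# `wikipedia` and that the German Wikipedia should be queried):
#
#   import wikipedia
#   wikipedia.set_lang("de")
#   wikipedia.search("Berlin")   # now queries http://de.wikipedia.org/w/api.php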
def set_user_agent(user_agent_string):
'''
Set the User-Agent string to be used for all requests.
Arguments:
* user_agent_string - (string) a string specifying the User-Agent header
'''
global USER_AGENT
USER_AGENT = user_agent_string
def set_rate_limiting(rate_limit, min_wait=timedelta(milliseconds=50)):
'''
Enable or disable rate limiting on requests to the Mediawiki servers.
If rate limiting is not enabled, under some circumstances (depending on
load on Wikipedia, the number of requests you and other `wikipedia` users
are making, and other factors), Wikipedia may return an HTTP timeout error.
Enabling rate limiting generally prevents that issue, but please note that
HTTPTimeoutError still might be raised.
Arguments:
* rate_limit - (Boolean) whether to enable rate limiting or not
Keyword arguments:
* min_wait - if rate limiting is enabled, `min_wait` is a timedelta describing the minimum time to wait before requests.
Defaults to timedelta(milliseconds=50)
'''
global RATE_LIMIT
global RATE_LIMIT_MIN_WAIT
global RATE_LIMIT_LAST_CALL
RATE_LIMIT = rate_limit
if not rate_limit:
RATE_LIMIT_MIN_WAIT = None
else:
RATE_LIMIT_MIN_WAIT = min_wait
RATE_LIMIT_LAST_CALL = None
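# Illustrative usage (editor's sketch): throttle requests to at most one every
# 100 ms, which reduces the chance of Wikipedia returning an HTTP timeout:
#
#   from datetime import timedelta
#   import wikipedia
#   wikipedia.set_rate_limiting(True, min_wait=timedelta(milliseconds=100))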
@cache
def search(query, results=10, suggestion=False):
'''
Do a Wikipedia search for `query`.
Keyword arguments:
* results - the maximum number of results returned
* suggestion - if True, return results and suggestion (if any) in a tuple
'''
search_params = {
'list': 'search',
'srprop': '',
'srlimit': results,
'limit': results,
'srsearch': query
}
if suggestion:
search_params['srinfo'] = 'suggestion'
raw_results = _wiki_request(search_params)
if 'error' in raw_results:
if raw_results['error']['info'] in ('HTTP request timed out.', 'Pool queue is full'):
raise HTTPTimeoutError(query)
else:
raise WikipediaException(raw_results['error']['info'])
search_results = (d['title'] for d in raw_results['query']['search'])
if suggestion:
if raw_results['query'].get('searchinfo'):
return list(search_results), raw_results['query']['searchinfo']['suggestion']
else:
return list(search_results), None
return list(search_results)
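# Illustrative usage (editor's sketch): a plain search, and a search that also
# returns Wikipedia's spelling suggestion (or None) alongside the titles:
#
#   import wikipedia
#   titles = wikipedia.search("dijkstra algorithm", results=5)
#   titles, hint = wikipedia.search("dijxtra algorithm", suggestion=True)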
@cache
def geosearch(latitude, longitude, title=None, results=10, radius=1000):
'''
Do a Wikipedia geosearch for `latitude` and `longitude`,
using the HTTP API described at http://www.mediawiki.org/wiki/Extension:GeoData
Arguments:
* latitude (float or decimal.Decimal)
* longitude (float or decimal.Decimal)
Keyword arguments:
* title - The title of an article to search for
* results - the maximum number of results returned
* radius - Search radius in meters. The value must be between 10 and 10000
'''
search_params = {
'list': 'geosearch',
'gsradius': radius,
'gscoord': '{0}|{1}'.format(latitude, longitude),
'gslimit': results
}
if title:
search_params['titles'] = title
raw_results = _wiki_request(search_params)
if 'error' in raw_results:
if raw_results['error']['info'] in ('HTTP request timed out.', 'Pool queue is full'):
raise HTTPTimeoutError('{0}|{1}'.format(latitude, longitude))
else:
raise WikipediaException(raw_results['error']['info'])
search_pages = raw_results['query'].get('pages', None)
if search_pages:
search_results = (v['title'] for k, v in search_pages.items() if k != '-1')
else:
search_results = (d['title'] for d in raw_results['query']['geosearch'])
return list(search_results)
@cache
def suggest(query):
'''
Get a Wikipedia search suggestion for `query`.
Returns a string or None if no suggestion was found.
'''
search_params = {
'list': 'search',
'srinfo': 'suggestion',
'srprop': '',
}
search_params['srsearch'] = query
raw_result = _wiki_request(search_params)
if raw_result['query'].get('searchinfo'):
return raw_result['query']['searchinfo']['suggestion']
return None
def random(pages=1):
'''
Get a list of random Wikipedia article titles.
.. note:: Random only gets articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages.
Keyword arguments:
* pages - the number of random pages returned (max of 10)
'''
#http://en.wikipedia.org/w/api.php?action=query&list=random&rnlimit=5000&format=jsonfm
query_params = {
'list': 'random',
'rnnamespace': 0,
'rnlimit': pages,
}
request = _wiki_request(query_params)
titles = [page['title'] for page in request['query']['random']]
if len(titles) == 1:
return titles[0]
return titles
@cache
def summary(title, sentences=0, chars=0, auto_suggest=True, redirect=True):
'''
Plain text summary of the page.
.. note:: This is a convenience wrapper - auto_suggest and redirect are enabled by default
Keyword arguments:
* sentences - if set, return the first `sentences` sentences (can be no greater than 10).
* chars - if set, return only the first `chars` characters (actual text returned may be slightly longer).
* auto_suggest - let Wikipedia find a valid page title for the query
* redirect - allow redirection without raising RedirectError
'''
# use auto_suggest and redirect to get the correct article
# also, use page's error checking to raise DisambiguationError if necessary
page_info = page(title, auto_suggest=auto_suggest, redirect=redirect)
title = page_info.title
pageid = page_info.pageid
query_params = {
'prop': 'extracts',
'explaintext': '',
'titles': title
}
if sentences:
query_params['exsentences'] = sentences
elif chars:
query_params['exchars'] = chars
else:
query_params['exintro'] = ''
request = _wiki_request(query_params)
summary = request['query']['pages'][pageid]['extract']
return summary
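# Illustrative usage (editor's sketch): a short plain-text summary, optionally
# truncated by sentence count (the title is resolved through page() as above):
#
#   import wikipedia
#   text = wikipedia.summary("Python (programming language)", sentences=2)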
def page(title=None, pageid=None, auto_suggest=True, redirect=True, preload=False):
'''
Get a WikipediaPage object for the page with title `title` or the pageid
`pageid` (mutually exclusive).
Keyword arguments:
* title - the title of the page to load
* pageid - the numeric pageid of the page to load
* auto_suggest - let Wikipedia find a valid page title for the query
* redirect - allow redirection without raising RedirectError
* preload - load content, summary, images, references, and links during initialization
'''
if title is not None:
if auto_suggest:
results, suggestion = search(title, results=1, suggestion=True)
try:
title = suggestion or results[0]
except IndexError:
# if there is no suggestion or search results, the page doesn't exist
raise PageError(title)
return WikipediaPage(title, redirect=redirect, preload=preload)
elif pageid is not None:
return WikipediaPage(pageid=pageid, preload=preload)
else:
raise ValueError("Either a title or a pageid must be specified")
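# Illustrative usage (editor's sketch): load a page and handle the two common
# failure modes raised by the loading code in WikipediaPage below:
#
#   import wikipedia
#   try:
#       p = wikipedia.page("Mercury")
#   except wikipedia.DisambiguationError as err:
#       p = wikipedia.page(err.options[0])   # pick one of the candidate titles
#   except wikipedia.PageError:
#       p = None                             # no matching page or suggestion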
class WikipediaPage(object):
'''
Contains data from a Wikipedia page.
Uses property methods to filter data from the raw HTML.
'''
def __init__(self, title=None, pageid=None, redirect=True, preload=False, original_title=''):
if title is not None:
self.title = title
self.original_title = original_title or title
elif pageid is not None:
self.pageid = pageid
else:
raise ValueError("Either a title or a pageid must be specified")
self.__load(redirect=redirect, preload=preload)
if preload:
for prop in ('content', 'summary', 'images', 'references', 'links', 'sections'):
getattr(self, prop)
def __repr__(self):
return stdout_encode(u'<WikipediaPage \'{}\'>'.format(self.title))
def __eq__(self, other):
try:
return (
self.pageid == other.pageid
and self.title == other.title
and self.url == other.url
)
except:
return False
def __load(self, redirect=True, preload=False):
'''
Load basic information from Wikipedia.
Confirm that page exists and is not a disambiguation/redirect.
Does not need to be called manually, should be called automatically during __init__.
'''
query_params = {
'prop': 'info|pageprops',
'inprop': 'url',
'ppprop': 'disambiguation',
'redirects': '',
}
if not getattr(self, 'pageid', None):
query_params['titles'] = self.title
else:
query_params['pageids'] = self.pageid
request = _wiki_request(query_params)
query = request['query']
pageid = list(query['pages'].keys())[0]
page = query['pages'][pageid]
# missing is present if the page is missing
if 'missing' in page:
if hasattr(self, 'title'):
raise PageError(self.title)
else:
raise PageError(pageid=self.pageid)
# same thing for redirect, except it shows up in query instead of page for
# whatever silly reason
elif 'redirects' in query:
if redirect:
redirects = query['redirects'][0]
if 'normalized' in query:
normalized = query['normalized'][0]
assert normalized['from'] == self.title, ODD_ERROR_MESSAGE
from_title = normalized['to']
else:
from_title = self.title
assert redirects['from'] == from_title, ODD_ERROR_MESSAGE
# change the title and reload the whole object
self.__init__(redirects['to'], redirect=redirect, preload=preload)
else:
raise RedirectError(getattr(self, 'title', page['title']))
# since we only asked for disambiguation in ppprop,
# if a pageprop is returned,
# then the page must be a disambiguation page
elif 'pageprops' in page:
query_params = {
'prop': 'revisions',
'rvprop': 'content',
'rvparse': '',
'rvlimit': 1
}
if hasattr(self, 'pageid'):
query_params['pageids'] = self.pageid
else:
query_params['titles'] = self.title
request = _wiki_request(query_params)
html = request['query']['pages'][pageid]['revisions'][0]['*']
lis = BeautifulSoup(html).find_all('li')
filtered_lis = [li for li in lis if 'tocsection' not in ''.join(li.get('class', []))]
may_refer_to = [li.a.get_text() for li in filtered_lis if li.a]
raise DisambiguationError(getattr(self, 'title', page['title']), may_refer_to)
else:
self.pageid = pageid
self.title = page['title']
self.url = page['fullurl']
def __continued_query(self, query_params):
'''
Based on https://www.mediawiki.org/wiki/API:Query#Continuing_queries
'''
query_params.update(self.__title_query_param)
last_continue = {}
prop = query_params.get('prop', None)
while True:
params = query_params.copy()
params.update(last_continue)
request = _wiki_request(params)
if 'query' not in request:
break
pages = request['query']['pages']
if 'generator' in query_params:
for datum in pages.values(): # in python 3.3+: "yield from pages.values()"
yield datum
else:
for datum in pages[self.pageid][prop]:
yield datum
if 'continue' not in request:
break
last_continue = request['continue']
@property
def __title_query_param(self):
if getattr(self, 'title', None) is not None:
return {'titles': self.title}
else:
return {'pageids': self.pageid}
def html(self):
'''
Get full page HTML.
.. warning:: This can get pretty slow on long pages.
'''
if not getattr(self, '_html', False):
query_params = {
'prop': 'revisions',
'rvprop': 'content',
'rvlimit': 1,
'rvparse': '',
'titles': self.title
}
request = _wiki_request(query_params)
self._html = request['query']['pages'][self.pageid]['revisions'][0]['*']
return self._html
@property
def content(self):
'''
Plain text content of the page, excluding images, tables, and other data.
'''
if not getattr(self, '_content', False):
query_params = {
'prop': 'extracts|revisions',
'explaintext': '',
'rvprop': 'ids'
}
if getattr(self, 'title', None) is not None:
query_params['titles'] = self.title
else:
query_params['pageids'] = self.pageid
request = _wiki_request(query_params)
self._content = request['query']['pages'][self.pageid]['extract']
self._revision_id = request['query']['pages'][self.pageid]['revisions'][0]['revid']
self._parent_id = request['query']['pages'][self.pageid]['revisions'][0]['parentid']
return self._content
@property
def revision_id(self):
'''
Revision ID of the page.
The revision ID is a number that uniquely identifies the current
version of the page. It can be used to create the permalink or for
other direct API calls. See `Help:Page history
<http://en.wikipedia.org/wiki/Wikipedia:Revision>`_ for more
information.
'''
if not getattr(self, '_revision_id', False):
# fetch the content (side effect is loading the revid)
self.content
return self._revision_id
@property
def parent_id(self):
'''
Revision ID of the parent version of the current revision of this
page. See ``revision_id`` for more information.
'''
if not getattr(self, '_parent_id', False):
# fetch the content (side effect is loading the revid)
self.content
return self._parent_id
@property
def summary(self):
'''
Plain text summary of the page.
'''
if not getattr(self, '_summary', False):
query_params = {
'prop': 'extracts',
'explaintext': '',
'exintro': '',
}
if getattr(self, 'title', None) is not None:
query_params['titles'] = self.title
else:
query_params['pageids'] = self.pageid
request = _wiki_request(query_params)
self._summary = request['query']['pages'][self.pageid]['extract']
return self._summary
@property
def images(self):
'''
List of URLs of images on the page.
'''
if not getattr(self, '_images', False):
self._images = [
page['imageinfo'][0]['url']
for page in self.__continued_query({
'generator': 'images',
'gimlimit': 'max',
'prop': 'imageinfo',
'iiprop': 'url',
})
if 'imageinfo' in page
]
return self._images
@property
def coordinates(self):
'''
Tuple of Decimals in the form of (lat, lon) or None
'''
if not getattr(self, '_coordinates', False):
query_params = {
'prop': 'coordinates',
'colimit': 'max',
'titles': self.title,
}
request = _wiki_request(query_params)
if 'query' in request:
coordinates = request['query']['pages'][self.pageid]['coordinates']
self._coordinates = (Decimal(coordinates[0]['lat']), Decimal(coordinates[0]['lon']))
else:
self._coordinates = None
return self._coordinates
@property
def references(self):
'''
List of URLs of external links on a page.
May include external links within page that aren't technically cited anywhere.
'''
if not getattr(self, '_references', False):
def add_protocol(url):
return url if url.startswith('http') else 'http:' + url
self._references = [
add_protocol(link['*'])
for link in self.__continued_query({
'prop': 'extlinks',
'ellimit': 'max'
})
]
return self._references
@property
def links(self):
'''
List of titles of Wikipedia page links on a page.
.. note:: Only includes articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages.
'''
if not getattr(self, '_links', False):
self._links = [
link['title']
for link in self.__continued_query({
'prop': 'links',
'plnamespace': 0,
'pllimit': 'max'
})
]
return self._links
@property
def categories(self):
'''
List of categories of a page.
'''
if not getattr(self, '_categories', False):
self._categories = [re.sub(r'^Category:', '', x) for x in
[link['title']
for link in self.__continued_query({
'prop': 'categories',
'cllimit': 'max'
})
]]
return self._categories
@property
def sections(self):
'''
List of section titles from the table of contents on the page.
'''
if not getattr(self, '_sections', False):
query_params = {
'action': 'parse',
'prop': 'sections',
}
query_params.update(self.__title_query_param)
request = _wiki_request(query_params)
self._sections = [section['line'] for section in request['parse']['sections']]
return self._sections
def section(self, section_title):
'''
Get the plain text content of a section from `self.sections`.
Returns None if `section_title` isn't found, otherwise returns a whitespace stripped string.
This is a convenience method that wraps self.content.
.. warning:: Calling `section` on a section that has subheadings will NOT return
the full text of all of the subsections. It only gets the text between
`section_title` and the next subheading, which is often empty.
'''
section = u"== {} ==".format(section_title)
try:
index = self.content.index(section) + len(section)
except ValueError:
return None
try:
next_index = self.content.index("==", index)
except ValueError:
next_index = len(self.content)
return self.content[index:next_index].lstrip("=").strip()
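# Illustrative usage (editor's sketch): sections() lists the parsed headings
# and section() slices the plain-text content between two headings:
#
#   p = page("Computer science")
#   p.sections                 # e.g. [u'History', u'Etymology', ...]
#   p.section(p.sections[0])   # text of that section, or None if not found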
@cache
def languages():
'''
List all the currently supported language prefixes (usually ISO language code).
Can be passed to `set_lang` to change the MediaWiki site that `wikipedia`
requests results from.
Returns: dict of <prefix>: <local_lang_name> pairs. To get just a list of prefixes,
use `wikipedia.languages().keys()`.
'''
response = _wiki_request({
'meta': 'siteinfo',
'siprop': 'languages'
})
languages = response['query']['languages']
return {
lang['code']: lang['*']
for lang in languages
}
def donate():
'''
Open up the Wikimedia donate page in your favorite browser.
'''
import webbrowser
webbrowser.open('https://donate.wikimedia.org/w/index.php?title=Special:FundraiserLandingPage', new=2)
def _wiki_request(params):
'''
Make a request to the Wikipedia API using the given search parameters.
Returns a parsed dict of the JSON response.
'''
global RATE_LIMIT_LAST_CALL
global USER_AGENT
params['format'] = 'json'
if 'action' not in params:
params['action'] = 'query'
headers = {
'User-Agent': USER_AGENT
}
if RATE_LIMIT and RATE_LIMIT_LAST_CALL and \
RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT > datetime.now():
# it hasn't been long enough since the last API call
# so wait until we're in the clear to make the request
wait_time = (RATE_LIMIT_LAST_CALL + RATE_LIMIT_MIN_WAIT) - datetime.now()
time.sleep(wait_time.total_seconds())  # do not truncate sub-second waits to zero
r = requests.get(API_URL, params=params, headers=headers)
if RATE_LIMIT:
RATE_LIMIT_LAST_CALL = datetime.now()
return r.json()
|
|
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts a frozen graph into a TFLite FlatBuffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import distutils.spawn
import enum # pylint: disable=g-bad-import-order
import os as _os
import platform as _platform
import subprocess as _subprocess
import tempfile as _tempfile
import six
from six.moves import map
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python import util
from tensorflow.lite.python import wrap_toco
from tensorflow.lite.toco import model_flags_pb2 as _model_flags_pb2
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export as _tf_export
_quantized_inference_types = [_types_pb2.QUANTIZED_UINT8, _types_pb2.INT8]
# If the `inference_type` or the `inference_input_type` is the quantized type
# and it is not post training quantization, the input quantization stats is
# required.
def _requires_input_stats(toco_flags):
return ((toco_flags.inference_type in _quantized_inference_types or
toco_flags.inference_input_type in _quantized_inference_types) and
not toco_flags.post_training_quantize)
# Find the toco_from_protos binary using the resource loader if using from
# bazel; otherwise we are in a pip installation where console_scripts already has
# the toco_from_protos tool.
if lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:
_toco_from_proto_bin = ""
else:
_toco_from_proto_bin = _resource_loader.get_path_to_datafile(
"../toco/python/toco_from_protos")
if _toco_from_proto_bin and not _os.path.exists(_toco_from_proto_bin):
_toco_from_proto_bin = "toco_from_protos"
def _try_convert_to_unicode(output):
if output is None:
return u""
if isinstance(output, bytes):
try:
return six.ensure_text(output)
except UnicodeDecodeError:
pass
return output
@_tf_export("lite.OpsSet")
class OpsSet(enum.Enum):
"""Enum class defining the sets of ops available to generate TFLite models.
WARNING: Experimental interface, subject to change.
"""
# Convert model using TensorFlow Lite builtin ops.
TFLITE_BUILTINS = "TFLITE_BUILTINS"
# Convert model using TensorFlow ops. Not all TensorFlow ops are available.
# WARNING: Experimental interface, subject to change.
SELECT_TF_OPS = "SELECT_TF_OPS"
# Convert model using only TensorFlow Lite quantized int8 operations.
# Specifying this will throw an error for operations that do not yet have
# quantized implementations.
TFLITE_BUILTINS_INT8 = "TFLITE_BUILTINS_INT8"
def __str__(self):
return self.value
@staticmethod
def get_options():
"""Returns a list of OpsSet options as a list of strings."""
return [str(option) for option in list(OpsSet)]
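# Illustrative usage (editor's sketch): the enum values are plain strings, so
# they can be listed or matched by name:
#
#   OpsSet.get_options()       # ['TFLITE_BUILTINS', 'SELECT_TF_OPS', 'TFLITE_BUILTINS_INT8']
#   str(OpsSet.SELECT_TF_OPS)  # 'SELECT_TF_OPS'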
class ConverterError(Exception):
"""Raised when an error occurs during model conversion."""
pass
def toco_convert_protos(model_flags_str,
toco_flags_str,
input_data_str,
debug_info_str=None,
enable_mlir_converter=False):
"""Convert `input_data_str` according to model and toco parameters.
Unless you know what you are doing consider using
the more friendly `tf.compat.v1.lite.toco_convert`.
Args:
model_flags_str: Serialized proto describing model properties, see
`toco/model_flags.proto`.
toco_flags_str: Serialized proto describing conversion properties, see
`toco/toco_flags.proto`.
input_data_str: Input data in serialized form (e.g. a graphdef is common)
debug_info_str: Serialized `GraphDebugInfo` proto describing logging
information. (default None)
enable_mlir_converter: Enables MLIR-based conversion instead of the default
TOCO conversion. (default False)
Returns:
Converted model in serialized form (e.g. a TFLITE model is common).
Raises:
ConverterError: When conversion fails in TFLiteConverter, usually due to
ops not being supported.
RuntimeError: When conversion fails, an exception is raised with the error
message embedded.
"""
# TODO(aselle): When toco does not use fatal errors for failure, we can
# switch this on.
if not _toco_from_proto_bin:
try:
model_str = wrap_toco.wrapped_toco_convert(model_flags_str,
toco_flags_str, input_data_str,
debug_info_str,
enable_mlir_converter)
return model_str
except Exception as e:
raise ConverterError(str(e))
if distutils.spawn.find_executable(_toco_from_proto_bin) is None:
raise ConverterError("""Could not find toco_from_protos binary, make sure
your virtualenv bin directory or pip local bin directory is in your path.
In particular, if you have installed TensorFlow with --user, make sure you
add the install directory to your path.
For example:
Linux: export PATH=$PATH:~/.local/bin/
Mac: export PATH=$PATH:~/Library/Python/<version#>/bin
Alternatively, use virtualenv.""")
# Windows and TemporaryFile are not that useful together,
# since you cannot have two readers/writers. So we have to
# make the temporaries and close and delete them explicitly.
toco_filename, model_filename, input_filename, output_filename = (None, None,
None, None)
try:
# Build all input files
with _tempfile.NamedTemporaryFile(delete=False) as fp_toco, \
_tempfile.NamedTemporaryFile(delete=False) as fp_model, \
_tempfile.NamedTemporaryFile(delete=False) as fp_input, \
_tempfile.NamedTemporaryFile(delete=False) as fp_debug:
toco_filename = fp_toco.name
input_filename = fp_input.name
model_filename = fp_model.name
debug_filename = fp_debug.name
fp_model.write(model_flags_str)
fp_toco.write(toco_flags_str)
fp_input.write(six.ensure_binary(input_data_str))
debug_info_str = debug_info_str if debug_info_str else ""
# if debug_info_str contains a "string value", then the call to
# fp_debug.write(debug_info_str) will fail with the following error
#
# TypeError: a bytes-like object is required, not 'str'
#
# Some of the subtests within the "convert_test" unit-test fail
# with the error shown above. So watch out for that scenario and
# convert debug_info_str to bytes where needed
if not isinstance(debug_info_str, bytes):
fp_debug.write(debug_info_str.encode("utf-8"))
else:
fp_debug.write(debug_info_str)
# Reserve an output file
with _tempfile.NamedTemporaryFile(delete=False) as fp:
output_filename = fp.name
# Run
cmd = [
_toco_from_proto_bin,
model_filename,
toco_filename,
input_filename,
output_filename,
"--debug_proto_file={}".format(debug_filename),
]
if enable_mlir_converter:
cmd.append("--enable_mlir_converter")
cmdline = " ".join(cmd)
is_windows = _platform.system() == "Windows"
proc = _subprocess.Popen(
cmdline,
shell=True,
stdout=_subprocess.PIPE,
stderr=_subprocess.STDOUT,
close_fds=not is_windows)
stdout, stderr = proc.communicate()
exitcode = proc.returncode
if exitcode == 0:
with open(output_filename, "rb") as fp:
return fp.read()
else:
stdout = _try_convert_to_unicode(stdout)
stderr = _try_convert_to_unicode(stderr)
raise ConverterError("See console for info.\n%s\n%s\n" % (stdout, stderr))
finally:
# Must manually cleanup files.
for filename in [
toco_filename, input_filename, model_filename, output_filename
]:
try:
_os.unlink(filename)
except (OSError, TypeError):
pass
def build_toco_convert_protos(input_tensors,
output_tensors,
inference_type=lite_constants.FLOAT,
inference_input_type=None,
input_format=lite_constants.TENSORFLOW_GRAPHDEF,
input_shapes=None,
output_format=lite_constants.TFLITE,
quantized_input_stats=None,
default_ranges_stats=None,
drop_control_dependency=True,
reorder_across_fake_quant=False,
allow_custom_ops=False,
custom_opdefs=None,
change_concat_input_ranges=False,
post_training_quantize=False,
quantize_to_float16=False,
dump_graphviz_dir=None,
dump_graphviz_video=False,
target_ops=None,
allow_nonexistent_arrays=False,
debug_info=None,
conversion_summary_dir=None):
"""Builds protocol buffers describing a conversion of a model using TOCO.
Typically this is to convert from TensorFlow GraphDef to TFLite, in which
case the default `input_format` and `output_format` are sufficient.
Args:
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
inference_type: Target data type of real-number arrays in the output file.
Must be `{tf.float32, tf.uint8, tf.int8}`. (default tf.float32)
inference_input_type: Target data type of real-number input arrays. Allows
for a different type for input arrays in the case of quantization. Must be
`{tf.float32, tf.uint8, tf.int8}`. (default `inference_type`)
input_format: Type of data to read. Currently must be
`{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
input_shapes: Input array shape. It needs to be a list of the same length as
`input_tensors`, or None. (default None)
output_format: Output file format. Currently must be `{TFLITE,
GRAPHVIZ_DOT}`. (default TFLITE)
quantized_input_stats: List of tuples of floats representing the mean and
standard deviation. Each tuple maps to the corresponding input tensor.
Only need if `inference_input_type` is `QUANTIZED_UINT8` or `INT8`.
real_input_value = (quantized_input_value - mean_value) / std_dev_value.
(default None)
default_ranges_stats: Tuple of integers representing (min, max) range values
for all arrays without a specified range. Intended for experimenting with
quantization via "dummy quantization". (default None)
drop_control_dependency: Boolean indicating whether to drop control
dependencies silently. This is due to TFLite not supporting control
dependencies. (default True)
reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
nodes in unexpected locations. Used when the location of the FakeQuant
nodes is preventing graph transformations necessary to convert the graph.
Results in a graph that differs from the quantized training graph,
potentially causing differing arithmetic behavior. (default False)
allow_custom_ops: Boolean indicating whether to allow custom operations.
When false any unknown operation is an error. When true, custom ops are
created for any op that is unknown. The developer will need to provide
these to the TensorFlow Lite runtime with a custom resolver. (default
False)
custom_opdefs: List of strings representing custom ops OpDefs that are
included in the GraphDef. Required when using custom operations with the
MLIR-based converter. (default None)
change_concat_input_ranges: Boolean to change behavior of min/max ranges for
inputs and outputs of the concat operator for quantized models. Changes
the ranges of concat operator overlap when true. (default False)
post_training_quantize: Boolean indicating whether to quantize the weights
of the converted float model. Model size will be reduced and there will be
latency improvements (at the cost of accuracy). (default False)
quantize_to_float16: Boolean indicating whether to convert float buffers to
float16. (default False)
dump_graphviz_dir: Full filepath of folder to dump the graphs at various
stages of processing GraphViz .dot files. Preferred over
--output_format=GRAPHVIZ_DOT in order to keep the requirements of the
output file. (default None)
dump_graphviz_video: Boolean indicating whether to dump the graph after
every graph transformation. (default False)
target_ops: Experimental flag, subject to change. Set of OpsSet options
indicating which converter to use. (default set([OpsSet.TFLITE_BUILTINS]))
allow_nonexistent_arrays: Allow specifying array names that don't exist or
are unused in the final graph. (default False)
debug_info: `GraphDebugInfo` proto containing the stack traces for the
original nodes referred by the converted graph.
conversion_summary_dir: A string, the path to the generated conversion logs.
Returns:
model_flags, toco_flags, debug_info: three protocol buffers describing the
conversion process and debug information.
Raises:
ValueError:
If the input tensor type is unknown
Missing mean_values or std_dev_values
RuntimeError: If TOCO fails to convert (in which case the runtime error's
error text will contain the TOCO error log)
"""
toco = _toco_flags_pb2.TocoFlags()
toco.input_format = input_format
toco.output_format = output_format
toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)
if inference_input_type:
toco.inference_input_type = util.convert_dtype_to_tflite_type(
inference_input_type)
else:
toco.inference_input_type = toco.inference_type
toco.drop_control_dependency = drop_control_dependency
toco.reorder_across_fake_quant = reorder_across_fake_quant
toco.allow_custom_ops = allow_custom_ops
if custom_opdefs:
toco.custom_opdefs.extend(custom_opdefs)
toco.post_training_quantize = post_training_quantize
toco.quantize_to_float16 = quantize_to_float16
if default_ranges_stats:
toco.default_ranges_min = default_ranges_stats[0]
toco.default_ranges_max = default_ranges_stats[1]
if dump_graphviz_dir:
toco.dump_graphviz_dir = dump_graphviz_dir
toco.dump_graphviz_include_video = dump_graphviz_video
if conversion_summary_dir:
toco.conversion_summary_dir = conversion_summary_dir
if target_ops:
if set(target_ops) == set([OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):
toco.enable_select_tf_ops = True
elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
toco.enable_select_tf_ops = True
toco.force_select_tf_ops = True
model = _model_flags_pb2.ModelFlags()
model.change_concat_input_ranges = change_concat_input_ranges
for idx, input_tensor in enumerate(input_tensors):
input_array = model.input_arrays.add()
input_array.name = util.get_tensor_name(input_tensor)
input_array.data_type = util.convert_dtype_to_tflite_type(
input_tensor.dtype)
if _requires_input_stats(toco) and quantized_input_stats:
input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
if input_shapes is None:
shape = input_tensor.shape
else:
shape = input_shapes[idx]
# Create shapes with -1 for unknown dimensions.
dims = []
for dim in shape:
if (dim is None or
(isinstance(dim, tensor_shape.Dimension) and dim.value is None)):
dims.append(-1)
else:
dims.append(int(dim))
input_array.shape.dims.extend(dims)
for output_tensor in output_tensors:
model.output_arrays.append(util.get_tensor_name(output_tensor))
model.allow_nonexistent_arrays = allow_nonexistent_arrays
return model, toco, debug_info
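# Illustrative flow (editor's sketch, simplified): the protos built above are
# serialized and handed to toco_convert_protos(); this mirrors what
# toco_convert_impl() further below does. `input_tensors`, `output_tensors`
# and `graph_def` are assumed to come from an existing TF 1.x graph/session:
#
#   model_flags, toco_flags, debug_info = build_toco_convert_protos(
#       input_tensors, output_tensors, input_shapes=[[1, 224, 224, 3]])
#   tflite_model = toco_convert_protos(model_flags.SerializeToString(),
#                                      toco_flags.SerializeToString(),
#                                      graph_def.SerializeToString())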
def toco_convert_graph_def(input_data, input_arrays_with_shape, output_arrays,
enable_mlir_converter, *args, **kwargs):
""""Convert a model using TOCO.
This function is used to convert GraphDefs that cannot be loaded into
TensorFlow to TFLite. Conversion can be customized by providing arguments
that are forwarded to `build_toco_convert_protos` (see documentation for
details).
Args:
input_data: Input data (i.e. often `sess.graph_def`),
input_arrays_with_shape: Tuple of strings representing input tensor names
and list of integers representing input shapes
(e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be loaded
into TensorFlow and when `input_tensors` is None. (default None)
output_arrays: List of output tensors to freeze graph with. Use only when
graph cannot be loaded into TensorFlow and when `output_tensors` is None.
(default None)
enable_mlir_converter: Enables MLIR-based conversion instead of TOCO
conversion.
*args: See `build_toco_convert_protos`,
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
model_flags, toco_flags, _ = build_toco_convert_protos(
input_tensors=[], output_tensors=[], *args, **kwargs)
for idx, (name, shape) in enumerate(input_arrays_with_shape):
input_array = model_flags.input_arrays.add()
if _requires_input_stats(toco_flags):
if (("quantized_input_stats" not in kwargs) or
(not kwargs["quantized_input_stats"])):
raise ValueError("std_dev and mean must be defined when inference_type "
"or inference_input_type is QUANTIZED_UINT8 or INT8.")
input_array.mean_value, input_array.std_value = kwargs[
"quantized_input_stats"][idx]
input_array.name = name
input_array.shape.dims.extend(list(map(int, shape)))
for name in output_arrays:
model_flags.output_arrays.append(name)
data = toco_convert_protos(
model_flags.SerializeToString(),
toco_flags.SerializeToString(),
input_data.SerializeToString(),
enable_mlir_converter=enable_mlir_converter)
return data
def toco_convert_impl(input_data, input_tensors, output_tensors,
enable_mlir_converter, *args, **kwargs):
""""Convert a model using TOCO.
Typically this function is used to convert from TensorFlow GraphDef to TFLite.
Conversion can be customized by providing arguments that are forwarded to
`build_toco_convert_protos` (see documentation for details).
Args:
input_data: Input data (i.e. often `sess.graph_def`),
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
enable_mlir_converter: Enables MLIR-based conversion instead of TOCO
conversion.
*args: See `build_toco_convert_protos`,
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
model_flags, toco_flags, debug_info = build_toco_convert_protos(
input_tensors, output_tensors, *args, **kwargs)
debug_info_str = debug_info.SerializeToString() if debug_info else None
data = toco_convert_protos(
model_flags.SerializeToString(),
toco_flags.SerializeToString(),
input_data.SerializeToString(),
debug_info_str=debug_info_str,
enable_mlir_converter=enable_mlir_converter)
return data
@_tf_export(v1=["lite.toco_convert"])
@deprecation.deprecated(None, "Use `lite.TFLiteConverter` instead.")
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
"""Convert a model using TOCO.
Typically this function is used to convert from TensorFlow GraphDef to TFLite.
Conversion can be customized by providing arguments that are forwarded to
`build_toco_convert_protos` (see documentation for details). This function has
been deprecated. Please use `lite.TFLiteConverter` instead.
Args:
input_data: Input data (i.e. often `sess.graph_def`),
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
*args: See `build_toco_convert_protos`,
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
enable_mlir_converter = kwargs.get("enable_mlir_converter", False)
return toco_convert_impl(input_data, input_tensors, output_tensors,
enable_mlir_converter, *args, **kwargs)
|
|
"""
This is the *abstract* django models for many of the database objects
in Evennia. A Django abstract model (note: not the same as a Python metaclass!) is
a model which is not actually created in the database, but which only exists
for other models to inherit from, to avoid code duplication. Any model can
import and inherit from these classes.
Attributes are database objects stored on other objects. The implementing
class needs to supply a ForeignKey field attr_object pointing to the kind
of object being mapped. Attributes storing iterables actually store special
types of iterables named PackedList/PackedDict respectively. These make
sure changes to them are saved to the database - this is critical in order to
allow for obj.db.mylist[2] = data. Also, all dbobjects are saved as
dbrefs but are also aggressively cached.
TypedObjects are objects 'decorated' with a typeclass - that is, the typeclass
(which is a normal Python class implementing some special tricks with its
get/set attribute methods) allows for the creation of all sorts of different
objects all with the same database object underneath. Usually attributes are
used to permanently store things not hard-coded as fields on the database object.
The admin should usually not have to deal directly with the database object
layer.
This module also contains the Managers for the respective models; inherit from
these to create custom managers.
"""
from builtins import object
from django.db.models import signals
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from django.utils.encoding import smart_str
from evennia.typeclasses.attributes import Attribute, AttributeHandler, NAttributeHandler
from evennia.typeclasses.tags import Tag, TagHandler, AliasHandler, PermissionHandler
from evennia.utils.idmapper.models import SharedMemoryModel, SharedMemoryModelBase
from evennia.typeclasses import managers
from evennia.locks.lockhandler import LockHandler
from evennia.utils.utils import (
is_iter, inherits_from, lazy_property,
class_from_module)
from evennia.utils.logger import log_trace
from evennia.typeclasses.django_new_patch import patched_new
__all__ = ("TypedObject", )
TICKER_HANDLER = None
_PERMISSION_HIERARCHY = [p.lower() for p in settings.PERMISSION_HIERARCHY]
_TYPECLASS_AGGRESSIVE_CACHE = settings.TYPECLASS_AGGRESSIVE_CACHE
_GA = object.__getattribute__
_SA = object.__setattr__
#------------------------------------------------------------
#
# Typed Objects
#
#------------------------------------------------------------
#
# Meta class for typeclasses
#
# signal receivers. Assigned in __new__
def post_save(sender, instance, created, **kwargs):
"""
Receives a signal just after the object is saved.
"""
if created:
instance.at_first_save()
class TypeclassBase(SharedMemoryModelBase):
"""
Metaclass which should be set for the root of model proxies
that don't define any new fields, like Object, Script etc. This
is the basis for the typeclassing system.
"""
def __new__(cls, name, bases, attrs):
"""
We must define our Typeclasses as proxies. We also store the
path directly on the class, this is required by managers.
"""
# storage of stats
attrs["typename"] = name
attrs["path"] = "%s.%s" % (attrs["__module__"], name)
# typeclass proxy setup
if "Meta" not in attrs:
class Meta(object):
proxy = True
app_label = attrs.get("__applabel__", "typeclasses")
attrs["Meta"] = Meta
attrs["Meta"].proxy = True
# patch for django proxy multi-inheritance
# this is a copy of django.db.models.base.__new__
# with a few lines changed as per
# https://code.djangoproject.com/ticket/11560
new_class = patched_new(cls, name, bases, attrs)
# attach signal
signals.post_save.connect(post_save, sender=new_class)
return new_class
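# Editor's illustrative sketch: a typeclass is declared as a plain subclass of
# one of Evennia's defaults (e.g. DefaultCharacter, not defined in this
# module); the metaclass above makes it a Django proxy model and connects the
# post_save signal so at_first_save() fires on creation:
#
#   class Character(DefaultCharacter):
#       def at_first_save(self):
#           self.db.hp = 10   # persisted through the Attribute system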
class DbHolder(object):
"Holder for allowing property access of attributes"
def __init__(self, obj, name, manager_name='attributes'):
_SA(self, name, _GA(obj, manager_name))
_SA(self, 'name', name)
def __getattribute__(self, attrname):
if attrname == 'all':
# we allow to overload our default .all
attr = _GA(self, _GA(self, 'name')).get("all")
return attr if attr else _GA(self, "all")
return _GA(self, _GA(self, 'name')).get(attrname)
def __setattr__(self, attrname, value):
_GA(self, _GA(self, 'name')).add(attrname, value)
def __delattr__(self, attrname):
_GA(self, _GA(self, 'name')).remove(attrname)
def get_all(self):
return _GA(self, _GA(self, 'name')).all()
all = property(get_all)
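# Illustrative usage (editor's sketch): DbHolder is what typically backs the
# `.db`/`.ndb` shortcuts on typeclassed entities, so attribute-style access is
# routed to the AttributeHandler:
#
#   obj.db.desc = "A rusty sword"   # -> obj.attributes.add("desc", ...)
#   obj.db.desc                     # -> obj.attributes.get("desc")
#   del obj.db.desc                 # -> obj.attributes.remove("desc")
#   obj.db.all                      # all Attributes (unless one is named "all")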
#
# Main TypedObject abstraction
#
class TypedObject(SharedMemoryModel):
"""
Abstract Django model.
This is the basis for a typed object. It also contains all the
mechanics for managing connected attributes.
The TypedObject has the following properties:
key - main name
name - alias for key
typeclass_path - the path to the decorating typeclass
typeclass - auto-linked typeclass
date_created - time stamp of object creation
permissions - perm strings
dbref - #id of object
db - persistent attribute storage
ndb - non-persistent attribute storage
"""
#
# TypedObject Database Model setup
#
#
# These database fields are all accessed and set using their corresponding
# properties, named same as the field, but without the db_* prefix
# (no separate save() call is needed)
# Main identifier of the object, for searching. Is accessed with self.key
# or self.name
db_key = models.CharField('key', max_length=255, db_index=True)
# This is the python path to the type class this object is tied to. The
# typeclass is what defines what kind of Object this is.
db_typeclass_path = models.CharField('typeclass', max_length=255, null=True,
help_text="this defines what 'type' of entity this is. This variable holds a Python path to a module with a valid Evennia Typeclass.")
# Creation date. This is not changed once the object is created.
db_date_created = models.DateTimeField('creation date', editable=False, auto_now_add=True)
# Lock storage
db_lock_storage = models.TextField('locks', blank=True,
help_text="locks limit access to an entity. A lock is defined as a 'lock string' on the form 'type:lockfunctions', defining what functionality is locked and how to determine access. Not defining a lock means no access is granted.")
# many2many relationships
db_attributes = models.ManyToManyField(Attribute, null=True,
help_text='attributes on this object. An attribute can hold any pickle-able python object (see docs for special cases).')
db_tags = models.ManyToManyField(Tag, null=True,
help_text='tags on this object. Tags are simple string markers to identify, group and alias objects.')
# Database manager
objects = managers.TypedObjectManager()
# quick on-object typeclass cache for speed
_cached_typeclass = None
# typeclass mechanism
def __init__(self, *args, **kwargs):
"""
The `__init__` method of typeclasses is the core operational
code of the typeclass system, where it dynamically re-applies
a class based on the db_typeclass_path database field rather
than use the one in the model.
Args:
Passed through to parent.
Kwargs:
Passed through to parent.
Notes:
The loading mechanism will attempt the following steps:
1. Attempt to load typeclass given on command line
2. Attempt to load typeclass stored in db_typeclass_path
3. Attempt to load `__settingsclasspath__`, which the default
classes define to be the respective user-set base typeclass
setting, like `BASE_OBJECT_TYPECLASS`.
4. Attempt to load `__defaultclasspath__`, which is the
base classes in the library, like DefaultObject etc.
5. If everything else fails, use the database model.
Normal operation is to load successfully at either step 1
or 2 depending on how the class was called. Tracebacks
will be logged for every step the loader must take beyond
2.
"""
typeclass_path = kwargs.pop("typeclass", None)
super(TypedObject, self).__init__(*args, **kwargs)
if typeclass_path:
try:
self.__class__ = class_from_module(typeclass_path, defaultpaths=settings.TYPECLASS_PATHS)
except Exception:
log_trace()
try:
self.__class__ = class_from_module(self.__settingsclasspath__)
except Exception:
log_trace()
try:
self.__class__ = class_from_module(self.__defaultclasspath__)
except Exception:
log_trace()
self.__class__ = self._meta.proxy_for_model or self.__class__
finally:
self.db_typeclass_path = typeclass_path
elif self.db_typeclass_path:
try:
self.__class__ = class_from_module(self.db_typeclass_path)
except Exception:
log_trace()
try:
self.__class__ = class_from_module(self.__defaultclasspath__)
except Exception:
log_trace()
self.__dbclass__ = self._meta.proxy_for_model or self.__class__
else:
self.db_typeclass_path = "%s.%s" % (self.__module__, self.__class__.__name__)
# important to put this at the end since _meta is based on the set __class__
self.__dbclass__ = self._meta.proxy_for_model or self.__class__
# initialize all handlers in a lazy fashion
@lazy_property
def attributes(self):
return AttributeHandler(self)
@lazy_property
def locks(self):
return LockHandler(self)
@lazy_property
def tags(self):
return TagHandler(self)
@lazy_property
def aliases(self):
return AliasHandler(self)
@lazy_property
def permissions(self):
return PermissionHandler(self)
@lazy_property
def nattributes(self):
return NAttributeHandler(self)
class Meta(object):
"""
Django setup info.
"""
abstract = True
verbose_name = "Evennia Database Object"
ordering = ['-db_date_created', 'id', 'db_typeclass_path', 'db_key']
# wrapper
# Wrapper properties to easily set database fields. These are
# @property decorators that allow accessing these fields using
# normal python operations (without having to remember to save()
# etc). So e.g. a property 'attr' has a get/set/del decorator
# defined that allows the user to do self.attr = value,
# value = self.attr and del self.attr respectively (where self
# is the object in question).
# name property (alias to self.key)
def __name_get(self):
return self.key
def __name_set(self, value):
self.key = value
def __name_del(self):
raise Exception("Cannot delete name")
name = property(__name_get, __name_set, __name_del)
# key property (overrides the idmapper's db_key for the at_rename hook)
@property
def key(self):
return self.db_key
@key.setter
def key(self, value):
oldname = str(self.db_key)
self.db_key = value
self.save(update_fields=["db_key"])
self.at_rename(oldname, value)
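# Illustrative usage of the key setter (assuming `obj` is a saved TypedObject):
#   obj.key = "NewName"   # saves db_key and calls obj.at_rename(oldname, "NewName")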
#
#
# TypedObject main class methods and properties
#
#
def __eq__(self, other):
return other and hasattr(other, 'dbid') and self.dbid == other.dbid
def __str__(self):
return smart_str("%s" % self.db_key)
def __unicode__(self):
return u"%s" % self.db_key
#@property
def __dbid_get(self):
"""
Returns the unique id of the object (alias for self.id).
"""
return self.id
def __dbid_set(self, value):
raise Exception("dbid cannot be set!")
def __dbid_del(self):
raise Exception("dbid cannot be deleted!")
dbid = property(__dbid_get, __dbid_set, __dbid_del)
#@property
def __dbref_get(self):
"""
Returns the object's dbref in the form #NN.
"""
return "#%s" % self.id
def __dbref_set(self, value):
raise Exception("dbref cannot be set!")
def __dbref_del(self):
raise Exception("dbref cannot be deleted!")
dbref = property(__dbref_get, __dbref_set, __dbref_del)
#
# Object manipulation methods
#
def is_typeclass(self, typeclass, exact=True):
"""
Returns True if this object has this type OR has a typeclass
which is a subclass of the given typeclass. This operates on
the actually loaded typeclass (this is important since a
failing typeclass may have its default loaded instead of
itself).
Args:
typeclass (str or class): A class or the full python path
to the class to check.
exact (bool, optional): Returns true only if the object's
type is exactly this typeclass, ignoring parents.
Returns:
is_typeclass (bool): If this typeclass matches the given
typeclass.
"""
if isinstance(typeclass, basestring):
typeclass = [typeclass] + ["%s.%s" % (prefix, typeclass) for prefix in settings.TYPECLASS_PATHS]
else:
typeclass = [typeclass.path]
selfpath = self.path
if exact:
# check only exact match
return selfpath in typeclass
else:
# check parent chain
return any(hasattr(cls, "path") and cls.path in typeclass for cls in self.__class__.mro())
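# Illustrative usage (the typeclass path below is hypothetical):
#   obj.is_typeclass("typeclasses.characters.Character")               # exact match only
#   obj.is_typeclass("typeclasses.characters.Character", exact=False)  # subclasses also match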
def swap_typeclass(self, new_typeclass, clean_attributes=False,
run_start_hooks=True, no_default=True):
"""
This performs an in-situ swap of the typeclass. This means
that in-game, this object will suddenly be something else.
Player will not be affected. To 'move' a player to a different
object entirely (while retaining this object's type), use
self.player.swap_object().
Note that this might be an error-prone operation if the
old/new typeclass was heavily customized - your code
might expect one and not the other, so be careful to
bug-test your code if using this feature! Often it's easiest
to create a new object and just swap the player over to
that one instead.
Args:
new_typeclass (str or classobj): Type to switch to.
clean_attributes (bool or list, optional): Will delete all
attributes stored on this object (but not any of the
database fields such as name or location). You can't get
attributes back, but this is often the safest bet to make
sure nothing in the new typeclass clashes with the old
one. If you supply a list, only those named attributes
will be cleared.
run_start_hooks (bool, optional): Trigger the start hooks
of the object, as if it was created for the first time.
no_default (bool, optional): If set, the swapper will not
allow for swapping to a default typeclass in case the
given one fails for some reason. Instead the old one will
be preserved.
Returns:
result (bool): True/False depending on if the swap worked
or not.
"""
if not callable(new_typeclass):
# not a class object - assume it's a python path and load the class
new_typeclass = class_from_module(new_typeclass, defaultpaths=settings.TYPECLASS_PATHS)
# if we get to this point, the class is ok.
if inherits_from(self, "evennia.scripts.models.ScriptDB"):
if self.interval > 0:
raise RuntimeError("Cannot use swap_typeclass on time-dependent " \
"Script '%s'.\nStop and start a new Script of the " \
"right type instead." % self.key)
self.typeclass_path = new_typeclass.path
self.__class__ = new_typeclass
if clean_attributes:
# Clean out old attributes
if is_iter(clean_attributes):
for attr in clean_attributes:
self.attributes.remove(attr)
for nattr in clean_attributes:
if hasattr(self.ndb, nattr):
self.nattributes.remove(nattr)
else:
self.attributes.clear()
self.nattributes.clear()
if run_start_hooks:
# fake this call to mimic the first save
self.at_first_save()
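# Illustrative usage sketch (target path and flags are examples only):
#   obj.swap_typeclass("typeclasses.objects.Readable",
#                      clean_attributes=True, run_start_hooks=True)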
#
# Lock / permission methods
#
def access(self, accessing_obj, access_type='read', default=False, no_superuser_bypass=False, **kwargs):
"""
Determines if another object has permission to access this one.
Args:
accessing_obj (object): Object trying to access this one.
access_type (str, optional): Type of access sought.
default (bool, optional): What to return if no lock of
access_type was found
no_superuser_bypass (bool, optional): Turn off the
superuser lock bypass (be careful with this one).
Kwargs:
kwargs (any): Ignored, but present to keep the api
consistent with the object-typeclass method access, which
uses it to feed its hook methods.
"""
return self.locks.check(accessing_obj, access_type=access_type, default=default,
no_superuser_bypass=no_superuser_bypass)
def check_permstring(self, permstring):
"""
This explicitly checks if we hold particular permission
without involving any locks.
Args:
permstring (str): The permission string to check against.
Returns:
result (bool): If the permstring is passed or not.
"""
if hasattr(self, "player"):
if self.player and self.player.is_superuser:
return True
else:
if self.is_superuser:
return True
if not permstring:
return False
perm = permstring.lower()
perms = [p.lower() for p in self.permissions.all()]
if perm in perms:
# simplest case - we have a direct match
return True
if perm in _PERMISSION_HIERARCHY:
# check if we have a higher hierarchy position
ppos = _PERMISSION_HIERARCHY.index(perm)
return any(True for hpos, hperm in enumerate(_PERMISSION_HIERARCHY)
if hperm in perms and hpos > ppos)
return False
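# Example of the hierarchy check above: assuming a hierarchy ordered like
# ("players", "builders", "wizards", "immortals"), an object holding the
# "wizards" permission also passes check_permstring("builders"), since any
# held permission higher up in _PERMISSION_HIERARCHY satisfies a lower one.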
#
# Deletion methods
#
def _deleted(self, *args, **kwargs):
"""
Scrambling method for already deleted objects
"""
raise ObjectDoesNotExist("This object was already deleted!")
def delete(self):
"""
Cleaning up handlers on the typeclass level
"""
global TICKER_HANDLER
self.permissions.clear()
self.attributes.clear()
self.aliases.clear()
if hasattr(self, "nicks"):
self.nicks.clear()
# scrambling properties
self.delete = self._deleted
super(TypedObject, self).delete()
#
# Memory management
#
#def flush_from_cache(self):
# """
# Flush this object instance from cache, forcing an object reload.
# Note that this will kill all temporary attributes on this object
# since it will be recreated as a new Typeclass instance.
# """
# self.__class__.flush_cached_instance(self)
#
# Attribute storage
#
#@property db
def __db_get(self):
"""
Attribute handler wrapper. Allows for the syntax
obj.db.attrname = value
and
value = obj.db.attrname
and
del obj.db.attrname
and
all_attr = obj.db.all() (unless there is an attribute
named 'all', in which case that will be returned instead).
"""
try:
return self._db_holder
except AttributeError:
self._db_holder = DbHolder(self, 'attributes')
return self._db_holder
#@db.setter
def __db_set(self, value):
"Stop accidentally replacing the db object"
string = "Cannot assign directly to db object! "
string += "Use db.attr=value instead."
raise Exception(string)
#@db.deleter
def __db_del(self):
"Stop accidental deletion."
raise Exception("Cannot delete the db object!")
db = property(__db_get, __db_set, __db_del)
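# Illustrative usage of the db wrapper (assuming `obj` is a TypedObject):
#   obj.db.strength = 42      # stored as a persistent Attribute
#   val = obj.db.strength     # reads it back (typically None if unset)
#   del obj.db.strength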
#
# Non-persistent (ndb) storage
#
#@property ndb
def __ndb_get(self):
"""
A non-database store (ndb: NonDataBase). Everything stored
here is guaranteed to be cleared when the server is shut down.
The syntax is the same as for the `db` property,
e.g. obj.ndb.attr = value etc.
"""
try:
return self._ndb_holder
except AttributeError:
self._ndb_holder = DbHolder(self, "nattrhandler", manager_name='nattributes')
return self._ndb_holder
#@db.setter
def __ndb_set(self, value):
"Stop accidentally replacing the ndb object"
string = "Cannot assign directly to ndb object! "
string += "Use ndb.attr=value instead."
raise Exception(string)
#@db.deleter
def __ndb_del(self):
"Stop accidental deletion."
raise Exception("Cannot delete the ndb object!")
ndb = property(__ndb_get, __ndb_set, __ndb_del)
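# Illustrative usage of the ndb wrapper (in-memory only, cleared on shutdown):
#   obj.ndb.combat_target = target_obj
#   obj.ndb.combat_target   # gone again after a server shutdown/reload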
def get_display_name(self, looker, **kwargs):
"""
Displays the name of the object in a viewer-aware manner.
Args:
looker (TypedObject): The object or player that is looking
at/getting information for this object.
Returns:
name (str): A string containing the name of the object,
including the DBREF if this user is privileged to control
said object.
Notes:
This function could be extended to change how object names
appear to users in character, but be wary. This function
does not change an object's keys or aliases when
searching, and is expected to produce something useful for
builders.
"""
if self.access(looker, access_type='controls'):
return "{}(#{})".format(self.name, self.id)
return self.name
def get_extra_info(self, looker, **kwargs):
"""
Used when an object is in a list of ambiguous objects as an
additional information tag.
For instance, if you had potions which could have varying
levels of liquid left in them, you might want to display how
many drinks are left in each when selecting which to drop, but
not in your normal inventory listing.
Args:
looker (TypedObject): The object or player that is looking
at/getting information for this object.
Returns:
info (str): A string with disambiguating information,
conventionally with a leading space.
"""
if self.location == looker:
return " (carried)"
return ""
def at_rename(self, oldname, newname):
"""
This hook is called by @name on a successful rename.
Args:
oldname (str): The instance's original name.
newname (str): The new name for the instance.
"""
pass
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Quantum networks.
"""
import logging
import warnings
from django import shortcuts
from django import template
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from horizon import api
from horizon.dashboards.nova.networks.forms import (CreateNetwork,
DeleteNetwork, RenameNetwork, AttachPort, CreatePort, DeletePort,
DetachPort, TogglePort)
LOG = logging.getLogger(__name__)
def index(request):
tenant_id = request.user.tenant_id
delete_form, delete_handled = DeleteNetwork.maybe_handle(request)
networks = []
instances = []
try:
networks_list = api.quantum_list_networks(request)
details = []
for network in networks_list['networks']:
net_stats = _calc_network_stats(request, network['id'])
# Get network details like name and id
details = api.quantum_network_details(request, network['id'])
networks.append({
'name': details['network']['name'],
'id': network['id'],
'total': net_stats['total'],
'available': net_stats['available'],
'used': net_stats['used'],
'tenant': tenant_id})
except Exception, e:
LOG.exception("Unable to get network list.")
messages.error(request,
_('Unable to get network list: %s') % e.message)
return shortcuts.render(request,
'nova/networks/index.html', {
'networks': networks,
'delete_form': delete_form})
def create(request):
network_form, handled = CreateNetwork.maybe_handle(request)
if handled:
return shortcuts.redirect('horizon:nova:networks:index')
return shortcuts.render(request,
'nova/networks/create.html',
{'network_form': network_form})
def detail(request, network_id):
tenant_id = request.user.tenant_id
delete_port_form, delete_handled = DeletePort.maybe_handle(request,
initial={"network": network_id})
detach_port_form, detach_handled = DetachPort.maybe_handle(request,
initial={"network": network_id})
toggle_port_form, port_toggle_handled = TogglePort.maybe_handle(request,
initial={"network": network_id})
network = {}
network['id'] = network_id
try:
network_details = api.quantum_network_details(request, network_id)
network['name'] = network_details['network']['name']
network['ports'] = _get_port_states(request, network_id)
except Exception, e:
LOG.exception("Unable to get network details.")
messages.error(request,
_('Unable to get network details: %s') % e.message)
return shortcuts.redirect("horizon:nova:networks:index")
return shortcuts.render(request,
'nova/networks/detail.html',
{'network': network,
'tenant': tenant_id,
'delete_port_form': delete_port_form,
'detach_port_form': detach_port_form,
'toggle_port_form': toggle_port_form})
def rename(request, network_id):
network_details = api.quantum_network_details(request, network_id)
network = network_details['network']
rename_form, handled = RenameNetwork.maybe_handle(request, initial={
'network': network['id'],
'new_name': network['name']})
if handled:
return shortcuts.redirect('horizon:nova:networks:index')
return shortcuts.render(request,
'nova/networks/rename.html', {
'network': network,
'rename_form': rename_form})
def _get_port_states(request, network_id):
"""
Helper method to find port states for a network
"""
network_ports = []
# Get all vifs for comparison with port attachments
vifs = api.get_vif_ids(request)
# Get all ports on this network
ports = api.quantum_list_ports(request, network_id)
for port in ports['ports']:
port_details = api.quantum_port_details(request,
network_id, port['id'])
# Get port attachments
port_attachment = api.quantum_port_attachment(request,
network_id, port['id'])
# Find instance the attachment belongs to
connected_instance = None
if port_attachment['attachment']:
for vif in vifs:
if str(vif['id']) == str(port_attachment['attachment']['id']):
connected_instance = vif['id']
break
network_ports.append({
'id': port_details['port']['id'],
'state': port_details['port']['state'],
'attachment': port_attachment['attachment'],
'instance': connected_instance})
return network_ports
def _calc_network_stats(request, network_id):
"""
Helper method to calculate statistics for a network
"""
# Get all ports statistics for the network
total = 0
available = 0
used = 0
ports = api.quantum_list_ports(request, network_id)
for port in ports['ports']:
total += 1
# Get port attachment
port_attachment = api.quantum_port_attachment(request,
network_id, port['id'])
if port_attachment['attachment']:
used += 1
else:
available += 1
return {'total': total, 'used': used, 'available': available}
def port_create(request, network_id):
create_form, handled = CreatePort.maybe_handle(request, initial={
"network": network_id})
if handled:
return shortcuts.redirect('horizon:nova:networks:detail',
network_id=network_id)
return shortcuts.render(request,
'nova/ports/create.html', {
'network_id': network_id,
'create_form': create_form})
def port_attach(request, network_id, port_id):
attach_form, handled = AttachPort.maybe_handle(request, initial={
"network": network_id,
"port": port_id})
if handled:
return shortcuts.redirect('horizon:nova:networks:detail',
network_id=network_id)
return shortcuts.render(request,
'nova/ports/attach.html', {
'network': network_id,
'port': port_id,
'attach_form': attach_form})
|
|
#!/usr/bin/env python
#
# Copyright 2015 The AMP HTML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the license.
#
"""A build script which (thus far) works on Ubuntu 14."""
from __future__ import print_function
import argparse
import glob
import logging
import os
import platform
import re
import subprocess
import sys
def Die(msg):
"""Prints error and exits with status 1.
Args:
msg: The error message to emit
"""
print(msg, file=sys.stderr)
sys.exit(1)
def EnsureNodeJsIsInstalled():
"""Ensure Node.js is installed and that 'node' is the command to run."""
logging.info('entering ...')
try:
output = subprocess.check_output(['node', '--eval', 'console.log("42")'])
if b'42' == output.strip():
return
except (subprocess.CalledProcessError, OSError):
pass
Die('Node.js not found. Try "apt-get install nodejs" or follow the install instructions at https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation')
def CheckPrereqs():
"""Checks that various prerequisites for this script are satisfied."""
logging.info('entering ...')
if platform.system() != 'Linux' and platform.system() != 'Darwin':
Die('Sorry, this script assumes Linux or Mac OS X thus far. '
'Please feel free to edit the source and fix it to your needs.')
# Ensure source files are available.
for f in [
'validator-main.protoascii', 'validator.proto', 'validator_gen_js.py',
'package.json', 'engine/validator.js', 'engine/validator_test.js',
'engine/validator-in-browser.js', 'engine/tokenize-css.js',
'engine/definitions.js', 'engine/parse-css.js', 'engine/parse-srcset.js',
'engine/parse-url.js'
]:
if not os.path.exists(f):
Die('%s not found. Must run in amp_validator source directory.' % f)
# Ensure protoc is available.
try:
libprotoc_version = subprocess.check_output(['protoc', '--version'])
except (subprocess.CalledProcessError, OSError):
Die('Protobuf compiler not found. Try "apt-get install protobuf-compiler" or follow the install instructions at https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation.')
# Ensure 'libprotoc 2.5.0' or newer.
m = re.search(b'^(\\w+) (\\d+)\\.(\\d+)\\.(\\d+)', libprotoc_version)
if (m.group(1) != b'libprotoc' or
(int(m.group(2)), int(m.group(3)), int(m.group(4))) < (2, 5, 0)):
Die('Expected libprotoc 2.5.0 or newer, saw: %s' % libprotoc_version)
# Ensure that the Python protobuf package is installed.
for m in ['descriptor', 'text_format', 'json_format']:
module = 'google.protobuf.%s' % m
try:
__import__(module)
except ImportError:
# Python3 needs pip3. Python 2 needs pip.
if sys.version_info < (3, 0):
Die('%s not found. Try "pip install protobuf" or follow the install '
'instructions at https://github.com/ampproject/amphtml/blob/master/'
'validator/README.md#installation' % module)
else:
Die('%s not found. Try "pip3 install protobuf" or follow the install '
'instructions at https://github.com/ampproject/amphtml/blob/master/'
'validator/README.md#installation' % module)
# Ensure that yarn is installed.
try:
subprocess.check_output(['yarn', '--version'])
except (subprocess.CalledProcessError, OSError):
Die('Yarn package manager not found. Run '
'"curl -o- -L https://yarnpkg.com/install.sh | bash" '
'or see https://yarnpkg.com/docs/install.')
# Ensure JVM installed. TODO: Check for version?
try:
subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError, OSError):
Die('Java missing. Try "apt-get install openjdk-7-jre" or follow the install instructions at https://github.com/ampproject/amphtml/blob/master/validator/README.md#installation')
logging.info('... done')
def SetupOutDir(out_dir):
"""Sets up a clean output directory.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
if os.path.exists(out_dir):
subprocess.check_call(['rm', '-rf', out_dir])
os.mkdir(out_dir)
logging.info('... done')
def InstallNodeDependencies():
"""Installs the dependencies using yarn."""
logging.info('entering ...')
# Install the project dependencies specified in package.json into
# node_modules.
logging.info('installing AMP Validator engine dependencies ...')
subprocess.check_call(
['yarn', 'install'],
stdout=(open(os.devnull, 'wb') if os.environ.get('TRAVIS') else sys.stdout))
logging.info('installing AMP Validator nodejs dependencies ...')
subprocess.check_call(
['yarn', 'install'],
cwd='nodejs',
stdout=(open(os.devnull, 'wb') if os.environ.get('TRAVIS') else sys.stdout))
logging.info('... done')
def GenValidatorPb2Py(out_dir):
"""Calls the proto compiler to generate validator_pb2.py.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
subprocess.check_call(
['protoc', 'validator.proto', '--python_out=%s' % out_dir])
open('%s/__init__.py' % out_dir, 'w').close()
logging.info('... done')
def GenValidatorProtoascii(out_dir):
"""Assembles the validator protoascii file from the main and extensions.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
protoascii_segments = [open('validator-main.protoascii').read()]
extensions = glob.glob('extensions/*/validator-*.protoascii')
# In the Github project, the extensions are located in a sibling directory
# to the validator rather than a child directory.
if not extensions:
extensions = glob.glob('../extensions/*/validator-*.protoascii')
extensions.sort()
for extension in extensions:
protoascii_segments.append(open(extension).read())
f = open('%s/validator.protoascii' % out_dir, 'w')
f.write(''.join(protoascii_segments))
f.close()
logging.info('... done')
def GenValidatorProtoGeneratedJs(out_dir):
"""Calls validator_gen_js to generate validator-proto-generated.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
# These imports happen late, within this method because they don't necessarily
# exist when the module starts running, and the ones that probably do
# are checked by CheckPrereqs.
# pylint: disable=g-import-not-at-top
from google.protobuf import text_format
from google.protobuf import descriptor
from dist import validator_pb2
import validator_gen_js
# pylint: enable=g-import-not-at-top
out = []
validator_gen_js.GenerateValidatorGeneratedJs(
specfile=None,
validator_pb2=validator_pb2,
generate_proto_only=True,
generate_spec_only=False,
text_format=text_format,
html_format=None,
descriptor=descriptor,
out=out)
out.append('')
f = open('%s/validator-proto-generated.js' % out_dir, 'w')
f.write('\n'.join(out))
f.close()
logging.info('... done')
def GenValidatorGeneratedJs(out_dir):
"""Calls validator_gen_js to generate validator-generated.js and validator-generated.json.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
assert re.match(r'^[a-zA-Z_\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir
# These imports happen late, within this method because they don't necessarily
# exist when the module starts running, and the ones that probably do
# are checked by CheckPrereqs.
# pylint: disable=g-import-not-at-top
from google.protobuf import text_format
from google.protobuf import json_format
from google.protobuf import descriptor
from dist import validator_pb2
import validator_gen_js
# pylint: enable=g-import-not-at-top
out = []
validator_gen_js.GenerateValidatorGeneratedJs(
specfile='%s/validator.protoascii' % out_dir,
validator_pb2=validator_pb2,
generate_proto_only=False,
generate_spec_only=True,
text_format=text_format,
html_format=None,
descriptor=descriptor,
out=out)
out.append('')
f = open('%s/validator-generated.js' % out_dir, 'w')
f.write('\n'.join(out))
f.close()
out = []
validator_gen_js.GenerateValidatorGeneratedJson(
specfile='%s/validator.protoascii' % out_dir,
validator_pb2=validator_pb2,
text_format=text_format,
json_format=json_format,
out=out)
out.append('')
f = open('%s/validator-generated.json' % out_dir, 'w')
f.write('\n'.join(out))
f.close()
logging.info('... done')
def CompileWithClosure(js_files, definitions, entry_points, output_file):
"""Compiles the arguments with the Closure compiler for transpilation to ES5.
Args:
js_files: list of files to compile
definitions: list of definitions flags to closure compiler
entry_points: entry points (these won't be minimized)
output_file: name of the Javascript output file
"""
cmd = [
'java', '-jar', 'node_modules/google-closure-compiler-java/compiler.jar',
'--language_out=ES5_STRICT', '--dependency_mode=STRICT',
'--js_output_file=%s' % output_file
]
cmd += ['--entry_point=%s' % e for e in entry_points]
cmd += ['--output_manifest=%s' % ('%s.manifest' % output_file)]
cmd += [
'node_modules/google-closure-library/closure/**.js',
'!node_modules/google-closure-library/closure/**_test.js',
'node_modules/google-closure-library/third_party/closure/**.js',
'!node_modules/google-closure-library/third_party/closure/**_test.js'
]
cmd += js_files
cmd += definitions
subprocess.check_call(cmd)
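# For reference, a call such as (file list and entry point are illustrative)
#   CompileWithClosure(js_files=['engine/htmlparser.js'], definitions=[],
#                      entry_points=['amp.htmlparser.HtmlParser'],
#                      output_file='dist/example.js')
# roughly expands to:
#   java -jar node_modules/google-closure-compiler-java/compiler.jar \
#     --language_out=ES5_STRICT --dependency_mode=STRICT \
#     --js_output_file=dist/example.js --entry_point=amp.htmlparser.HtmlParser \
#     --output_manifest=dist/example.js.manifest \
#     node_modules/google-closure-library/closure/**.js ... engine/htmlparser.js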
def CompileValidatorMinified(out_dir):
"""Generates a minified validator script, which can be imported to validate.
Args:
out_dir: output directory
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/htmlparser.js',
'engine/parse-css.js', 'engine/parse-srcset.js',
'engine/parse-url.js', 'engine/tokenize-css.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir,
'engine/validator-in-browser.js', 'engine/validator.js',
'engine/amp4ads-parse-css.js', 'engine/keyframes-parse-css.js',
'engine/htmlparser-interface.js'
],
definitions=[],
entry_points=[
'amp.validator.validateString',
'amp.validator.renderValidationResult',
'amp.validator.renderErrorMessage'
],
output_file='%s/validator_minified.js' % out_dir)
logging.info('... done')
def RunSmokeTest(out_dir):
"""Runs a smoke test (minimum valid AMP and empty html file).
Args:
out_dir: output directory
"""
logging.info('entering ...')
# Run index.js on the minimum valid amp and observe that it passes.
p = subprocess.Popen(
[
'node', 'nodejs/index.js', '--validator_js',
'%s/validator_minified.js' % out_dir,
'testdata/feature_tests/minimum_valid_amp.html', '--format=text'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if (b'testdata/feature_tests/minimum_valid_amp.html: PASS\n', b'',
p.returncode) != (stdout, stderr, 0):
Die('Smoke test failed. returncode=%d stdout="%s" stderr="%s"' %
(p.returncode, stdout, stderr))
# Run index.js on an empty file and observe that it fails.
p = subprocess.Popen(
[
'node', 'nodejs/index.js', '--validator_js',
'%s/validator_minified.js' % out_dir,
'testdata/feature_tests/empty.html', '--format=text'
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode != 1:
Die('smoke test failed. Expected p.returncode==1, saw: %s' % p.returncode)
if not stderr.startswith(b'testdata/feature_tests/empty.html:1:0 '
b'The mandatory tag \'html'):
Die('smoke test failed; stderr was: "%s"' % stderr)
logging.info('... done')
def RunIndexTest():
"""Runs the index_test.js, which tests the NodeJS API.
"""
logging.info('entering ...')
p = subprocess.Popen(
['node', './index_test.js'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd='nodejs')
(stdout, stderr) = p.communicate()
if p.returncode != 0:
Die('index_test.js failed. returncode=%d stdout="%s" stderr="%s"' %
(p.returncode, stdout, stderr))
logging.info('... done')
def CompileValidatorTestMinified(out_dir):
"""Runs closure compiler for validator_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/htmlparser.js',
'engine/parse-css.js', 'engine/parse-srcset.js',
'engine/parse-url.js', 'engine/tokenize-css.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir,
'engine/validator-in-browser.js', 'engine/validator.js',
'engine/amp4ads-parse-css.js', 'engine/keyframes-parse-css.js',
'engine/htmlparser-interface.js', 'engine/validator_test.js'
],
definitions=[],
entry_points=['amp.validator.ValidatorTest'],
output_file='%s/validator_test_minified.js' % out_dir)
logging.info('... success')
def CompileHtmlparserTestMinified(out_dir):
"""Runs closure compiler for htmlparser_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/htmlparser.js', 'engine/htmlparser-interface.js',
'engine/htmlparser_test.js'
],
definitions=[],
entry_points=['amp.htmlparser.HtmlParserTest'],
output_file='%s/htmlparser_test_minified.js' % out_dir)
logging.info('... success')
def CompileParseCssTestMinified(out_dir):
"""Runs closure compiler for parse-css_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/parse-css.js', 'engine/parse-url.js',
'engine/tokenize-css.js', 'engine/css-selectors.js',
'engine/json-testutil.js', 'engine/parse-css_test.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_css.ParseCssTest'],
output_file='%s/parse-css_test_minified.js' % out_dir)
logging.info('... success')
def CompileParseUrlTestMinified(out_dir):
"""Runs closure compiler for parse-url_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/parse-url.js', 'engine/parse-css.js',
'engine/tokenize-css.js', 'engine/css-selectors.js',
'engine/json-testutil.js', 'engine/parse-url_test.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_url.ParseURLTest'],
output_file='%s/parse-url_test_minified.js' % out_dir)
logging.info('... success')
def CompileAmp4AdsParseCssTestMinified(out_dir):
"""Runs closure compiler for amp4ads-parse-css_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/amp4ads-parse-css_test.js',
'engine/parse-css.js', 'engine/parse-url.js',
'engine/amp4ads-parse-css.js', 'engine/tokenize-css.js',
'engine/css-selectors.js', 'engine/json-testutil.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_css.Amp4AdsParseCssTest'],
output_file='%s/amp4ads-parse-css_test_minified.js' % out_dir)
logging.info('... success')
def CompileKeyframesParseCssTestMinified(out_dir):
"""Runs closure compiler for keyframes-parse-css_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/keyframes-parse-css_test.js',
'engine/parse-css.js', 'engine/parse-url.js',
'engine/keyframes-parse-css.js', 'engine/tokenize-css.js',
'engine/css-selectors.js', 'engine/json-testutil.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_css.KeyframesParseCssTest'],
output_file='%s/keyframes-parse-css_test_minified.js' % out_dir)
logging.info('... success')
def CompileParseSrcsetTestMinified(out_dir):
"""Runs closure compiler for parse-srcset_test.js.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
CompileWithClosure(
js_files=[
'engine/definitions.js', 'engine/parse-srcset.js',
'engine/json-testutil.js', 'engine/parse-srcset_test.js',
'%s/validator-generated.js' % out_dir,
'%s/validator-proto-generated.js' % out_dir
],
definitions=[],
entry_points=['parse_srcset.ParseSrcsetTest'],
output_file='%s/parse-srcset_test_minified.js' % out_dir)
logging.info('... success')
def GenerateTestRunner(out_dir):
"""Generates a test runner: a nodejs script that runs our minified tests.
Args:
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
f = open('%s/test_runner' % out_dir, 'w')
extensions_dir = 'extensions'
# In the Github project, the extensions are located in a sibling directory
# to the validator rather than a child directory.
if not os.path.isdir(extensions_dir):
extensions_dir = '../extensions'
f.write("""#!/usr/bin/env node
global.assert = require('assert');
global.fs = require('fs');
global.path = require('path');
var JasmineRunner = require('jasmine');
var jasmine = new JasmineRunner();
process.env.TESTDATA_ROOTS = 'testdata:%s'
require('./validator_test_minified');
require('./htmlparser_test_minified');
require('./parse-css_test_minified');
require('./parse-url_test_minified');
require('./amp4ads-parse-css_test_minified');
require('./keyframes-parse-css_test_minified');
require('./parse-srcset_test_minified');
jasmine.onComplete(function (passed) {
process.exit(passed ? 0 : 1);
});
jasmine.execute();
""" % extensions_dir)
os.chmod('%s/test_runner' % out_dir, 0o750)
logging.info('... success')
def RunTests(update_tests, out_dir):
"""Runs all the minified tests.
Args:
update_tests: a boolean indicating whether or not to update the test
output files.
out_dir: directory name of the output directory. Must not have slashes,
dots, etc.
"""
logging.info('entering ...')
env = os.environ.copy()
if update_tests:
env['UPDATE_VALIDATOR_TEST'] = '1'
subprocess.check_call(['node', '%s/test_runner' % out_dir], env=env)
logging.info('... success')
def Main(parsed_args):
"""The main method, which executes all build steps and runs the tests."""
logging.basicConfig(
format='[[%(filename)s %(funcName)s]] - %(message)s',
level=(logging.ERROR if os.environ.get('TRAVIS') else logging.INFO))
EnsureNodeJsIsInstalled()
CheckPrereqs()
InstallNodeDependencies()
SetupOutDir(out_dir='dist')
GenValidatorProtoascii(out_dir='dist')
GenValidatorPb2Py(out_dir='dist')
GenValidatorProtoGeneratedJs(out_dir='dist')
GenValidatorGeneratedJs(out_dir='dist')
CompileValidatorMinified(out_dir='dist')
RunSmokeTest(out_dir='dist')
RunIndexTest()
CompileValidatorTestMinified(out_dir='dist')
CompileHtmlparserTestMinified(out_dir='dist')
CompileParseCssTestMinified(out_dir='dist')
CompileParseUrlTestMinified(out_dir='dist')
CompileAmp4AdsParseCssTestMinified(out_dir='dist')
CompileKeyframesParseCssTestMinified(out_dir='dist')
CompileParseSrcsetTestMinified(out_dir='dist')
GenerateTestRunner(out_dir='dist')
RunTests(update_tests=parsed_args.update_tests, out_dir='dist')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Build script for the AMP Validator.')
parser.add_argument(
'--update_tests',
action='store_true',
help=('If True, validator_test will overwrite the .out test files with '
'the encountered test output.'))
Main(parser.parse_args())
|
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import is_state
from charms.reactive import set_state
from charms.reactive import when
from charms.reactive import when_not
from charms.reactive.helpers import data_changed
from charmhelpers.core import hookenv, unitdata
from shlex import split
from subprocess import check_call
from subprocess import check_output
db = unitdata.kv()
USER = 'system:e2e'
@hook('upgrade-charm')
def reset_delivery_states():
''' Remove the state set when resources are unpacked. '''
install_snaps()
@when('kubernetes-e2e.installed')
def report_status():
''' Report the status of the charm. '''
messaging()
def messaging():
''' Probe our relations to determine the proper messaging to the
end user. '''
missing_services = []
if not is_state('kubernetes-master.available'):
missing_services.append('kubernetes-master:http')
if not is_state('certificates.available'):
missing_services.append('certificates')
if not is_state('kubeconfig.ready'):
missing_services.append('kubernetes-master:kube-control')
if missing_services:
if len(missing_services) > 1:
subject = 'relations'
else:
subject = 'relation'
services = ','.join(missing_services)
message = 'Missing {0}: {1}'.format(subject, services)
hookenv.status_set('blocked', message)
return
hookenv.status_set('active', 'Ready to test.')
@when('config.changed.channel')
def channel_changed():
install_snaps()
def install_snaps():
''' Deliver the e2e and kubectl components from the binary resource stream
packages declared in the charm '''
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubernetes-test snap')
snap.install('kubernetes-test', channel=channel, classic=True)
set_state('kubernetes-e2e.installed')
@when('tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'kubernetes-master.available',
'kubernetes-e2e.installed', 'e2e.auth.bootstrapped')
@when_not('kubeconfig.ready')
def prepare_kubeconfig_certificates(master):
''' Prepare the data to feed to create the kubeconfig file. '''
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
creds = db.get('credentials')
data_changed('kube-control.creds', creds)
servers = get_kube_api_servers(master)
# pedantry
kubeconfig_path = '/home/ubuntu/.kube/config'
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/root/.kube/config', servers[0], ca,
token=creds['client_token'], user='root')
create_kubeconfig(kubeconfig_path, servers[0], ca,
token=creds['client_token'], user='ubuntu')
# Set permissions on the ubuntu user's kubeconfig to ensure a consistent UX
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
check_call(cmd)
messaging()
set_state('kubeconfig.ready')
@when('kube-control.connected')
def request_credentials(kube_control):
""" Request authorization creds."""
# Ask for a user, although we will be using the 'client_token'
kube_control.set_auth_request(USER)
@when('kube-control.auth.available')
def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected."""
creds = kube_control.get_auth_credentials(USER)
if creds \
and data_changed('kube-control.creds', creds) \
and creds['user'] == USER:
# We need to cache the credentials here because if the
# master changes (master leader dies and is replaced by a new one)
# the new master will have no recollection of our certs.
db.set('credentials', creds)
set_state('e2e.auth.bootstrapped')
@when('kubernetes-e2e.installed', 'kubeconfig.ready')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubectl', 'version', '--client']
from subprocess import CalledProcessError
try:
version = check_output(cmd).decode('utf-8')
except CalledProcessError:
message = "Missing kubeconfig causes errors. Skipping version set."
hookenv.log(message)
return
git_version = version.split('GitVersion:"v')[-1]
version_from = git_version.split('",')[0]
hookenv.application_version_set(version_from.rstrip())
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
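# For illustration, a token-based call like (all values below are made up):
#   create_kubeconfig('/home/ubuntu/.kube/config', 'https://10.0.0.1:6443',
#                     '/etc/ssl/ca.crt', token='abc123', user='ubuntu')
# roughly runs this kubectl sequence:
#   kubectl config --kubeconfig=... set-cluster juju-cluster --server=https://10.0.0.1:6443 \
#       --certificate-authority=/etc/ssl/ca.crt --embed-certs=true
#   kubectl config --kubeconfig=... unset users
#   kubectl config --kubeconfig=... set-credentials ubuntu --token=abc123
#   kubectl config --kubeconfig=... set-context juju-context --cluster=juju-cluster --user=ubuntu
#   kubectl config --kubeconfig=... use-context juju-context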
def get_kube_api_servers(master):
'''Return the kubernetes api server address and port for this
relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in master.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
def determine_arch():
''' dpkg wrapper to surface the architecture we are tied to'''
cmd = ['dpkg', '--print-architecture']
output = check_output(cmd).decode('utf-8')
return output.rstrip()
|
|
from utils import *
from sensationdriver.pattern import BezierPath
from sensationdriver.pattern import Track
class Point(object):
def __init__(self, time, value):
self.time = time
self.value = value
class Keyframe(object):
def __init__(self, control_point, out_tangent_end=None, in_tangent_start=None):
self.control_point = control_point
self.out_tangent_end = out_tangent_end
self.in_tangent_start = in_tangent_start
class TestBezierPath(unittest.TestCase):
def setUp(self):
self.logger = TestLogger()
def test_bezier_calculation(self):
start = Point(0, 0)
end = Point(1.8, 2)
start_out_tangent_end = Point(0.6, 0)
end_in_tangent_start = Point(1.2, -1.279795)
expected_values = [
(0.0, 0.0),
(0.055555, -0.01084869),
(0.111111, -0.03938968),
(0.166666, -0.0796154),
(0.222222, -0.1255182),
(0.277777, -0.1710906),
(0.333333, -0.2103248),
(0.388888, -0.2372134),
(0.444444, -0.2457487),
(0.5, -0.2299231),
(0.555555, -0.1837291),
(0.611111, -0.101159),
(0.666666, 0.02379485),
(0.722222, 0.1971399),
(0.777777, 0.4248839),
(0.833333, 0.7130347),
(0.888888, 1.067599),
(0.944444, 1.494586),
(1.0, 2)]
for t, expected_value in expected_values:
value = BezierPath.calculate_bezier_value(t, start, start_out_tangent_end, end_in_tangent_start, end)
self.assertAlmostEqual(value, expected_value, delta=0.00001)
def test_timeline(self):
start = Point(0, 0)
end = Point(1.8, 2)
start_out_tangent_end = Point(0.6, 0)
end_in_tangent_start = Point(1.2, -1.279795)
keyframes = [Keyframe(start, out_tangent_end=start_out_tangent_end), Keyframe(end, in_tangent_start=end_in_tangent_start)]
path = BezierPath(keyframes=keyframes)
expected_values = [
0.0,
-0.01084869,
-0.03938968,
-0.0796154,
-0.1255182,
-0.1710906,
-0.2103248,
-0.2372134,
-0.2457487,
-0.2299231,
-0.1837291,
-0.101159,
0.02379485,
0.1971399,
0.4248839,
0.7130347,
1.067599,
1.494586]
# preparation
timeline = path.timeline()
next(timeline)
i = 0
value = timeline.send(0)
while True:
self.assertAlmostEqual(value, expected_values[i], delta=0.00001)
try:
value = timeline.send(0.1)
i += 1
except StopIteration:
break
def test_multiple_keyframes(self):
# 0 - 0.3
# 0.1325325 - 0.3
# 0.2650649 - 1.161977
# 0.3975974 - 1.333954
# 0.8650649 - 1.940549
# 1.332533 - -0.5553294
# 1.8 - 2
p0 = Point(0, 0.3)
c1 = Point(0.1325325, 0.3)
c2 = Point(0.2650649, 1.161977)
p3 = Point(0.3975974, 1.333954)
c4 = Point(0.8650649, 1.940549)
c5 = Point(1.332533, -0.5553294)
p6 = Point(1.8, 2)
keyframes = [Keyframe(p0, out_tangent_end=c1),
Keyframe(p3, in_tangent_start=c2, out_tangent_end=c4),
Keyframe(p6, in_tangent_start=c5)]
path = BezierPath(keyframes=keyframes)
expected_values = [
0.3,
0.438888,
0.7567842,
1.105537,
1.337044,
1.420382,
1.427235,
1.37534,
1.282435,
1.166256,
1.044542,
0.9350283,
0.8554535,
0.8235546,
0.8570688,
0.9737334,
1.191285,
1.527464]
# preparation
timeline = path.timeline()
next(timeline)
i = 0
value = timeline.send(0)
while True:
self.assertAlmostEqual(value, expected_values[i], delta=0.00001)
try:
value = timeline.send(0.1)
i += 1
except StopIteration:
break
def test_calculate_bounds(self):
p0 = Point(532,333)
p1 = Point(117,305)
p2 = Point(28,93)
p3 = Point(265,42)
bounds = BezierPath._calculate_bounds(p0, p1, p2, p3)
self.assertAlmostEqual(bounds['left'], 135.77684049079755)
self.assertAlmostEqual(bounds['bottom'], 42)
self.assertAlmostEqual(bounds['right'], 532)
self.assertAlmostEqual(bounds['top'], 333)
def test_min_max_value(self):
# 0 - 0.3
# 0.1325325 - -0.184255
# 0.2650649 - 1.161977
# 0.3975974 - 1.333954
# 0.8650649 - 1.940549
# 1.332533 - -0.5553294
# 1.8 - 2
p0 = Point(0, 0.3)
c1 = Point(0.1325325, -0.184255)
c2 = Point(0.2650649, 1.161977)
p3 = Point(0.3975974, 1.333954)
c4 = Point(0.8650649, 1.940549)
c5 = Point(1.332533, -0.5553294)
p6 = Point(1.8, 2)
keyframes = [Keyframe(p0, out_tangent_end=c1),
Keyframe(p3, in_tangent_start=c2, out_tangent_end=c4),
Keyframe(p6, in_tangent_start=c5)]
path = BezierPath(keyframes=keyframes)
self.assertAlmostEqual(path.max_value, 2)
self.assertAlmostEqual(path.min_value, 0.19549810687379637)
class TestTrack(unittest.TestCase):
def setUp(self):
# 0 - 0.3
# 0.1325325 - 0.3
# 0.2650649 - 1.161977
# 0.3975974 - 1.333954
# 0.8650649 - 1.940549
# 1.332533 - -0.5553294
# 1.8 - 2
p0 = Point(0, 0.3)
c1 = Point(0.1325325, 0.3)
c2 = Point(0.2650649, 1.161977)
p3 = Point(0.3975974, 1.333954)
c4 = Point(0.8650649, 1.940549)
c5 = Point(1.332533, -0.5553294)
p6 = Point(1.8, 2)
keyframes = [Keyframe(p0, out_tangent_end=c1),
Keyframe(p3, in_tangent_start=c2, out_tangent_end=c4),
Keyframe(p6, in_tangent_start=c5)]
self.track = Track(target_region='region', actor_index='actor_index', keyframes=keyframes, priority=4)
def test_is_finished(self):
self.assertFalse(self.track.is_finished)
self.track.advance(0.5)
self.assertFalse(self.track.is_finished)
self.track.advance(1.3001)
self.assertTrue(self.track.is_finished)
self.assertIsNone(self.track.advance(0.01))
def test_returns_last_value(self):
value = 0
while not self.track.is_finished:
value = self.track.advance(0.3)
self.assertAlmostEqual(value, 1)
def test_returns_last_value_immediately(self):
value = self.track.advance(4)
self.assertAlmostEqual(value, 1, delta=0.00001)
def test_initial_value(self):
self.assertAlmostEqual(self.track.value, 0)
def test_advance(self):
value = self.track.advance(0.4)
self.assertAlmostEqual(value, 0.610026, delta=0.000001)
value = self.track.advance(0.4)
self.assertAlmostEqual(value, 0.577903, delta=0.000001)
value = self.track.advance(0.4)
self.assertAlmostEqual(value, 0.326737, delta=0.000001)
if __name__ == '__main__':
unittest.main()
|
|
import xmlrpclib
import time
import solr
import os
import json
import urllib2
import logging
class WorkflowManagerClient(object):
'''
Python client used to interact with a remote Labcas Workflow Manager.
Available methods are defined in Java class org.apache.oodt.cas.workflow.system.XmlRpcWorkflowManager.
'''
def __init__(self,
workflowManagerUrl='http://localhost:9001/',
fileManagerUrl='http://localhost:9000/',
solrUrl='http://localhost:8983/solr/oodt-fm',
verbose=False):
self.workflowManagerServerProxy = xmlrpclib.ServerProxy(workflowManagerUrl, verbose=verbose, allow_none=True)
self.fileManagerServerProxy = xmlrpclib.ServerProxy(fileManagerUrl, verbose=verbose, allow_none=True)
self.solrServerProxy = solr.SolrConnection(solrUrl)
def getWorkflowsByEvent(self, eventName):
'''Retrieve a specific workflow by the triggering event.'''
workflows = self.workflowManagerServerProxy.workflowmgr.getWorkflowsByEvent(eventName)
for workflow in workflows:
self.printWorkflow(workflow)
def getWorkflowById(self, workflowId):
'''Retrieve a specific workflow by its unique identifier.'''
workflow = self.workflowManagerServerProxy.workflowmgr.getWorkflowById(workflowId)
self.printWorkflow(workflow)
def executeWorkflow(self, tasks, metadata):
'''Submits a dynamic workflow composed of the specified tasks, using the specified metadata.'''
# FIXME: pass metadata through: s.encode('ascii',errors='ignore')
return self.workflowManagerServerProxy.workflowmgr.executeDynamicWorkflow(tasks, metadata)
def waitForCompletion(self, wInstId, debug=False):
''' Monitors a workflow instance until it completes.'''
# wait for the server to instantiate this workflow before querying it
time.sleep(4)
# now use the workflow instance id to check for status, wait until completed
running_status = ['CREATED', 'QUEUED', 'STARTED', 'PAUSED']
pge_task_status = ['STAGING INPUT', 'BUILDING CONFIG FILE', 'PGE EXEC', 'CRAWLING']
finished_status = ['FINISHED', 'ERROR', 'METMISS']
while (True):
try:
response = self.workflowManagerServerProxy.workflowmgr.getWorkflowInstanceById(wInstId)
status = response['status']
if status in running_status or status in pge_task_status:
logging.info('Workflow instance=%s running with status=%s' % (wInstId, status))
time.sleep(1)
elif status in finished_status:
logging.info('Workflow instance=%s ended with status=%s' % (wInstId, status))
break
else:
logging.info('UNRECOGNIZED WORKFLOW STATUS: %s' % status)
break
except xmlrpclib.Fault as e:
# must ignore XML-RPC exceptions that often arise when querying OODT for a specific workflow
# just try again with the same workflow identifier
logging.error(e)
if debug:
logging.debug(response)
def uploadDataset(self, metadata, update_dataset=True, in_place=False, debug=False):
if not update_dataset:
metadata['UpdateDataset'] = "false"
# NOTE: currently, if you start a named workflow, the XMLRPC interface only returns True/False, not a workflow instance identifier...
#tf = serverProxy.workflowmgr.handleEvent('labcas-upload', { 'DatasetId':'mydata' } )
# ... consequently, you must submit an equivalent dynamic workflow, which does return the workflow instance id
if in_place:
wInstId = self.workflowManagerServerProxy.workflowmgr.executeDynamicWorkflow( ['urn:edrn:LabcasUploadInitTask','urn:edrn:LabcasUpload2ExecuteTask'],
metadata )
else:
wInstId = self.workflowManagerServerProxy.workflowmgr.executeDynamicWorkflow( ['urn:edrn:LabcasUploadInitTask','urn:edrn:LabcasUploadExecuteTask'],
metadata )
# monitor workflow instance
self.waitForCompletion(wInstId, debug=debug)
def getProductTypeByName(self, datasetName):
# retrieve a specific product type by name
productTypeDict = self.fileManagerServerProxy.filemgr.getProductTypeByName(datasetName)
self.printProductType(productTypeDict)
def getProductTypeById(self, datasetId):
# retrieve a specific product type by name
productTypeDict = self.fileManagerServerProxy.filemgr.getProductTypeById(datasetId)
self.printProductType(productTypeDict)
def listProductTypes(self):
# list all supported product types
productTypes = self.fileManagerServerProxy.filemgr.getProductTypes()
for productTypeDict in productTypes:
self.printProductType(productTypeDict)
def listTopLevelProductTypes(self):
# roots of product type hierarchy (NOT to be displayed directly)
BUILTIN_PRODUCTS = ['GenericFile', 'LabcasProduct','EcasProduct']
# dictionary of top level product types:
# key = DatasetId, value = dictionary of properties
topLevelProductTypes = {}
# list all supported product types
productTypes = self.fileManagerServerProxy.filemgr.getProductTypes()
for productTypeDict in productTypes:
# assemble information to be displayed by UI
name = productTypeDict['name']
# do NOT include these types in display list
if name not in BUILTIN_PRODUCTS:
typeMetadata = productTypeDict['typeMetadata']
datasetId = typeMetadata['DatasetId'][0]
try:
datasetName = typeMetadata['DatasetName'][0]
except KeyError:
datasetName = None
description = productTypeDict['description']
if typeMetadata.get('OrganSite', None):
organSite = typeMetadata['OrganSite'][0]
else:
organSite = None
if typeMetadata.get('LeadPI', None):
leadPI = typeMetadata['LeadPI'][0]
else:
leadPI = None
topLevelProductTypes[datasetId] = { 'name': name,
'description': description,
'datasetId': datasetId,
'datasetName': datasetName,
'organSite': organSite,
'leadPI':leadPI }
return topLevelProductTypes
def listProductTypesByParent(self, parentDatasetId):
childrenProductTypes = []
# list all supported product types
productTypes = self.fileManagerServerProxy.filemgr.getProductTypes()
for productTypeDict in productTypes:
try:
if parentDatasetId in productTypeDict['typeMetadata']['ParentDatasetId']:
datasetId = productTypeDict['typeMetadata']['DatasetId'][0]
childrenProductTypes.append(datasetId)
except KeyError:
pass
return childrenProductTypes
def printProductType(self, productTypeDict):
logging.info('PRODUCT TYPE: %s' % productTypeDict['name'])
for key, value in productTypeDict.items():
# dictionary: typeMetadata = {'DataCustodianEmail': ['[email protected]'], 'DataDisclaimer': [...], ..}
if key=='typeMetadata':
logging.info('\t%s =' % key)
for _key, _value in value.items():
logging.info('\t\t%s = %s' % (_key, _value))
else:
logging.info('\t%s = %s' % (key, value))
def listProducts(self, productType):
# query for all products of this type (i.e. all files of this dataset), all versions
#response = self.solrServerProxy.query('*:*', fq=['DatasetId:%s' % datasetId], start=0)
#print "\nNumber of files found: %s (all versions)" % response.numFound
#for result in response.results:
# self.printProduct(result)
# query for all possible versions of this dataset
response = self.solrServerProxy.query('*:*', fq=['CAS.ProductTypeName:%s' % productType], start=0, rows=0, facet='true', facet_field='DatasetVersion')
versions = response.facet_counts['facet_fields']['DatasetVersion']
last_version = 0
for key, value in versions.items():
# NOTE: facet keys span the whole index, but their counts are specific to this search
if int(value)>0:
logging.info("\nDataset Version number %s has %s files" % (key, value))
if int(key) > last_version:
last_version = int(key)
# query for all files for a specific version
response = self.solrServerProxy.query('*:*', fq=['CAS.ProductTypeName:%s' % productType,'DatasetVersion:%s' % last_version ], start=0)
logging.info("\nLatest version: %s, number of files: %s, listing them all:" % (last_version, response.numFound))
for result in response.results:
self.printProduct(result)
def printProduct(self, result):
'''Utility function to print out the product metadata'''
logging.info('\nProduct ID=%s' % result['id'])
for key, values in result.items():
logging.info('\tProduct metadata key=%s values=%s' % (key, values))
def printWorkflow(self, workflowDict):
        '''Utility function to print out a workflow.'''
logging.info(workflowDict)
logging.info("Workflow id=%s name=%s" % (workflowDict['id'], workflowDict['name']))
for task in workflowDict['tasks']:
logging.info("Task: %s" % task)
|
|
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
from attr import attrib, attrs
from twisted.python import failure
from twisted.internet.task import Cooperator
from zope.interface import implementer
from ._boss import Boss
from ._dilation.manager import DILATION_VERSIONS
from ._dilation.connector import Connector
from ._interfaces import IDeferredWormhole, IWormhole
from ._key import derive_key
from .errors import NoKeyError, WormholeClosed
from .eventual import EventualQueue
from .journal import ImmediateJournal
from .observer import OneShotObserver, SequenceObserver
from .timing import DebugTiming
from .util import bytes_to_hexstr, to_bytes
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
# We can provide different APIs to different apps:
# * Deferreds
# w.get_code().addCallback(print_code)
# w.send_message(data)
# w.get_message().addCallback(got_data)
# w.close().addCallback(closed)
# * delegate callbacks (better for journaled environments)
# w = wormhole(delegate=app)
# w.send_message(data)
# app.wormhole_got_code(code)
# app.wormhole_got_verifier(verifier)
# app.wormhole_got_versions(versions)
# app.wormhole_got_message(data)
# w.close()
# app.wormhole_closed()
#
# * potential delegate options
# wormhole(delegate=app, delegate_prefix="wormhole_",
# delegate_args=(args, kwargs))
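# A minimal Deferred-mode sketch tying the calls above together (assumptions:
# a running Twisted reactor, and APPID / RELAY_URL standing in for a real
# application id and mailbox-server URL; neither name is defined in this
# module):
#
#   from twisted.internet.defer import inlineCallbacks
#
#   @inlineCallbacks
#   def demo(reactor):
#       w = create(APPID, RELAY_URL, reactor)
#       w.allocate_code()
#       code = yield w.get_code()
#       print("wormhole code:", code)
#       w.send_message(b"hello")
#       reply = yield w.get_message()
#       print("peer said:", reply)
#       yield w.close()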
@attrs
@implementer(IWormhole)
class _DelegatedWormhole(object):
_delegate = attrib()
def __attrs_post_init__(self):
self._key = None
def _set_boss(self, boss):
self._boss = boss
# from above
def allocate_code(self, code_length=2):
self._boss.allocate_code(code_length)
def input_code(self):
return self._boss.input_code()
def set_code(self, code):
self._boss.set_code(code)
# def serialize(self):
# s = {"serialized_wormhole_version": 1,
# "boss": self._boss.serialize(),
# }
# return s
def send_message(self, plaintext):
self._boss.send(plaintext)
def derive_key(self, purpose, length):
"""Derive a new key from the established wormhole channel for some
other purpose. This is a deterministic randomized function of the
session key and the 'purpose' string (unicode/py3-string). This
cannot be called until when_verifier() has fired, nor after close()
was called.
"""
if not isinstance(purpose, type("")):
raise TypeError(type(purpose))
if not self._key:
raise NoKeyError()
return derive_key(self._key, to_bytes(purpose), length)
def close(self):
self._boss.close()
def debug_set_trace(self,
client_name,
which="B N M S O K SK R RC L C T",
file=sys.stderr):
self._boss._set_trace(client_name, which, file)
# from below
def got_welcome(self, welcome):
self._delegate.wormhole_got_welcome(welcome)
def got_code(self, code):
self._delegate.wormhole_got_code(code)
def got_key(self, key):
self._delegate.wormhole_got_unverified_key(key)
self._key = key # for derive_key()
def got_verifier(self, verifier):
self._delegate.wormhole_got_verifier(verifier)
def got_versions(self, versions):
self._delegate.wormhole_got_versions(versions)
def received(self, plaintext):
self._delegate.wormhole_got_message(plaintext)
def closed(self, result):
self._delegate.wormhole_closed(result)
@implementer(IWormhole, IDeferredWormhole)
class _DeferredWormhole(object):
def __init__(self, reactor, eq, _enable_dilate=False):
self._reactor = reactor
self._welcome_observer = OneShotObserver(eq)
self._code_observer = OneShotObserver(eq)
self._key = None
self._key_observer = OneShotObserver(eq)
self._verifier_observer = OneShotObserver(eq)
self._version_observer = OneShotObserver(eq)
self._received_observer = SequenceObserver(eq)
self._closed = False
self._closed_observer = OneShotObserver(eq)
self._enable_dilate = _enable_dilate
def _set_boss(self, boss):
self._boss = boss
# from above
def get_code(self):
        # TODO: consider throwing an error unless one of allocate/set/input_code
        # was called first. It's legit to grab the Deferred before triggering
        # the process that will cause it to fire, but forbidding that
        # ordering would make it easier to catch programming errors that
        # forget to trigger it entirely.
return self._code_observer.when_fired()
def get_welcome(self):
return self._welcome_observer.when_fired()
def get_unverified_key(self):
return self._key_observer.when_fired()
def get_verifier(self):
return self._verifier_observer.when_fired()
def get_versions(self):
return self._version_observer.when_fired()
def get_message(self):
return self._received_observer.when_next_event()
def allocate_code(self, code_length=2):
self._boss.allocate_code(code_length)
def input_code(self):
return self._boss.input_code()
def set_code(self, code):
self._boss.set_code(code)
# no .serialize in Deferred-mode
def send_message(self, plaintext):
self._boss.send(plaintext)
def derive_key(self, purpose, length):
"""Derive a new key from the established wormhole channel for some
other purpose. This is a deterministic randomized function of the
session key and the 'purpose' string (unicode/py3-string). This
cannot be called until when_verified() has fired, nor after close()
was called.
"""
if not isinstance(purpose, type("")):
raise TypeError(type(purpose))
if not self._key:
raise NoKeyError()
return derive_key(self._key, to_bytes(purpose), length)
def dilate(self, transit_relay_location=None, no_listen=False):
if not self._enable_dilate:
raise NotImplementedError
return self._boss.dilate(transit_relay_location, no_listen) # fires with (endpoints)
def close(self):
# fails with WormholeError unless we established a connection
# (state=="happy"). Fails with WrongPasswordError (a subclass of
# WormholeError) if state=="scary".
d = self._closed_observer.when_fired() # maybe Failure
if not self._closed:
self._boss.close() # only need to close if it wasn't already
return d
def debug_set_trace(self,
client_name,
which="B N M S O K SK R RC L A I C T",
file=sys.stderr):
self._boss._set_trace(client_name, which, file)
# from below
def got_welcome(self, welcome):
self._welcome_observer.fire_if_not_fired(welcome)
def got_code(self, code):
self._code_observer.fire_if_not_fired(code)
def got_key(self, key):
self._key = key # for derive_key()
self._key_observer.fire_if_not_fired(key)
def got_verifier(self, verifier):
self._verifier_observer.fire_if_not_fired(verifier)
def got_versions(self, versions):
self._version_observer.fire_if_not_fired(versions)
def received(self, plaintext):
self._received_observer.fire(plaintext)
def closed(self, result):
self._closed = True
# print("closed", result, type(result), file=sys.stderr)
if isinstance(result, Exception):
# everything pending gets an error, including close()
f = failure.Failure(result)
self._closed_observer.error(f)
else:
# everything pending except close() gets an error:
# w.get_code()/welcome/unverified_key/verifier/versions/message
f = failure.Failure(WormholeClosed(result))
# but w.close() only gets error if we're unhappy
self._closed_observer.fire_if_not_fired(result)
self._welcome_observer.error(f)
self._code_observer.error(f)
self._key_observer.error(f)
self._verifier_observer.error(f)
self._version_observer.error(f)
self._received_observer.fire(f)
def create(
appid,
relay_url,
reactor, # use keyword args for everything else
versions={},
delegate=None,
journal=None,
tor=None,
timing=None,
stderr=sys.stderr,
_eventual_queue=None,
_enable_dilate=False):
timing = timing or DebugTiming()
side = bytes_to_hexstr(os.urandom(5))
journal = journal or ImmediateJournal()
eq = _eventual_queue or EventualQueue(reactor)
cooperator = Cooperator(scheduler=eq.eventually)
if delegate:
w = _DelegatedWormhole(delegate)
else:
w = _DeferredWormhole(reactor, eq, _enable_dilate=_enable_dilate)
# this indicates Wormhole capabilities
wormhole_versions = {
"can-dilate": DILATION_VERSIONS,
"dilation-abilities": Connector.get_connection_abilities(),
}
if not _enable_dilate:
wormhole_versions = {} # don't advertise Dilation yet: not ready
wormhole_versions["app_versions"] = versions # app-specific capabilities
v = __version__
if isinstance(v, type(b"")):
v = v.decode("utf-8", errors="replace")
client_version = ("python", v)
b = Boss(w, side, relay_url, appid, wormhole_versions, client_version,
reactor, eq, cooperator, journal, tor, timing)
w._set_boss(b)
b.start()
return w
# def from_serialized(serialized, reactor, delegate,
# journal=None, tor=None,
# timing=None, stderr=sys.stderr):
# assert serialized["serialized_wormhole_version"] == 1
# timing = timing or DebugTiming()
# w = _DelegatedWormhole(delegate)
# # now unpack state machines, including the SPAKE2 in Key
# b = Boss.from_serialized(w, serialized["boss"], reactor, journal, timing)
# w._set_boss(b)
# b.start() # ??
# raise NotImplemented
# # should the new Wormhole call got_code? only if it wasn't called before.
|
|
from __future__ import division, unicode_literals
import warnings
import unittest as unittest
import numpy as np
from scipy.spatial import ConvexHull
from pymatgen import Composition
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.analysis.phase_diagram import PhaseDiagram, \
GrandPotentialPhaseDiagram
from pymatgen.analysis.reaction_calculator import Reaction
from pymatgen.analysis.interface_reactions import InterfacialReactivity
class InterfaceReactionTest(unittest.TestCase):
def setUp(self):
self.entries = [ComputedEntry(Composition('Li'), 0),
ComputedEntry(Composition('Mn'), 0),
ComputedEntry(Composition('O2'), 0),
ComputedEntry(Composition('MnO2'), -10),
ComputedEntry(Composition('Mn2O4'), -60),
ComputedEntry(Composition('MnO3'), 20),
ComputedEntry(Composition('Li2O'), -10),
ComputedEntry(Composition('Li2O2'), -8),
ComputedEntry(Composition('LiMnO2'), -30)
]
self.pd = PhaseDiagram(self.entries)
chempots = {'Li': -3}
self.gpd = GrandPotentialPhaseDiagram(self.entries, chempots)
self.ir = []
# ir[0]
self.ir.append(
InterfacialReactivity(Composition('O2'), Composition('Mn'),
self.pd, norm=0, include_no_mixing_energy=0,
pd_non_grand=None, use_hull_energy=False))
# ir[1]
self.ir.append(
InterfacialReactivity(Composition('MnO2'), Composition('Mn'),
self.gpd, norm=0, include_no_mixing_energy=1,
pd_non_grand=self.pd, use_hull_energy=False))
# ir[2]
self.ir.append(
InterfacialReactivity(Composition('Mn'), Composition('O2'),
self.gpd, norm=1, include_no_mixing_energy=1,
pd_non_grand=self.pd, use_hull_energy=False))
# ir[3]
self.ir.append(
InterfacialReactivity(Composition('Li2O'), Composition('Mn'),
self.gpd, norm=0, include_no_mixing_energy=1,
pd_non_grand=self.pd, use_hull_energy=False))
# ir[4]
self.ir.append(
InterfacialReactivity(Composition('Mn'), Composition('O2'),
self.gpd, norm=1, include_no_mixing_energy=0,
pd_non_grand=self.pd, use_hull_energy=False))
# ir[5]
self.ir.append(
InterfacialReactivity(Composition('Mn'), Composition('Li2O'),
self.gpd, norm=1, include_no_mixing_energy=1,
pd_non_grand=self.pd, use_hull_energy=False))
# ir[6]
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('Li'),
self.pd, norm=0, include_no_mixing_energy=0,
pd_non_grand=None, use_hull_energy=True))
# ir[7]
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('Li'),
self.pd, norm=0, include_no_mixing_energy=0,
pd_non_grand=None, use_hull_energy=False))
# ir[8]
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('MnO2'),
self.gpd, norm=0, include_no_mixing_energy=0,
pd_non_grand=self.pd, use_hull_energy=True))
# ir[9]
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('MnO2'),
self.gpd, norm=0, include_no_mixing_energy=0,
pd_non_grand=self.pd, use_hull_energy=False))
# ir[10]
self.ir.append(
InterfacialReactivity(Composition('O2'), Composition('Mn'),
self.pd, norm=1, include_no_mixing_energy=0,
pd_non_grand=None, use_hull_energy=False))
# ir[11]
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('Li2O2'),
self.gpd, norm=1, include_no_mixing_energy=1,
pd_non_grand=self.pd, use_hull_energy=False))
# ir[12]
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('Li2O2'),
self.pd, norm=1, include_no_mixing_energy=0,
pd_non_grand=None, use_hull_energy=False))
with self.assertRaises(Exception) as context1:
self.ir.append(
InterfacialReactivity(Composition('Li2O2'), Composition('Li'),
self.pd, norm=1,
include_no_mixing_energy=1,
pd_non_grand=None))
self.assertTrue(
'Please provide grand phase diagram '
'to compute no_mixing_energy!' == str(context1.exception))
with self.assertRaises(Exception) as context2:
self.ir.append(
InterfacialReactivity(Composition('O2'), Composition('Mn'),
self.gpd, norm=0,
include_no_mixing_energy=1,
pd_non_grand=None))
self.assertTrue(
'Please provide non-grand phase diagram '
'to compute no_mixing_energy!' == str(context2.exception))
def test_get_entry_energy(self):
# Test warning message.
comp = Composition('MnO3')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
energy = InterfacialReactivity._get_entry_energy(self.pd, comp)
self.assertTrue(len(w) == 1)
self.assertTrue("The reactant MnO3 has no matching entry with"
" negative formation energy, instead convex "
"hull energy for this composition will be used"
" for reaction energy calculation."
in str(w[-1].message))
test1 = np.isclose(energy, -30, atol=1e-03)
self.assertTrue(test1,
'_get_entry_energy: energy for {} is wrong!'.format(
comp.reduced_formula))
# Test normal functionality
comp = Composition('MnO2')
test2 = np.isclose(
InterfacialReactivity._get_entry_energy(self.pd, comp),
-30, atol=1e-03)
self.assertTrue(test2,
'_get_entry_energy: energy for {} is wrong!'.format(
comp.reduced_formula))
def test_get_grand_potential(self):
comp = Composition('LiMnO2')
# Test non-normalized case
test1 = np.isclose(self.ir[1]._get_grand_potential(comp), -27,
atol=1e-03)
self.assertTrue(test1,
'_get_grand_potential: '
'Non-normalized case gets error!')
# Test normalized case
test2 = np.isclose(self.ir[2]._get_grand_potential(comp), -9,
atol=1e-03)
self.assertTrue(test2,
'_get_grand_potential: '
'Normalized case gets error!')
comp2 = Composition('Li2O2')
# Test use_hull_energy option.
test3 = np.isclose(self.ir[8]._get_grand_potential(comp2), -4,
atol=1e-03)
self.assertTrue(test3,
'_get_grand_potential: '
'get hull energy gets error!')
test4 = np.isclose(self.ir[9]._get_grand_potential(comp2), -2,
atol=1e-03)
self.assertTrue(test4,
'_get_grand_potential: '
'gets error for {}!'.format(comp2.reduced_formula))
def test_get_energy(self):
test1 = (np.isclose(self.ir[0]._get_energy(0.5), -15, atol=1e-03))
self.assertTrue(test1, '_get_energy: phase diagram gets error!')
test2 = (
np.isclose(self.ir[3]._get_energy(0.6666666),
-7.333333, atol=1e-03))
self.assertTrue(test2,
'_get_energy: '
'grand canonical phase diagram gets error!')
test3 = (
np.isclose(self.ir[6]._get_energy(0.3333333),
-3.333333, atol=1e-03))
self.assertTrue(test3,
'_get_energy: convex hull energy gets error. ')
test4 = (
np.isclose(self.ir[7]._get_energy(0.3333333),
-4, atol=1e-03))
self.assertTrue(test4,
'_get_energy: gets error. ')
def test_get_reaction(self):
test1 = str(self.ir[0]._get_reaction(0.5)) == '0.5 O2 + 0.5 Mn -> ' \
'0.5 MnO2'
self.assertTrue(test1,
'_get_reaction: '
'reaction not involving chempots species gets error!')
test2 = str(self.ir[3]._get_reaction(0.666666)) \
== '0.5 Mn + 0.5 Li2O -> Li + 0.25 MnO2 + 0.25 Mn' \
or str(self.ir[3]._get_reaction(0.666666)) \
== '0.5 Mn + 0.5 Li2O -> Li + 0.25 Mn + 0.25 MnO2'
self.assertTrue(test2,
'_get_reaction: '
'reaction involving chempots species gets error!')
def test_get_get_elmt_amt_in_rxt(self):
rxt1 = Reaction(
[Composition('Mn'), Composition('O2'), Composition('Li')],
[Composition('LiMnO2')])
test1 = np.isclose(self.ir[2]._get_elmt_amt_in_rxt(rxt1), 3)
self.assertTrue(test1,
'_get_get_elmt_amt_in_rxt: '
'gpd elements amounts gets error!')
rxt2 = rxt1
rxt2.normalize_to(Composition('Li'), 0.5)
test2 = np.isclose(self.ir[2]._get_elmt_amt_in_rxt(rxt2), 1.5)
self.assertTrue(test2,
'_get_get_elmt_amt_in_rxt: '
'gpd elements amounts gets error!')
rxt3 = Reaction([Composition('O2'), Composition('Li')],
[Composition('Li2O')])
# Li is not counted
test3 = np.isclose(self.ir[2]._get_elmt_amt_in_rxt(rxt3), 1)
self.assertTrue(test3,
'_get_get_elmt_amt_in_rxt: '
'gpd elements amounts gets error!')
# Li is counted
test4 = np.isclose(self.ir[6]._get_elmt_amt_in_rxt(rxt3), 3)
self.assertTrue(test4,
'_get_get_elmt_amt_in_rxt: '
'pd elements amounts gets error!')
def test_convert(self):
test_array = [(0.5, 1, 3), (0.4, 2, 3), (0, 1, 9), (1, 2, 7)]
result = [InterfacialReactivity._convert(x, f1, f2)
for x, f1, f2 in test_array]
answer = [0.75, 0.5, 0, 1]
self.assertTrue(np.allclose(result, answer),
'_convert: conversion gets error! {0} expected,'
' but gets {1}'.format(answer, result))
def test_reverse_convert(self):
test_array = [(0.5, 1, 3), (0.4, 2, 3), (0, 1, 9), (1, 2, 7)]
result = [InterfacialReactivity._reverse_convert(x, f1, f2)
for x, f1, f2 in test_array]
answer = [0.25, 0.3076923, 0, 1]
self.assertTrue(np.allclose(result, answer),
'_convert: conversion gets error! {0} expected,'
' but gets {1}'.format(answer, result))
def test_get_products(self):
test1 = sorted(self.ir[0].get_products()) == sorted(
['MnO2', 'O2', 'Mn'])
self.assertTrue(test1,
'get_products: decomposition products gets error '
'for reaction not involving chempots species!')
test2 = sorted(self.ir[3].get_products()) == sorted(
['Li', 'MnO2', 'Mn', 'Li2O'])
self.assertTrue(test2,
'get_decomp: decomposition products gets error '
'for reaction involving chempots species!')
def test_get_kinks(self):
def test_get_kinks_helper(ir, index_expect,
x_kink_expect, energy_kink_expect,
react_kink_expect,
energy_per_rxt_kink_expect):
lst = list(ir.get_kinks())
index = [i[0] for i in lst]
x_kink = [i[1] for i in lst]
energy_kink = [i[2] for i in lst]
react_kink = [str(i[3]) for i in lst]
energy_per_rxt_kink = [i[4] for i in lst]
test1 = index == index_expect
self.assertTrue(test1, 'get_kinks:index gets error!')
test2 = np.allclose(x_kink, x_kink_expect)
self.assertTrue(test2, 'get_kinks:x kinks gets error!')
test3 = np.allclose(energy_kink, energy_kink_expect)
self.assertTrue(test3, 'get_kinks:energy kinks gets error!')
            # Testing reaction strings is hard,
            # as species could be arranged in random order.
test4 = len(react_kink) == len(react_kink_expect)
self.assertTrue(test4,
'get_kinks: reaction kinks '
'gets error for {0} and {1} reaction!'.format(
ir.c1_original.reduced_formula,
ir.c2_original.reduced_formula))
test5 = np.allclose(energy_per_rxt_kink,
energy_per_rxt_kink_expect)
self.assertTrue(test5,
'get_kinks: energy_per_rxt_kinks gets error!')
test_get_kinks_helper(self.ir[0], [1, 2, 3], [0, 0.5, 1],
[0, -15, 0],
['Mn -> Mn', '0.5 O2 + 0.5 Mn -> 0.5 MnO2',
'O2 -> O2'],
[0,
-15 * InterfacialReactivity.EV_TO_KJ_PER_MOL,
0])
test_get_kinks_helper(self.ir[10], [1, 2, 3], [0, 0.66667, 1],
[0, -10, 0],
['Mn -> Mn', '0.5 O2 + 0.5 Mn -> 0.5 MnO2',
'O2 -> O2'],
[0,
-15 * InterfacialReactivity.EV_TO_KJ_PER_MOL,
0])
test_get_kinks_helper(self.ir[11], [1, 2], [0, 1], [-3, -3],
['Li2O2 + 2 Li -> 2 Li2O',
'Li2O2 + 2 Li -> 2 Li2O'],
[-6 * InterfacialReactivity.EV_TO_KJ_PER_MOL] *
2)
test_get_kinks_helper(self.ir[12], [1, 2], [0, 1], [-0.5, -0.5],
['Li2O2 -> Li2O + 0.5 O2',
'Li2O2 -> Li2O + 0.5 O2'],
[-2 * InterfacialReactivity.EV_TO_KJ_PER_MOL] *
2)
def test_convexity(self):
def test_convexity_helper(ir):
lst = list(ir.get_kinks())
x_kink = [i[1] for i in lst]
energy_kink = [i[2] for i in lst]
points = list(zip(x_kink, energy_kink))
if len(points) >= 3:
                # To test convexity of the plot, construct a convex hull from
                # the kinks and make sure that
                # 1. all points lie on or below the line joining the two end
                #    points
                # 2. all points are on the convex hull.
relative_vectors_1 = [(x - x_kink[0], e - energy_kink[0])
for x, e in points]
relative_vectors_2 = [(x - x_kink[-1], e - energy_kink[-1])
for x, e in points]
relative_vectors = zip(relative_vectors_1, relative_vectors_2)
positions = [np.cross(v1, v2) for v1, v2 in relative_vectors]
test1 = np.all(np.array(positions) <= 0)
hull = ConvexHull(points)
test2 = len(hull.vertices) == len(points)
self.assertTrue(test1 and test2,
'Error: Generating non-convex plot!')
test_convexity_helper(self.ir[0])
test_convexity_helper(self.ir[1])
test_convexity_helper(self.ir[2])
test_convexity_helper(self.ir[3])
test_convexity_helper(self.ir[4])
test_convexity_helper(self.ir[5])
test_convexity_helper(self.ir[6])
test_convexity_helper(self.ir[7])
test_convexity_helper(self.ir[8])
test_convexity_helper(self.ir[9])
test_convexity_helper(self.ir[10])
test_convexity_helper(self.ir[11])
test_convexity_helper(self.ir[12])
def test_get_original_composition_ratio(self):
# expected reaction1: 0.5 O2 + 0.5 Mn -> 0.5 MnO2
reaction1 = self.ir[0]._get_reaction(0.5)
test1 = np.isclose(self.ir[0]._get_original_composition_ratio(
reaction1), 0.5)
self.assertTrue(test1,
'_get_original_composition_ratio: '
'reaction not involving chempots species gets error!')
# expected reaction2: 0.5 Mn + 0.5 Li2O -> Li + 0.25 MnO2 + 0.25 Mn
reaction2 = self.ir[3]._get_reaction(0.666666)
test2 = np.isclose(self.ir[3]._get_original_composition_ratio(
reaction2), 0.5)
self.assertTrue(test2,
'_get_original_composition_ratio: '
'reaction involving chempots species gets error!')
def test_get_critical_original_kink_ratio(self):
test1 = np.allclose(self.ir[0].get_critical_original_kink_ratio(),
[0, 0.5, 1])
self.assertTrue(test1, 'get_critical_original_kink_ratio:'
' gets error!')
test2 = np.allclose(self.ir[10].get_critical_original_kink_ratio(),
[0, 0.5, 1])
self.assertTrue(test2, 'get_critical_original_kink_ratio:'
' gets error!')
test3 = np.allclose(self.ir[11].get_critical_original_kink_ratio(),
[0, 1])
self.assertTrue(test3, 'get_critical_original_kink_ratio:'
' gets error!')
test4 = np.allclose(self.ir[2].get_critical_original_kink_ratio(),
[0, 0.5, 1])
self.assertTrue(test4, 'get_critical_original_kink_ratio:'
' gets error!')
test5 = np.allclose(self.ir[3].get_critical_original_kink_ratio(),
[0, 0.66666, 1])
self.assertTrue(test5, 'get_critical_original_kink_ratio:'
' gets error!')
def test_labels(self):
ir = self.ir[0]
        labels = ir.labels()
        test1 = labels == {1: 'x= 0.0 energy in eV/atom = 0.0 Mn -> Mn',
                           2: 'x= 0.5 energy in eV/atom = -15.0 0.5 O2 + 0.5 '
                              'Mn -> 0.5 MnO2',
                           3: 'x= 1.0 energy in eV/atom = 0.0 O2 -> O2'}
self.assertTrue(test1,
'labels:label does not match for interfacial system '
'with {0} and {1}.'.format(
ir.c1_original.reduced_formula,
ir.c2_original.reduced_formula))
def test_plot(self):
        # Testing the plot is hard. Here we just call the plot function to
        # see if any error occurs.
for i in self.ir:
i.plot()
def test_minimum(self):
answer = [
(0.5, -15),
(0, 0),
(0.3333333, -10),
(0.6666666, -7.333333),
(0.3333333, -7.333333),
(0.1428571, -7.333333),
(0.3333333, -3.333333),
(0.3333333, -4.0),
]
for i, j in zip(self.ir, answer):
self.assertTrue(np.allclose(i.minimum(), j),
'minimum: the system with {0} and {1} '
'gets error!{2} expected, but gets {3}'.format(
i.c1_original.reduced_formula,
i.c2_original.reduced_formula, str(j),
str(i.minimum())))
def test_get_no_mixing_energy(self):
with self.assertRaises(Exception) as context1:
self.ir[0].get_no_mixing_energy()
self.assertTrue(
'Please provide grand potential phase diagram'
' for computing no_mixing_energy!' == str(context1.exception))
answer = [
[(u'MnO2 (eV/f.u.)', 0.0), (u'Mn (eV/f.u.)', 0.0)],
[(u'Mn (eV/atom)', 0.0), (u'O2 (eV/atom)', -4.0)],
[(u'Li2O (eV/f.u.)', 0.0), (u'Mn (eV/f.u.)', 0.0)],
[(u'Mn (eV/atom)', 0.0), (u'O2 (eV/atom)', -4.0)],
[(u'Mn (eV/atom)', 0.0), (u'Li2O (eV/atom)', 0.0)]
]
def name_lst(lst):
return (lst[0][0], lst[1][0])
def energy_lst(lst):
return (lst[0][1], lst[1][1])
result_info = [i.get_no_mixing_energy() for i in self.ir if i.grand]
for i, j in zip(result_info, answer):
self.assertTrue(name_lst(i) == name_lst(j),
'get_no_mixing_energy: names get error,'
' {0} expected but gets {1}'.format(
name_lst(j), name_lst(i)))
self.assertTrue(np.allclose(energy_lst(i), energy_lst(j)),
'get_no_mixing_energy: '
'no_mixing energies get error, '
'{0} expected but gets {1}'.format(
energy_lst(j), energy_lst(i)))
def test_get_chempot_correction(self):
# test data from fig. 6 in ref:
# Prediction of A2BX4 metal-chalcogenide compounds via
# first-principles thermodynamics, PHYSICAL REVIEW B 86, 014109 (2012)
# test pressure effect.
actual = InterfacialReactivity.get_chempot_correction("O", 298.15,
100E5)
expect = 0.05916
self.assertTrue(np.isclose(actual, expect, atol=1E-2),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect,
actual))
# test temperature effect.
actual_2 = InterfacialReactivity.get_chempot_correction("O", 1000,
1E5)
expect_2 = -0.82352
self.assertTrue(np.isclose(actual_2, expect_2, atol=1E-2),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect_2,
actual_2))
actual_3 = InterfacialReactivity.get_chempot_correction("O", 500,
1E5)
expect_3 = -0.223
self.assertTrue(np.isclose(actual_3, expect_3, atol=1E-2),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect_3,
actual_3))
# test mixed effect.
actual_4 = InterfacialReactivity.get_chempot_correction("O", 1000,
1E-25)
expect_4 = -3.800
self.assertTrue(np.isclose(actual_4, expect_4, atol=1E-2),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect_4,
actual_4))
actual_5 = InterfacialReactivity.get_chempot_correction("O", 1250,
1E-25)
expect_5 = -4.86
self.assertTrue(np.isclose(actual_5, expect_5, atol=1E-2),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect_5,
actual_5))
actual_6 = InterfacialReactivity.get_chempot_correction("O", 1500,
1E-25)
expect_6 = -5.928
self.assertTrue(np.isclose(actual_6, expect_6, atol=1E-2),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect_6,
actual_6))
actual_7 = InterfacialReactivity.get_chempot_correction("O", 1000,
1E-15)
expect_7 = -2.808
self.assertTrue(np.isclose(actual_7, expect_7, atol=1E-2),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect_7,
actual_7))
# test non-gas phase.
actual_8 = InterfacialReactivity.get_chempot_correction("Li", 1000,
1E15)
expect_8 = 0
self.assertTrue(np.isclose(actual_8, expect_8, atol=1E-5),
"get_chempot_correction gets "
"error, {0} expected but gets {1}".format(expect_8,
actual_8))
if __name__ == '__main__':
unittest.main()
|
|
import numpy as np
import pytest
from pandas import DataFrame
pytest.importorskip("jinja2")
def bar_grad(a=None, b=None, c=None, d=None):
"""Used in multiple tests to simplify formatting of expected result"""
ret = [("width", "10em")]
if all(x is None for x in [a, b, c, d]):
return ret
return ret + [
(
"background",
f"linear-gradient(90deg,{','.join([x for x in [a, b, c, d] if x])})",
)
]
def no_bar():
return bar_grad()
def bar_to(x, color="#d65f5f"):
return bar_grad(f" {color} {x:.1f}%", f" transparent {x:.1f}%")
def bar_from_to(x, y, color="#d65f5f"):
return bar_grad(
f" transparent {x:.1f}%",
f" {color} {x:.1f}%",
f" {color} {y:.1f}%",
f" transparent {y:.1f}%",
)
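# A quick illustration of the helpers above (derived from their definitions,
# shown only for reference): with the default color, bar_to(50) expands to
#   [("width", "10em"),
#    ("background", "linear-gradient(90deg, #d65f5f 50.0%, transparent 50.0%)")]
# and bar_from_to(25, 75) yields the same "width" entry plus a gradient that is
# transparent up to 25.0%, colored between 25.0% and 75.0%, and transparent
# after that.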
@pytest.fixture
def df_pos():
return DataFrame([[1], [2], [3]])
@pytest.fixture
def df_neg():
return DataFrame([[-1], [-2], [-3]])
@pytest.fixture
def df_mix():
return DataFrame([[-3], [1], [2]])
@pytest.mark.parametrize(
"align, exp",
[
("left", [no_bar(), bar_to(50), bar_to(100)]),
("right", [bar_to(100), bar_from_to(50, 100), no_bar()]),
("mid", [bar_to(33.33), bar_to(66.66), bar_to(100)]),
("zero", [bar_from_to(50, 66.7), bar_from_to(50, 83.3), bar_from_to(50, 100)]),
("mean", [bar_to(50), no_bar(), bar_from_to(50, 100)]),
(2.0, [bar_to(50), no_bar(), bar_from_to(50, 100)]),
(np.median, [bar_to(50), no_bar(), bar_from_to(50, 100)]),
],
)
def test_align_positive_cases(df_pos, align, exp):
# test different align cases for all positive values
result = df_pos.style.bar(align=align)._compute().ctx
expected = {(0, 0): exp[0], (1, 0): exp[1], (2, 0): exp[2]}
assert result == expected
@pytest.mark.parametrize(
"align, exp",
[
("left", [bar_to(100), bar_to(50), no_bar()]),
("right", [no_bar(), bar_from_to(50, 100), bar_to(100)]),
("mid", [bar_from_to(66.66, 100), bar_from_to(33.33, 100), bar_to(100)]),
("zero", [bar_from_to(33.33, 50), bar_from_to(16.66, 50), bar_to(50)]),
("mean", [bar_from_to(50, 100), no_bar(), bar_to(50)]),
(-2.0, [bar_from_to(50, 100), no_bar(), bar_to(50)]),
(np.median, [bar_from_to(50, 100), no_bar(), bar_to(50)]),
],
)
def test_align_negative_cases(df_neg, align, exp):
# test different align cases for all negative values
result = df_neg.style.bar(align=align)._compute().ctx
expected = {(0, 0): exp[0], (1, 0): exp[1], (2, 0): exp[2]}
assert result == expected
@pytest.mark.parametrize(
"align, exp",
[
("left", [no_bar(), bar_to(80), bar_to(100)]),
("right", [bar_to(100), bar_from_to(80, 100), no_bar()]),
("mid", [bar_to(60), bar_from_to(60, 80), bar_from_to(60, 100)]),
("zero", [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]),
("mean", [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]),
(-0.0, [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]),
(np.nanmedian, [bar_to(50), no_bar(), bar_from_to(50, 62.5)]),
],
)
@pytest.mark.parametrize("nans", [True, False])
def test_align_mixed_cases(df_mix, align, exp, nans):
# test different align cases for mixed positive and negative values
# also test no impact of NaNs and no_bar
expected = {(0, 0): exp[0], (1, 0): exp[1], (2, 0): exp[2]}
if nans:
df_mix.loc[3, :] = np.nan
expected.update({(3, 0): no_bar()})
result = df_mix.style.bar(align=align)._compute().ctx
assert result == expected
@pytest.mark.parametrize(
"align, exp",
[
(
"left",
{
"index": [[no_bar(), no_bar()], [bar_to(100), bar_to(100)]],
"columns": [[no_bar(), bar_to(100)], [no_bar(), bar_to(100)]],
"none": [[no_bar(), bar_to(33.33)], [bar_to(66.66), bar_to(100)]],
},
),
(
"mid",
{
"index": [[bar_to(33.33), bar_to(50)], [bar_to(100), bar_to(100)]],
"columns": [[bar_to(50), bar_to(100)], [bar_to(75), bar_to(100)]],
"none": [[bar_to(25), bar_to(50)], [bar_to(75), bar_to(100)]],
},
),
(
"zero",
{
"index": [
[bar_from_to(50, 66.66), bar_from_to(50, 75)],
[bar_from_to(50, 100), bar_from_to(50, 100)],
],
"columns": [
[bar_from_to(50, 75), bar_from_to(50, 100)],
[bar_from_to(50, 87.5), bar_from_to(50, 100)],
],
"none": [
[bar_from_to(50, 62.5), bar_from_to(50, 75)],
[bar_from_to(50, 87.5), bar_from_to(50, 100)],
],
},
),
(
2,
{
"index": [
[bar_to(50), no_bar()],
[bar_from_to(50, 100), bar_from_to(50, 100)],
],
"columns": [
[bar_to(50), no_bar()],
[bar_from_to(50, 75), bar_from_to(50, 100)],
],
"none": [
[bar_from_to(25, 50), no_bar()],
[bar_from_to(50, 75), bar_from_to(50, 100)],
],
},
),
],
)
@pytest.mark.parametrize("axis", ["index", "columns", "none"])
def test_align_axis(align, exp, axis):
# test all axis combinations with positive values and different aligns
data = DataFrame([[1, 2], [3, 4]])
result = (
data.style.bar(align=align, axis=None if axis == "none" else axis)
._compute()
.ctx
)
expected = {
(0, 0): exp[axis][0][0],
(0, 1): exp[axis][0][1],
(1, 0): exp[axis][1][0],
(1, 1): exp[axis][1][1],
}
assert result == expected
@pytest.mark.parametrize(
"values, vmin, vmax",
[
("positive", 1.5, 2.5),
("negative", -2.5, -1.5),
("mixed", -2.5, 1.5),
],
)
@pytest.mark.parametrize("nullify", [None, "vmin", "vmax"]) # test min/max separately
@pytest.mark.parametrize("align", ["left", "right", "zero", "mid"])
def test_vmin_vmax_clipping(df_pos, df_neg, df_mix, values, vmin, vmax, nullify, align):
# test that clipping occurs if any vmin > data_values or vmax < data_values
if align == "mid": # mid acts as left or right in each case
if values == "positive":
align = "left"
elif values == "negative":
align = "right"
df = {"positive": df_pos, "negative": df_neg, "mixed": df_mix}[values]
vmin = None if nullify == "vmin" else vmin
vmax = None if nullify == "vmax" else vmax
clip_df = df.where(df <= (vmax if vmax else 999), other=vmax)
clip_df = clip_df.where(clip_df >= (vmin if vmin else -999), other=vmin)
result = (
df.style.bar(align=align, vmin=vmin, vmax=vmax, color=["red", "green"])
._compute()
.ctx
)
expected = clip_df.style.bar(align=align, color=["red", "green"])._compute().ctx
assert result == expected
@pytest.mark.parametrize(
"values, vmin, vmax",
[
("positive", 0.5, 4.5),
("negative", -4.5, -0.5),
("mixed", -4.5, 4.5),
],
)
@pytest.mark.parametrize("nullify", [None, "vmin", "vmax"]) # test min/max separately
@pytest.mark.parametrize("align", ["left", "right", "zero", "mid"])
def test_vmin_vmax_widening(df_pos, df_neg, df_mix, values, vmin, vmax, nullify, align):
# test that widening occurs if any vmax > data_values or vmin < data_values
if align == "mid": # mid acts as left or right in each case
if values == "positive":
align = "left"
elif values == "negative":
align = "right"
df = {"positive": df_pos, "negative": df_neg, "mixed": df_mix}[values]
vmin = None if nullify == "vmin" else vmin
vmax = None if nullify == "vmax" else vmax
expand_df = df.copy()
expand_df.loc[3, :], expand_df.loc[4, :] = vmin, vmax
result = (
df.style.bar(align=align, vmin=vmin, vmax=vmax, color=["red", "green"])
._compute()
.ctx
)
expected = expand_df.style.bar(align=align, color=["red", "green"])._compute().ctx
assert result.items() <= expected.items()
def test_numerics():
# test data is pre-selected for numeric values
data = DataFrame([[1, "a"], [2, "b"]])
result = data.style.bar()._compute().ctx
assert (0, 1) not in result
assert (1, 1) not in result
@pytest.mark.parametrize(
"align, exp",
[
("left", [no_bar(), bar_to(100, "green")]),
("right", [bar_to(100, "red"), no_bar()]),
("mid", [bar_to(25, "red"), bar_from_to(25, 100, "green")]),
("zero", [bar_from_to(33.33, 50, "red"), bar_from_to(50, 100, "green")]),
],
)
def test_colors_mixed(align, exp):
data = DataFrame([[-1], [3]])
result = data.style.bar(align=align, color=["red", "green"])._compute().ctx
assert result == {(0, 0): exp[0], (1, 0): exp[1]}
def test_bar_align_height():
    # test that when the height keyword is used, 'no-repeat center' and
    # 'background-size' are present
data = DataFrame([[1], [2]])
result = data.style.bar(align="left", height=50)._compute().ctx
bg_s = "linear-gradient(90deg, #d65f5f 100.0%, transparent 100.0%) no-repeat center"
expected = {
(0, 0): [("width", "10em")],
(1, 0): [
("width", "10em"),
("background", bg_s),
("background-size", "100% 50.0%"),
],
}
assert result == expected
def test_bar_value_error_raises():
df = DataFrame({"A": [-100, -60, -30, -20]})
msg = "`align` should be in {'left', 'right', 'mid', 'mean', 'zero'} or"
with pytest.raises(ValueError, match=msg):
df.style.bar(align="poorly", color=["#d65f5f", "#5fba7d"]).to_html()
msg = r"`width` must be a value in \[0, 100\]"
with pytest.raises(ValueError, match=msg):
df.style.bar(width=200).to_html()
msg = r"`height` must be a value in \[0, 100\]"
with pytest.raises(ValueError, match=msg):
df.style.bar(height=200).to_html()
|
|
import csv
def count(fileName):
numRows = 0
with open(fileName, 'r') as f:
reader = csv.reader(f,delimiter = ",")
data = list(reader)
row_count = len(data)-1
return row_count
def lm_predict(a,b,intercept,fileName):
rr_values = []
leastSquared = 0
acceptedPredictions = 0
total = count(fileName)
with open(fileName,'r') as f:
reader = csv.DictReader(f,delimiter = ',')
for line in reader:
if line['resprate'] == "":
#predict value and fill in
hr = float(line['heartrate'])
bp = float(line['bloodpressure'])
actual = float(line['removed'])
upRange = actual*1.1 #1.20#*1.05
lwRange = actual*.90 #.8 #.95
observed = a * hr + b * bp + intercept
line['resprate'] = observed
rr_values.append(observed)
leastSquared += (actual-observed)**2
# # If in acceptable range
# if lwRange <= observed and observed <= upRange:
# acceptedPredictions += 1
else:
rr_values.append(float(line['resprate']))
return fileName[-7:], leastSquared
def lm_predict_joined(a,b,c,d,intercept,fileName):
acceptedPredictions = 0
total = count(fileName)
leastSquared = 0
with open(fileName,'r') as f:
reader = csv.DictReader(f,delimiter = ',')
for line in reader:
if line['resprate'] == "":
#predict value and fill in
hr = float(line['heartrate'])
bp = float(line['bloodpressure'])
age = float(line['age'])
gender = (line['gender'])
if gender == "F":
gender = 1
else:
gender = 0
actual = float(line['removed'])
upRange = actual*1.1 #1.20#*1.05
lwRange = actual*.90 #.8 #.95
                observed = a * hr + b * bp + c * age + d * gender + intercept
line['resprate'] = observed
leastSquared += (actual-observed)**2
# # If in acceptable range
# if lwRange <= observed and observed <= upRange:
# acceptedPredictions += 1
return fileName[-7:],leastSquared
# 95% TRAIN | 05% TEST
# NOT JOINED
# p-value: < 2.2e-16
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 5.830201 0.370034 15.756 < 2e-16 ***
# heartrate 0.144626 0.004437 32.598 < 2e-16 ***
# bloodpressure 0.011313 0.003305 3.423 0.000628 ***
# JOINED
# p-value: < 2.2e-16
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 3.392385 0.884775 3.834 0.000129 ***
# heartrate 0.145465 0.004441 32.759 < 2e-16 ***
# bloodpressure 0.010847 0.003304 3.283 0.001042 **
# age 0.031139 0.011126 2.799 0.005170 **
# factor(gender)M 0.469527 0.315705 1.487 0.137079
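# Worked example of the joined model above (illustrative numbers, not taken
# from any of the CSV files): for heartrate=80, bloodpressure=120, age=65,
# gender=M the fitted equation gives approximately
#   resprate ~ 0.145465*80 + 0.010847*120 + 0.031139*65 + 0.469527*1 + 3.392385
#            ~ 11.637 + 1.302 + 2.024 + 0.470 + 3.392
#            ~ 18.8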
if __name__=='__main__':
joined_files = ['induced_dead_ace_joined_test_05_0.1.csv','induced_dead_ace_joined_test_05_0.3.csv','induced_dead_ace_joined_test_05_0.5.csv']
files = ['induced_dead_ace_test_05_0.1.csv','induced_dead_ace_test_05_0.3.csv','induced_dead_ace_test_05_0.5.csv']
    print('No Join')
for f in files:
# resprate ~heartrate + bloodpressure
(name,leastsquared) = lm_predict(0.144626,0.011313,5.830201,f)
        print((name, leastsquared))
        # print(rr_values)
    print('###########################################')
    print('Joined')
for jf in joined_files:
(name,leastsquared) = lm_predict_joined(0.145465, 0.010847, 0.031139 , 0.469527, 3.3923854,jf)
        print((name, leastsquared))
# Dead Patient Ace Joined Table -> 2643
# Create the data for the chart.
# not_joined <- c(4932.343740123179,2905.2458388668874,1342.5335549972406,264.63339070904397)
# distributions <- c(70,80,90,95)
# plot(not_joined, distributions, type="l", lwd=2, col="blue", ylim=c(0, 12), xaxs="i", yaxs="i")
# joined <- c(5149.982721025402,3105.213382732825,1086.4348315361267,318.32722098806966)
# # Give the chart file a name.
# png(file = "Least_Squared_Varying_Train_Test_Distributions.jpg")
# # Plot the bar chart.
# plot(v,type = "o",col = "red", xlab = "Train", ylab = "Rain fall",
# main = "Rain fall chart")
# lines(t, type = "o", col = "blue")
# # Save the file.
# dev.off()
######## Different train/test distributions, least squared
# 2644 Rows
# 1850
# 2115
# 2379
# 2511
# train 70%, test 30%
# No Join
# ('0.1.csv', 4932.343740123179)
# ('0.3.csv', 14672.663873677118)
# ('0.5.csv', 23214.70125550916)
# Joined
# ('0.1.csv', 5149.982721025402)
# ('0.3.csv', 12015.438090824508)
# ('0.5.csv', 23446.298130530235)
# train 80%, test 20%
# No Join
# ('0.1.csv', 2905.2458388668874)
# ('0.3.csv', 7859.365402605208)
# ('0.5.csv', 17159.443412678604)
# Joined
# ('0.1.csv', 3105.213382732825)
# ('0.3.csv', 8720.008280001373)
# ('0.5.csv', 15475.274385173194)
# train 90%, test 10%
# No Join
# ('0.1.csv', 1342.5335549972406)
# ('0.3.csv', 4365.742528772348)
# ('0.1.csv', 1342.5335549972406)
# Joined
# ('0.1.csv', 1086.4348315361267)
# ('0.3.csv', 4528.863447116168)
# ('0.5.csv', 7970.889865116953)
# train 95%, test 05%
# No Join
# ('0.1.csv', 264.63339070904397)
# ('0.3.csv', 2662.3915099226106)
# ('0.5.csv', 3529.902271957436)
# Joined
# ('0.1.csv', 318.32722098806966)
# ('0.3.csv', 2271.2817881885603)
# ('0.5.csv', 3643.205256700546)
# Linear Regression Equations
# 70% TRAIN | 30% TEST
# NOT JOINED
# p-value: < 2.2e-16
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 5.602958 0.434759 12.888 < 2e-16 ***
# heartrate 0.145658 0.005201 28.005 < 2e-16 ***
# bloodpressure 0.011643 0.003778 3.082 0.00209 *
# Residual standard error: 7.83 on 1847 degrees of freedom
# Multiple R-squared: 0.3294, Adjusted R-squared: 0.3287
# F-statistic: 453.6 on 2 and 1847 DF, p-value: < 2.2e-16
# JOINED
# p-value: < 2.2e-16
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 3.242482 1.017738 3.186 0.00147 **
# heartrate 0.146254 0.005204 28.102 < 2e-16 ***
# bloodpressure 0.011184 0.003779 2.960 0.00312 **
# age 0.029881 0.012798 2.335 0.01966 *
# factor(gender)M 0.516674 0.369022 1.400 0.16165
# Residual standard error: 7.819 on 1845 degrees of freedom
# Multiple R-squared: 0.332, Adjusted R-squared: 0.3305
# F-statistic: 229.2 on 4 and 1845 DF, p-value: < 2.2e-16
# 80% TRAIN | 20% TEST
# NOT JOINED
# p-value: < 2.2e-16
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 5.895305 0.407430 14.469 < 2e-16 ***
# heartrate 0.141619 0.004890 28.962 < 2e-16 ***
# bloodpressure 0.012837 0.003598 3.568 0.000367 ***
# Residual standard error: 7.864 on 2111 degrees of freedom
# Multiple R-squared: 0.3188, Adjusted R-squared: 0.3181
# F-statistic: 493.9 on 2 and 2111 DF, p-value: < 2.2e-16
# JOINED
# p-value: < 2.2e-16
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 3.967024 0.960521 4.130 3.77e-05 ***
# heartrate 0.142039 0.004894 29.026 < 2e-16 ***
# bloodpressure 0.012514 0.003600 3.476 0.000519 ***
# age 0.023831 0.012124 1.966 0.049463 *
# factor(gender)M 0.496224 0.347086 1.430 0.152955
# Residual standard error: 7.858 on 2109 degrees of freedom
# Multiple R-squared: 0.3206, Adjusted R-squared: 0.3193
# F-statistic: 248.8 on 4 and 2109 DF, p-value: < 2.2e-16
# 90% TRAIN | 10% TEST
# NOT JOINED
# p-value: < 2.2e-16
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 5.825879 0.378400 15.396 < 2e-16 ***
# heartrate 0.143485 0.004542 31.592 < 2e-16 ***
# bloodpressure 0.011735 0.003366 3.486 0.000499 ***
# Residual standard error: 7.825 on 2375 degrees of freedom
# Multiple R-squared: 0.3298, Adjusted R-squared: 0.3292
# F-statistic: 584.4 on 2 and 2375 DF, p-value: < 2.2e-16
# JOINED
# p-value: < 2.2e-16
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 3.967024 0.960521 4.130 3.77e-05 ***
# heartrate 0.142039 0.004894 29.026 < 2e-16 ***
# bloodpressure 0.012514 0.003600 3.476 0.000519 ***
# age 0.023831 0.012124 1.966 0.049463 *
# factor(gender)M 0.496224 0.347086 1.430 0.152955
# Residual standard error: 7.815 on 2373 degrees of freedom
# Multiple R-squared: 0.332, Adjusted R-squared: 0.3309
# F-statistic: 294.9 on 4 and 2373 DF, p-value: < 2.2e-16
# 95% TRAIN | 05% TEST
# NOT JOINED
# p-value: < 2.2e-16
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 5.830201 0.370034 15.756 < 2e-16 ***
# heartrate 0.144626 0.004437 32.598 < 2e-16 ***
# bloodpressure 0.011313 0.003305 3.423 0.000628 ***
# Residual standard error: 7.812 on 2507 degrees of freedom
# Multiple R-squared: 0.3312, Adjusted R-squared: 0.3307
# F-statistic: 620.9 on 2 and 2507 DF, p-value: < 2.2e-16
# JOINED
# p-value: < 2.2e-16
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 3.392385 0.884775 3.834 0.000129 ***
# heartrate 0.145465 0.004441 32.759 < 2e-16 ***
# bloodpressure 0.010847 0.003304 3.283 0.001042 **
# age 0.031139 0.011126 2.799 0.005170 **
# factor(gender)M 0.469527 0.315705 1.487 0.137079
# Residual standard error: 7.8 on 2505 degrees of freedom
# Multiple R-squared: 0.3338, Adjusted R-squared: 0.3328
# F-statistic: 313.8 on 4 and 2505 DF, p-value: < 2.2e-16
######## SCRAP, WRONG SCORE METRIC
#(1.05,.95) we see fewer differences between joined and unjoined performance
# No Join
# ('0.1.csv', 11, 0.1387137452711223)
# ('0.3.csv', 38, 0.15973097940311057)
# ('0.5.csv', 51, 0.12862547288776796)
# ###########################################
# Joined
# ('0.1.csv', 11, 0.1387137452711223)
# ('0.3.csv', 22, 0.09247583018074822)
# ('0.5.csv', 50, 0.12610340479192939)
# (1.03,.97) shows at least that joined performs as expected
# No Join
# ('0.1.csv', 7, 0.08827238335435056)
# ('0.3.csv', 26, 0.10928961748633881)
# ('0.5.csv', 33, 0.0832282471626734)
# ###########################################
# Joined
# ('0.1.csv', 9, 0.11349306431273642)
# ('0.3.csv', 13, 0.054644808743169404)
# ('0.5.csv', 26, 0.06557377049180328)
# The fact that these values don't show the gradient we expected
# may suggest overfitting, since we expected poorer performance.
# Comparing No Join vs Join for 0.1 (fewest missing values), the joined
# approach does at least as well as unjoined, if not a little better.
# The more values that are missing, the worse it does, possibly due to an
# ill-fitting model and random correlation between our chosen variables.
|
|
from collections.abc import Iterable
from numbers import Real, Integral
from warnings import warn
import numpy as np
import openmc.checkvalue as cv
from openmc.stats import Tabular, Univariate, Discrete, Mixture
from .function import Tabulated1D, INTERPOLATION_SCHEME
from .angle_energy import AngleEnergy
from .data import EV_PER_MEV
from .endf import get_list_record, get_tab2_record
class KalbachMann(AngleEnergy):
"""Kalbach-Mann distribution
Parameters
----------
breakpoints : Iterable of int
Breakpoints defining interpolation regions
interpolation : Iterable of int
Interpolation codes
energy : Iterable of float
Incoming energies at which distributions exist
energy_out : Iterable of openmc.stats.Univariate
Distribution of outgoing energies corresponding to each incoming energy
precompound : Iterable of openmc.data.Tabulated1D
Precompound factor 'r' as a function of outgoing energy for each
incoming energy
slope : Iterable of openmc.data.Tabulated1D
Kalbach-Chadwick angular distribution slope value 'a' as a function of
outgoing energy for each incoming energy
Attributes
----------
breakpoints : Iterable of int
Breakpoints defining interpolation regions
interpolation : Iterable of int
Interpolation codes
energy : Iterable of float
Incoming energies at which distributions exist
energy_out : Iterable of openmc.stats.Univariate
Distribution of outgoing energies corresponding to each incoming energy
precompound : Iterable of openmc.data.Tabulated1D
Precompound factor 'r' as a function of outgoing energy for each
incoming energy
slope : Iterable of openmc.data.Tabulated1D
Kalbach-Chadwick angular distribution slope value 'a' as a function of
outgoing energy for each incoming energy
"""
def __init__(self, breakpoints, interpolation, energy, energy_out,
precompound, slope):
super().__init__()
self.breakpoints = breakpoints
self.interpolation = interpolation
self.energy = energy
self.energy_out = energy_out
self.precompound = precompound
self.slope = slope
@property
def breakpoints(self):
return self._breakpoints
@property
def interpolation(self):
return self._interpolation
@property
def energy(self):
return self._energy
@property
def energy_out(self):
return self._energy_out
@property
def precompound(self):
return self._precompound
@property
def slope(self):
return self._slope
@breakpoints.setter
def breakpoints(self, breakpoints):
cv.check_type('Kalbach-Mann breakpoints', breakpoints,
Iterable, Integral)
self._breakpoints = breakpoints
@interpolation.setter
def interpolation(self, interpolation):
cv.check_type('Kalbach-Mann interpolation', interpolation,
Iterable, Integral)
self._interpolation = interpolation
@energy.setter
def energy(self, energy):
cv.check_type('Kalbach-Mann incoming energy', energy,
Iterable, Real)
self._energy = energy
@energy_out.setter
def energy_out(self, energy_out):
cv.check_type('Kalbach-Mann distributions', energy_out,
Iterable, Univariate)
self._energy_out = energy_out
@precompound.setter
def precompound(self, precompound):
cv.check_type('Kalbach-Mann precompound factor', precompound,
Iterable, Tabulated1D)
self._precompound = precompound
@slope.setter
def slope(self, slope):
cv.check_type('Kalbach-Mann slope', slope, Iterable, Tabulated1D)
self._slope = slope
def to_hdf5(self, group):
"""Write distribution to an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to write to
"""
group.attrs['type'] = np.string_('kalbach-mann')
dset = group.create_dataset('energy', data=self.energy)
dset.attrs['interpolation'] = np.vstack((self.breakpoints,
self.interpolation))
# Determine total number of (E,p,r,a) tuples and create array
n_tuple = sum(len(d) for d in self.energy_out)
distribution = np.empty((5, n_tuple))
# Create array for offsets
offsets = np.empty(len(self.energy_out), dtype=int)
interpolation = np.empty(len(self.energy_out), dtype=int)
n_discrete_lines = np.empty(len(self.energy_out), dtype=int)
j = 0
# Populate offsets and distribution array
for i, (eout, km_r, km_a) in enumerate(zip(
self.energy_out, self.precompound, self.slope)):
n = len(eout)
offsets[i] = j
if isinstance(eout, Mixture):
discrete, continuous = eout.distribution
n_discrete_lines[i] = m = len(discrete)
interpolation[i] = 1 if continuous.interpolation == 'histogram' else 2
distribution[0, j:j+m] = discrete.x
distribution[1, j:j+m] = discrete.p
distribution[2, j:j+m] = discrete.c
distribution[0, j+m:j+n] = continuous.x
distribution[1, j+m:j+n] = continuous.p
distribution[2, j+m:j+n] = continuous.c
else:
if isinstance(eout, Tabular):
n_discrete_lines[i] = 0
interpolation[i] = 1 if eout.interpolation == 'histogram' else 2
elif isinstance(eout, Discrete):
n_discrete_lines[i] = n
interpolation[i] = 1
distribution[0, j:j+n] = eout.x
distribution[1, j:j+n] = eout.p
distribution[2, j:j+n] = eout.c
distribution[3, j:j+n] = km_r.y
distribution[4, j:j+n] = km_a.y
j += n
# Create dataset for distributions
dset = group.create_dataset('distribution', data=distribution)
# Write interpolation as attribute
dset.attrs['offsets'] = offsets
dset.attrs['interpolation'] = interpolation
dset.attrs['n_discrete_lines'] = n_discrete_lines
@classmethod
def from_hdf5(cls, group):
"""Generate Kalbach-Mann distribution from HDF5 data
Parameters
----------
group : h5py.Group
HDF5 group to read from
Returns
-------
openmc.data.KalbachMann
Kalbach-Mann energy distribution
"""
interp_data = group['energy'].attrs['interpolation']
energy_breakpoints = interp_data[0, :]
energy_interpolation = interp_data[1, :]
energy = group['energy'][()]
data = group['distribution']
offsets = data.attrs['offsets']
interpolation = data.attrs['interpolation']
n_discrete_lines = data.attrs['n_discrete_lines']
energy_out = []
precompound = []
slope = []
n_energy = len(energy)
for i in range(n_energy):
# Determine length of outgoing energy distribution and number of
# discrete lines
j = offsets[i]
if i < n_energy - 1:
n = offsets[i+1] - j
else:
n = data.shape[1] - j
m = n_discrete_lines[i]
# Create discrete distribution if lines are present
if m > 0:
eout_discrete = Discrete(data[0, j:j+m], data[1, j:j+m])
eout_discrete.c = data[2, j:j+m]
p_discrete = eout_discrete.c[-1]
# Create continuous distribution
if m < n:
interp = INTERPOLATION_SCHEME[interpolation[i]]
eout_continuous = Tabular(data[0, j+m:j+n], data[1, j+m:j+n], interp)
eout_continuous.c = data[2, j+m:j+n]
# If both continuous and discrete are present, create a mixture
# distribution
if m == 0:
eout_i = eout_continuous
elif m == n:
eout_i = eout_discrete
else:
eout_i = Mixture([p_discrete, 1. - p_discrete],
[eout_discrete, eout_continuous])
# Precompound factor and slope are on rows 3 and 4, respectively
km_r = Tabulated1D(data[0, j:j+n], data[3, j:j+n])
km_a = Tabulated1D(data[0, j:j+n], data[4, j:j+n])
energy_out.append(eout_i)
precompound.append(km_r)
slope.append(km_a)
return cls(energy_breakpoints, energy_interpolation,
energy, energy_out, precompound, slope)
@classmethod
def from_ace(cls, ace, idx, ldis):
"""Generate Kalbach-Mann energy-angle distribution from ACE data
Parameters
----------
ace : openmc.data.ace.Table
ACE table to read from
idx : int
Index in XSS array of the start of the energy distribution data
(LDIS + LOCC - 1)
ldis : int
Index in XSS array of the start of the energy distribution block
(e.g. JXS[11])
Returns
-------
openmc.data.KalbachMann
Kalbach-Mann energy-angle distribution
"""
# Read number of interpolation regions and incoming energies
n_regions = int(ace.xss[idx])
n_energy_in = int(ace.xss[idx + 1 + 2*n_regions])
# Get interpolation information
idx += 1
if n_regions > 0:
breakpoints = ace.xss[idx:idx + n_regions].astype(int)
interpolation = ace.xss[idx + n_regions:idx + 2*n_regions].astype(int)
else:
breakpoints = np.array([n_energy_in])
interpolation = np.array([2])
# Incoming energies at which distributions exist
idx += 2*n_regions + 1
energy = ace.xss[idx:idx + n_energy_in]*EV_PER_MEV
# Location of distributions
idx += n_energy_in
loc_dist = ace.xss[idx:idx + n_energy_in].astype(int)
# Initialize variables
energy_out = []
km_r = []
km_a = []
# Read each outgoing energy distribution
for i in range(n_energy_in):
idx = ldis + loc_dist[i] - 1
# intt = interpolation scheme (1=hist, 2=lin-lin)
INTTp = int(ace.xss[idx])
intt = INTTp % 10
n_discrete_lines = (INTTp - intt)//10
if intt not in (1, 2):
warn("Interpolation scheme for continuous tabular distribution "
"is not histogram or linear-linear.")
intt = 2
n_energy_out = int(ace.xss[idx + 1])
data = ace.xss[idx + 2:idx + 2 + 5*n_energy_out].copy()
data.shape = (5, n_energy_out)
data[0,:] *= EV_PER_MEV
# Create continuous distribution
eout_continuous = Tabular(data[0][n_discrete_lines:],
data[1][n_discrete_lines:]/EV_PER_MEV,
INTERPOLATION_SCHEME[intt],
ignore_negative=True)
eout_continuous.c = data[2][n_discrete_lines:]
if np.any(data[1][n_discrete_lines:] < 0.0):
warn("Kalbach-Mann energy distribution has negative "
"probabilities.")
# If discrete lines are present, create a mixture distribution
if n_discrete_lines > 0:
eout_discrete = Discrete(data[0][:n_discrete_lines],
data[1][:n_discrete_lines])
eout_discrete.c = data[2][:n_discrete_lines]
if n_discrete_lines == n_energy_out:
eout_i = eout_discrete
else:
p_discrete = min(sum(eout_discrete.p), 1.0)
eout_i = Mixture([p_discrete, 1. - p_discrete],
[eout_discrete, eout_continuous])
else:
eout_i = eout_continuous
energy_out.append(eout_i)
km_r.append(Tabulated1D(data[0], data[3]))
km_a.append(Tabulated1D(data[0], data[4]))
return cls(breakpoints, interpolation, energy, energy_out, km_r, km_a)
@classmethod
def from_endf(cls, file_obj):
"""Generate Kalbach-Mann distribution from an ENDF evaluation
Parameters
----------
file_obj : file-like object
ENDF file positioned at the start of the Kalbach-Mann distribution
Returns
-------
openmc.data.KalbachMann
Kalbach-Mann energy-angle distribution
"""
params, tab2 = get_tab2_record(file_obj)
lep = params[3]
ne = params[5]
energy = np.zeros(ne)
n_discrete_energies = np.zeros(ne, dtype=int)
energy_out = []
precompound = []
slope = []
for i in range(ne):
items, values = get_list_record(file_obj)
energy[i] = items[1]
n_discrete_energies[i] = items[2]
# TODO: split out discrete energies
n_angle = items[3]
n_energy_out = items[5]
values = np.asarray(values)
values.shape = (n_energy_out, n_angle + 2)
# Outgoing energy distribution at the i-th incoming energy
eout_i = values[:,0]
eout_p_i = values[:,1]
energy_out_i = Tabular(eout_i, eout_p_i, INTERPOLATION_SCHEME[lep])
energy_out.append(energy_out_i)
# Precompound and slope factors for Kalbach-Mann
r_i = values[:,2]
if n_angle == 2:
a_i = values[:,3]
else:
a_i = np.zeros_like(r_i)
precompound.append(Tabulated1D(eout_i, r_i))
slope.append(Tabulated1D(eout_i, a_i))
return cls(tab2.breakpoints, tab2.interpolation, energy,
energy_out, precompound, slope)
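# A minimal construction sketch for this class (illustrative only; energies in
# eV and all numerical values are arbitrary rather than taken from any
# evaluation):
#
#   eout = Tabular([0.0, 1e6, 2e6], [1e-6, 5e-7, 0.0], 'linear-linear')
#   r = Tabulated1D([0.0, 1e6, 2e6], [0.1, 0.2, 0.3])
#   a = Tabulated1D([0.0, 1e6, 2e6], [0.5, 0.6, 0.7])
#   km = KalbachMann(breakpoints=[1], interpolation=[2], energy=[14e6],
#                    energy_out=[eout], precompound=[r], slope=[a])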
|
|
"""Provide a registry to track entity IDs.
The Entity Registry keeps a registry of entities. Entities are uniquely
identified by their domain, platform and a unique id provided by that platform.
The Entity Registry will persist itself 10 seconds after a new entity is
registered. Registering a new entity while a timer is in progress resets the
timer.
"""
from __future__ import annotations
from collections import OrderedDict
import logging
from typing import TYPE_CHECKING, Any, Callable, Iterable, cast
import attr
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_RESTORED,
ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT,
EVENT_HOMEASSISTANT_START,
STATE_UNAVAILABLE,
)
from homeassistant.core import (
Event,
HomeAssistant,
callback,
split_entity_id,
valid_entity_id,
)
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.device_registry import EVENT_DEVICE_REGISTRY_UPDATED
from homeassistant.loader import bind_hass
from homeassistant.util import slugify
from homeassistant.util.yaml import load_yaml
from .typing import UNDEFINED, UndefinedType
if TYPE_CHECKING:
from homeassistant.config_entries import ConfigEntry
PATH_REGISTRY = "entity_registry.yaml"
DATA_REGISTRY = "entity_registry"
EVENT_ENTITY_REGISTRY_UPDATED = "entity_registry_updated"
SAVE_DELAY = 10
_LOGGER = logging.getLogger(__name__)
DISABLED_CONFIG_ENTRY = "config_entry"
DISABLED_DEVICE = "device"
DISABLED_HASS = "hass"
DISABLED_INTEGRATION = "integration"
DISABLED_USER = "user"
STORAGE_VERSION = 1
STORAGE_KEY = "core.entity_registry"
# Attributes relevant to describing entity
# to external services.
ENTITY_DESCRIBING_ATTRIBUTES = {
"entity_id",
"name",
"original_name",
"capabilities",
"supported_features",
"device_class",
"unit_of_measurement",
}
@attr.s(slots=True, frozen=True)
class RegistryEntry:
"""Entity Registry Entry."""
entity_id: str = attr.ib()
unique_id: str = attr.ib()
platform: str = attr.ib()
name: str | None = attr.ib(default=None)
icon: str | None = attr.ib(default=None)
device_id: str | None = attr.ib(default=None)
area_id: str | None = attr.ib(default=None)
config_entry_id: str | None = attr.ib(default=None)
disabled_by: str | None = attr.ib(
default=None,
validator=attr.validators.in_(
(
DISABLED_CONFIG_ENTRY,
DISABLED_DEVICE,
DISABLED_HASS,
DISABLED_INTEGRATION,
DISABLED_USER,
None,
)
),
)
capabilities: dict[str, Any] | None = attr.ib(default=None)
supported_features: int = attr.ib(default=0)
device_class: str | None = attr.ib(default=None)
unit_of_measurement: str | None = attr.ib(default=None)
# As set by integration
original_name: str | None = attr.ib(default=None)
original_icon: str | None = attr.ib(default=None)
domain: str = attr.ib(init=False, repr=False)
@domain.default
def _domain_default(self) -> str:
"""Compute domain value."""
return split_entity_id(self.entity_id)[0]
@property
def disabled(self) -> bool:
"""Return if entry is disabled."""
return self.disabled_by is not None
@callback
def write_unavailable_state(self, hass: HomeAssistant) -> None:
"""Write the unavailable state to the state machine."""
attrs: dict[str, Any] = {ATTR_RESTORED: True}
if self.capabilities is not None:
attrs.update(self.capabilities)
if self.supported_features is not None:
attrs[ATTR_SUPPORTED_FEATURES] = self.supported_features
if self.device_class is not None:
attrs[ATTR_DEVICE_CLASS] = self.device_class
if self.unit_of_measurement is not None:
attrs[ATTR_UNIT_OF_MEASUREMENT] = self.unit_of_measurement
name = self.name or self.original_name
if name is not None:
attrs[ATTR_FRIENDLY_NAME] = name
icon = self.icon or self.original_icon
if icon is not None:
attrs[ATTR_ICON] = icon
hass.states.async_set(self.entity_id, STATE_UNAVAILABLE, attrs)
class EntityRegistry:
"""Class to hold a registry of entities."""
def __init__(self, hass: HomeAssistant):
"""Initialize the registry."""
self.hass = hass
self.entities: dict[str, RegistryEntry]
self._index: dict[tuple[str, str, str], str] = {}
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
self.hass.bus.async_listen(
EVENT_DEVICE_REGISTRY_UPDATED, self.async_device_modified
)
@callback
def async_get_device_class_lookup(self, domain_device_classes: set) -> dict:
"""Return a lookup for the device class by domain."""
lookup: dict[str, dict[tuple[Any, Any], str]] = {}
for entity in self.entities.values():
if not entity.device_id:
continue
domain_device_class = (entity.domain, entity.device_class)
if domain_device_class not in domain_device_classes:
continue
if entity.device_id not in lookup:
lookup[entity.device_id] = {domain_device_class: entity.entity_id}
else:
lookup[entity.device_id][domain_device_class] = entity.entity_id
return lookup
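    # Example shape of the returned mapping (informal, values are made up):
    #   {device_id: {("binary_sensor", "motion"): "binary_sensor.hall_motion",
    #                ("light", None): "light.hall"}}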
@callback
def async_is_registered(self, entity_id: str) -> bool:
"""Check if an entity_id is currently registered."""
return entity_id in self.entities
@callback
def async_get(self, entity_id: str) -> RegistryEntry | None:
"""Get EntityEntry for an entity_id."""
return self.entities.get(entity_id)
@callback
def async_get_entity_id(
self, domain: str, platform: str, unique_id: str
) -> str | None:
"""Check if an entity_id is currently registered."""
return self._index.get((domain, platform, unique_id))
@callback
def async_generate_entity_id(
self,
domain: str,
suggested_object_id: str,
known_object_ids: Iterable[str] | None = None,
) -> str:
"""Generate an entity ID that does not conflict.
Conflicts checked against registered and currently existing entities.
"""
preferred_string = f"{domain}.{slugify(suggested_object_id)}"
test_string = preferred_string
if not known_object_ids:
known_object_ids = {}
tries = 1
while (
test_string in self.entities
or test_string in known_object_ids
or not self.hass.states.async_available(test_string)
):
tries += 1
test_string = f"{preferred_string}_{tries}"
return test_string
@callback
def async_get_or_create(
self,
domain: str,
platform: str,
unique_id: str,
*,
# To influence entity ID generation
suggested_object_id: str | None = None,
known_object_ids: Iterable[str] | None = None,
# To disable an entity if it gets created
disabled_by: str | None = None,
# Data that we want entry to have
config_entry: ConfigEntry | None = None,
device_id: str | None = None,
area_id: str | None = None,
capabilities: dict[str, Any] | None = None,
supported_features: int | None = None,
device_class: str | None = None,
unit_of_measurement: str | None = None,
original_name: str | None = None,
original_icon: str | None = None,
) -> RegistryEntry:
"""Get entity. Create if it doesn't exist."""
config_entry_id = None
if config_entry:
config_entry_id = config_entry.entry_id
entity_id = self.async_get_entity_id(domain, platform, unique_id)
if entity_id:
return self._async_update_entity(
entity_id,
config_entry_id=config_entry_id or UNDEFINED,
device_id=device_id or UNDEFINED,
area_id=area_id or UNDEFINED,
capabilities=capabilities or UNDEFINED,
supported_features=supported_features or UNDEFINED,
device_class=device_class or UNDEFINED,
unit_of_measurement=unit_of_measurement or UNDEFINED,
original_name=original_name or UNDEFINED,
original_icon=original_icon or UNDEFINED,
# When we changed our slugify algorithm, we invalidated some
# stored entity IDs with either a __ or ending in _.
# Fix introduced in 0.86 (Jan 23, 2019). Next line can be
# removed when we release 1.0 or in 2020.
new_entity_id=".".join(
slugify(part) for part in entity_id.split(".", 1)
),
)
entity_id = self.async_generate_entity_id(
domain, suggested_object_id or f"{platform}_{unique_id}", known_object_ids
)
if (
disabled_by is None
and config_entry
and config_entry.system_options.disable_new_entities
):
disabled_by = DISABLED_INTEGRATION
entity = RegistryEntry(
entity_id=entity_id,
config_entry_id=config_entry_id,
device_id=device_id,
area_id=area_id,
unique_id=unique_id,
platform=platform,
disabled_by=disabled_by,
capabilities=capabilities,
supported_features=supported_features or 0,
device_class=device_class,
unit_of_measurement=unit_of_measurement,
original_name=original_name,
original_icon=original_icon,
)
self._register_entry(entity)
_LOGGER.info("Registered new %s.%s entity: %s", domain, platform, entity_id)
self.async_schedule_save()
self.hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED, {"action": "create", "entity_id": entity_id}
)
return entity
@callback
def async_remove(self, entity_id: str) -> None:
"""Remove an entity from registry."""
self._unregister_entry(self.entities[entity_id])
self.hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED, {"action": "remove", "entity_id": entity_id}
)
self.async_schedule_save()
@callback
def async_device_modified(self, event: Event) -> None:
"""Handle the removal or update of a device.
Remove entities from the registry that are associated to a device when
the device is removed.
Disable entities in the registry that are associated to a device when
the device is disabled.
"""
if event.data["action"] == "remove":
entities = async_entries_for_device(
self, event.data["device_id"], include_disabled_entities=True
)
for entity in entities:
self.async_remove(entity.entity_id)
return
if event.data["action"] != "update":
return
device_registry = dr.async_get(self.hass)
device = device_registry.async_get(event.data["device_id"])
# The device may be deleted already if the event handling is late
if not device or not device.disabled:
entities = async_entries_for_device(
self, event.data["device_id"], include_disabled_entities=True
)
for entity in entities:
if entity.disabled_by != DISABLED_DEVICE:
continue
self.async_update_entity(entity.entity_id, disabled_by=None)
return
if device.disabled_by == dr.DISABLED_CONFIG_ENTRY:
# Handled by async_config_entry_disabled
return
# Fetch entities which are not already disabled
entities = async_entries_for_device(self, event.data["device_id"])
for entity in entities:
self.async_update_entity(entity.entity_id, disabled_by=DISABLED_DEVICE)
@callback
def async_update_entity(
self,
entity_id: str,
*,
name: str | None | UndefinedType = UNDEFINED,
icon: str | None | UndefinedType = UNDEFINED,
area_id: str | None | UndefinedType = UNDEFINED,
new_entity_id: str | UndefinedType = UNDEFINED,
new_unique_id: str | UndefinedType = UNDEFINED,
disabled_by: str | None | UndefinedType = UNDEFINED,
) -> RegistryEntry:
"""Update properties of an entity."""
return self._async_update_entity(
entity_id,
name=name,
icon=icon,
area_id=area_id,
new_entity_id=new_entity_id,
new_unique_id=new_unique_id,
disabled_by=disabled_by,
)
@callback
def _async_update_entity(
self,
entity_id: str,
*,
name: str | None | UndefinedType = UNDEFINED,
icon: str | None | UndefinedType = UNDEFINED,
config_entry_id: str | None | UndefinedType = UNDEFINED,
new_entity_id: str | UndefinedType = UNDEFINED,
device_id: str | None | UndefinedType = UNDEFINED,
area_id: str | None | UndefinedType = UNDEFINED,
new_unique_id: str | UndefinedType = UNDEFINED,
disabled_by: str | None | UndefinedType = UNDEFINED,
capabilities: dict[str, Any] | None | UndefinedType = UNDEFINED,
supported_features: int | UndefinedType = UNDEFINED,
device_class: str | None | UndefinedType = UNDEFINED,
unit_of_measurement: str | None | UndefinedType = UNDEFINED,
original_name: str | None | UndefinedType = UNDEFINED,
original_icon: str | None | UndefinedType = UNDEFINED,
) -> RegistryEntry:
"""Private facing update properties method."""
old = self.entities[entity_id]
new_values = {} # Dict with new key/value pairs
old_values = {} # Dict with old key/value pairs
for attr_name, value in (
("name", name),
("icon", icon),
("config_entry_id", config_entry_id),
("device_id", device_id),
("area_id", area_id),
("disabled_by", disabled_by),
("capabilities", capabilities),
("supported_features", supported_features),
("device_class", device_class),
("unit_of_measurement", unit_of_measurement),
("original_name", original_name),
("original_icon", original_icon),
):
if value is not UNDEFINED and value != getattr(old, attr_name):
new_values[attr_name] = value
old_values[attr_name] = getattr(old, attr_name)
if new_entity_id is not UNDEFINED and new_entity_id != old.entity_id:
if self.async_is_registered(new_entity_id):
raise ValueError("Entity with this ID is already registered")
if not valid_entity_id(new_entity_id):
raise ValueError("Invalid entity ID")
if split_entity_id(new_entity_id)[0] != split_entity_id(entity_id)[0]:
raise ValueError("New entity ID should be same domain")
self.entities.pop(entity_id)
entity_id = new_values["entity_id"] = new_entity_id
old_values["entity_id"] = old.entity_id
if new_unique_id is not UNDEFINED:
conflict_entity_id = self.async_get_entity_id(
old.domain, old.platform, new_unique_id
)
if conflict_entity_id:
raise ValueError(
f"Unique id '{new_unique_id}' is already in use by "
f"'{conflict_entity_id}'"
)
new_values["unique_id"] = new_unique_id
old_values["unique_id"] = old.unique_id
if not new_values:
return old
self._remove_index(old)
new = attr.evolve(old, **new_values)
self._register_entry(new)
self.async_schedule_save()
data = {"action": "update", "entity_id": entity_id, "changes": old_values}
if old.entity_id != entity_id:
data["old_entity_id"] = old.entity_id
self.hass.bus.async_fire(EVENT_ENTITY_REGISTRY_UPDATED, data)
return new
async def async_load(self) -> None:
"""Load the entity registry."""
async_setup_entity_restore(self.hass, self)
data = await self.hass.helpers.storage.async_migrator(
self.hass.config.path(PATH_REGISTRY),
self._store,
old_conf_load_func=load_yaml,
old_conf_migrate_func=_async_migrate,
)
entities: dict[str, RegistryEntry] = OrderedDict()
if data is not None:
for entity in data["entities"]:
# Some old installations can have some bad entities.
# Filter them out as they cause errors down the line.
# Can be removed in Jan 2021
if not valid_entity_id(entity["entity_id"]):
continue
entities[entity["entity_id"]] = RegistryEntry(
entity_id=entity["entity_id"],
config_entry_id=entity.get("config_entry_id"),
device_id=entity.get("device_id"),
area_id=entity.get("area_id"),
unique_id=entity["unique_id"],
platform=entity["platform"],
name=entity.get("name"),
icon=entity.get("icon"),
disabled_by=entity.get("disabled_by"),
capabilities=entity.get("capabilities") or {},
supported_features=entity.get("supported_features", 0),
device_class=entity.get("device_class"),
unit_of_measurement=entity.get("unit_of_measurement"),
original_name=entity.get("original_name"),
original_icon=entity.get("original_icon"),
)
self.entities = entities
self._rebuild_index()
@callback
def async_schedule_save(self) -> None:
"""Schedule saving the entity registry."""
self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self) -> dict[str, Any]:
"""Return data of entity registry to store in a file."""
data = {}
data["entities"] = [
{
"entity_id": entry.entity_id,
"config_entry_id": entry.config_entry_id,
"device_id": entry.device_id,
"area_id": entry.area_id,
"unique_id": entry.unique_id,
"platform": entry.platform,
"name": entry.name,
"icon": entry.icon,
"disabled_by": entry.disabled_by,
"capabilities": entry.capabilities,
"supported_features": entry.supported_features,
"device_class": entry.device_class,
"unit_of_measurement": entry.unit_of_measurement,
"original_name": entry.original_name,
"original_icon": entry.original_icon,
}
for entry in self.entities.values()
]
return data
@callback
def async_clear_config_entry(self, config_entry: str) -> None:
"""Clear config entry from registry entries."""
for entity_id in [
entity_id
for entity_id, entry in self.entities.items()
if config_entry == entry.config_entry_id
]:
self.async_remove(entity_id)
@callback
def async_clear_area_id(self, area_id: str) -> None:
"""Clear area id from registry entries."""
for entity_id, entry in self.entities.items():
if area_id == entry.area_id:
self._async_update_entity(entity_id, area_id=None)
def _register_entry(self, entry: RegistryEntry) -> None:
self.entities[entry.entity_id] = entry
self._add_index(entry)
def _add_index(self, entry: RegistryEntry) -> None:
self._index[(entry.domain, entry.platform, entry.unique_id)] = entry.entity_id
def _unregister_entry(self, entry: RegistryEntry) -> None:
self._remove_index(entry)
del self.entities[entry.entity_id]
def _remove_index(self, entry: RegistryEntry) -> None:
del self._index[(entry.domain, entry.platform, entry.unique_id)]
def _rebuild_index(self) -> None:
self._index = {}
for entry in self.entities.values():
self._add_index(entry)
@callback
def async_get(hass: HomeAssistant) -> EntityRegistry:
"""Get entity registry."""
return cast(EntityRegistry, hass.data[DATA_REGISTRY])
async def async_load(hass: HomeAssistant) -> None:
"""Load entity registry."""
assert DATA_REGISTRY not in hass.data
hass.data[DATA_REGISTRY] = EntityRegistry(hass)
await hass.data[DATA_REGISTRY].async_load()
@bind_hass
async def async_get_registry(hass: HomeAssistant) -> EntityRegistry:
"""Get entity registry.
This is deprecated and will be removed in the future. Use async_get instead.
"""
return async_get(hass)
@callback
def async_entries_for_device(
registry: EntityRegistry, device_id: str, include_disabled_entities: bool = False
) -> list[RegistryEntry]:
"""Return entries that match a device."""
return [
entry
for entry in registry.entities.values()
if entry.device_id == device_id
and (not entry.disabled_by or include_disabled_entities)
]
@callback
def async_entries_for_area(
registry: EntityRegistry, area_id: str
) -> list[RegistryEntry]:
"""Return entries that match an area."""
return [entry for entry in registry.entities.values() if entry.area_id == area_id]
@callback
def async_entries_for_config_entry(
registry: EntityRegistry, config_entry_id: str
) -> list[RegistryEntry]:
"""Return entries that match a config entry."""
return [
entry
for entry in registry.entities.values()
if entry.config_entry_id == config_entry_id
]
@callback
def async_config_entry_disabled_by_changed(
registry: EntityRegistry, config_entry: ConfigEntry
) -> None:
"""Handle a config entry being disabled or enabled.
Disable entities in the registry that are associated with a config entry when
the config entry is disabled, enable entities in the registry that are associated
with a config entry when the config entry is enabled and the entities are marked
DISABLED_CONFIG_ENTRY.
"""
entities = async_entries_for_config_entry(registry, config_entry.entry_id)
if not config_entry.disabled_by:
for entity in entities:
if entity.disabled_by != DISABLED_CONFIG_ENTRY:
continue
registry.async_update_entity(entity.entity_id, disabled_by=None)
return
for entity in entities:
if entity.disabled:
# Entity already disabled, do not overwrite
continue
registry.async_update_entity(
entity.entity_id, disabled_by=DISABLED_CONFIG_ENTRY
)
async def _async_migrate(entities: dict[str, Any]) -> dict[str, list[dict[str, Any]]]:
"""Migrate the YAML config file to storage helper format."""
return {
"entities": [
{"entity_id": entity_id, **info} for entity_id, info in entities.items()
]
}
@callback
def async_setup_entity_restore(hass: HomeAssistant, registry: EntityRegistry) -> None:
"""Set up the entity restore mechanism."""
@callback
def cleanup_restored_states_filter(event: Event) -> bool:
"""Clean up restored states filter."""
return bool(event.data["action"] == "remove")
@callback
def cleanup_restored_states(event: Event) -> None:
"""Clean up restored states."""
state = hass.states.get(event.data["entity_id"])
if state is None or not state.attributes.get(ATTR_RESTORED):
return
hass.states.async_remove(event.data["entity_id"], context=event.context)
hass.bus.async_listen(
EVENT_ENTITY_REGISTRY_UPDATED,
cleanup_restored_states,
event_filter=cleanup_restored_states_filter,
)
if hass.is_running:
return
@callback
def _write_unavailable_states(_: Event) -> None:
"""Make sure state machine contains entry for each registered entity."""
existing = set(hass.states.async_entity_ids())
for entry in registry.entities.values():
if entry.entity_id in existing or entry.disabled:
continue
entry.write_unavailable_state(hass)
hass.bus.async_listen(EVENT_HOMEASSISTANT_START, _write_unavailable_states)
async def async_migrate_entries(
hass: HomeAssistant,
config_entry_id: str,
entry_callback: Callable[[RegistryEntry], dict | None],
) -> None:
"""Migrator of unique IDs."""
ent_reg = await async_get_registry(hass)
for entry in ent_reg.entities.values():
if entry.config_entry_id != config_entry_id:
continue
updates = entry_callback(entry)
if updates is not None:
ent_reg.async_update_entity(entry.entity_id, **updates)
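# Hedged usage sketch (not part of Home Assistant itself): how an integration
# might register an entity and later rename it. The "hue" platform name and
# the unique id below are made-up example values.
async def _example_register_and_rename(hass: HomeAssistant) -> None:
    """Register a hypothetical light and give it a user-defined name."""
    registry = async_get(hass)
    entry = registry.async_get_or_create(
        "light",
        "hue",
        "00:17:88:01:00:aa:bb:cc",
        suggested_object_id="kitchen_ceiling",
        original_name="Kitchen Ceiling",
    )
    # A conflicting object id would get a numeric suffix, e.g.
    # light.kitchen_ceiling_2.
    registry.async_update_entity(entry.entity_id, name="Kitchen")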
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""Extract parameters from the DMA operators in TIR."""
import tvm
from .utils import get_outer_loops, get_base_address, get_strides, get_op_attrs
from .spec import SerialFeatureMap, SerialPadding
def get_pad_params(stmt):
"""Get the padding parameters from a pad loop nest.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a pad loop nest.
Returns
-------
pad : SerialPadding
The serializable padding.
input_pointer : tvm.tir.Var
The pointer consumed by the operation.
output_pointer : tvm.tir.Var
The pointer produced by the operation.
"""
_, body = get_op_attrs(stmt)
n, h, w, c, _, inner = get_outer_loops(body, "NHWC")
output_pointer = inner.buffer_var
pad = SerialPadding(top=0, left=0, bottom=0, right=0)
if isinstance(inner.value, tvm.tir.Call):
input_pointer = inner.value.args[1].buffer_var
else:
        # Direct load (no padding Select): return zero padding immediately.
        input_pointer = inner.value.buffer_var
        return pad, input_pointer, output_pointer
padded_shape = [n.extent, h.extent, w.extent, c.extent]
def _visit(expr):
if isinstance(expr, tvm.tir.expr.LT):
var = expr.a
val = expr.b
if var == h.loop_var:
pad.bottom = padded_shape[1] - val
else:
pad.right = padded_shape[2] - val
elif isinstance(expr, tvm.tir.expr.LE):
var = expr.b
val = expr.a
if var == h.loop_var:
pad.top = val
else:
pad.left = val
cond = inner.value.args[0]
tvm.tir.stmt_functor.post_order_visit(cond, _visit)
return (
pad,
input_pointer,
output_pointer,
)
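# Worked example (informal): for an NHWC pad with top=1 and bottom=2 applied
# to a padded height of 8, the select condition generated by the pad compute
# is roughly "1 <= h && h < 6". The LE branch of the _visit helper above then
# recovers pad.top = 1, and the LT branch recovers pad.bottom = 8 - 6 = 2;
# width padding is recovered the same way from the w loop variable.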
def get_convert_to_nhwc_params(stmt):
"""Get the true number of channels from a convert_to_nhwc loop nest.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a convert_to_nhwc loop nest.
Returns
-------
int
The true number of channels.
input_pointer : tvm.tir.Var
The pointer consumed by the operation.
output_pointer : tvm.tir.Var
The pointer produced by the operation.
"""
attrs, body = get_op_attrs(stmt)
_, _, _, c, _, inner = get_outer_loops(body, "NHWC")
    # Ignore the reduce sum operation inserted to ensure
    # compute that is deemed unnecessary isn't removed by TVM.
if attrs["layout"] == "NHCWB16":
inner = inner.body
input_pointer = inner.value.b.buffer_var
else:
input_pointer = inner.value.buffer_var
output_pointer = inner.buffer_var
return c.extent, input_pointer, output_pointer
def get_convert_to_nhcwb16_params(stmt):
"""Get the true number of channels from a convert_to_nhcwb16 loop nest.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a convert_to_nhcwb16 loop nest.
Returns
-------
out_channels : int
The true number of channels.
input_pointer : tvm.tir.Var
The pointer consumed by the operation.
output_pointer : tvm.tir.Var
The pointer produced by the operation.
"""
attrs, body = get_op_attrs(stmt)
_, _, _, c, b, inner = get_outer_loops(body, attrs["layout"])
output_pointer = inner.buffer_var
if isinstance(inner.value, tvm.tir.Call):
cond = inner.value.args[0]
out_channels = cond.b.value
input_pointer = inner.value.args[1].buffer_var
else:
input_pointer = inner.value.buffer_var
out_channels = c.extent * b.extent if attrs["layout"] == "NHCWB16" else c.extent
return out_channels, input_pointer, output_pointer
def get_read_params(stmt):
"""Get the feature map parameters from a read loop nest.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a read loop nest.
Returns
-------
SerialFeatureMap
The serializable feature map.
input_pointer : tvm.tir.Var
The pointer consumed by the operation.
output_pointer : tvm.tir.Var
The pointer produced by the operation.
"""
attrs, body = get_op_attrs(stmt)
_, h, w, c, _, inner = get_outer_loops(body, attrs["layout"])
input_pointer = inner.value.buffer_var
output_pointer = inner.buffer_var
stride_vars = [h.loop_var, w.loop_var, c.loop_var]
strides = get_strides(inner.value.index, stride_vars)
base_address = get_base_address(inner.value.index)
data_type = inner.buffer_var.type_annotation.element_type.dtype
return (
SerialFeatureMap(
data_type=data_type,
height=h.extent,
width=w.extent,
channels=c.extent,
tile_height_0=h.extent,
tile_height_1=0,
tile_width_0=w.extent,
tile_address_0=tvm.tir.Load(data_type, inner.value.buffer_var, base_address),
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=attrs["scale"],
zero_point=attrs["zero_point"],
layout=attrs["layout"],
stride_h=strides[0],
stride_w=strides[1],
stride_c=strides[2],
),
input_pointer,
output_pointer,
)
def get_write_params(stmt):
"""Get the feature map parameters from a write loop nest.
Parameters
----------
stmt : tvm.tir.AttrStmt
The outermost attribute statement of a write loop nest.
Returns
-------
SerialFeatureMap
The serializable feature map.
input_pointer : tvm.tir.Var
The pointer consumed by the operation.
output_pointer : tvm.tir.Var
The pointer produced by the operation.
"""
attrs, body = get_op_attrs(stmt)
_, h, w, c, _, inner = get_outer_loops(body, attrs["layout"])
input_pointer = inner.value.buffer_var
output_pointer = inner.buffer_var
stride_vars = [h.loop_var, w.loop_var, c.loop_var]
strides = get_strides(inner.index, stride_vars)
base_address = get_base_address(inner.index)
data_type = inner.buffer_var.type_annotation.element_type.dtype
return (
SerialFeatureMap(
data_type=data_type,
height=h.extent,
width=w.extent,
channels=c.extent,
tile_height_0=h.extent,
tile_height_1=0,
tile_width_0=w.extent,
tile_address_0=tvm.tir.Load(data_type, inner.buffer_var, base_address),
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=attrs["scale"],
zero_point=attrs["zero_point"],
layout=attrs["layout"],
stride_h=strides[0],
stride_w=strides[1],
stride_c=strides[2],
),
input_pointer,
output_pointer,
)
def get_ifm_params(pointer, producers):
"""Get the parameters associated with the DMA capabilities for an IFM.
Parameters
----------
pointer : tvm.tir.Var
The pointer that the IFM DMA pipeline produces.
producers : dict of tvm.tir.Var to tvm.tir.AttrStmt
A dictionary to associate pointers with the loop nest
that produces their values.
Returns
-------
serial_ifm : SerialFeatureMap
The serializable IFM.
serial_padding : SerialPadding
The serializable padding.
"""
pad = producers[pointer]
serial_padding, input_pointer, _ = get_pad_params(pad)
convert_to_nhwc = producers[input_pointer]
in_channels, input_pointer, _ = get_convert_to_nhwc_params(convert_to_nhwc)
read = producers[input_pointer]
serial_ifm, _, _ = get_read_params(read)
serial_ifm.channels = in_channels
return serial_ifm, serial_padding
def get_ofm_params(pointer, consumers, producers):
"""Get the parameters associated with the DMA capabilities for an OFM.
Parameters
----------
pointer : tvm.tir.Var
The pointer that the OFM DMA pipeline consumes.
consumers : dict of tvm.tir.Var to tvm.tir.AttrStmt
A dictionary to associate pointers with the loop nest
that consumes their values.
producers : dict of tvm.tir.Var to tvm.tir.AttrStmt
A dictionary to associate pointers with the loop nest
that produces their values.
Returns
-------
    serial_ofm : SerialFeatureMap
The serializable OFM.
output_pointer : tvm.tir.Var
The pointer that the OFM DMA pipeline produces.
is_allocator : bool
Whether this operator allocates its output.
"""
convert_to_nhcwb16 = consumers[pointer]
out_channels, _, output_pointer = get_convert_to_nhcwb16_params(convert_to_nhcwb16)
write = consumers[output_pointer]
serial_ofm, _, output_pointer = get_write_params(write)
is_allocator = True
if output_pointer not in producers:
is_allocator = False
elif producers[output_pointer] != write:
is_allocator = False
serial_ofm.channels = out_channels
return serial_ofm, output_pointer, is_allocator
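# Informal sketch of how the two helpers above are chained by a caller that
# has already walked the TIR and built pointer -> loop-nest maps (the variable
# names here are illustrative, not TVM API):
#
#     serial_ifm, serial_padding = get_ifm_params(ifm_pointer, producers)
#     serial_ofm, ofm_pointer, is_allocator = get_ofm_params(
#         ofm_pointer, consumers, producers)
#
# i.e. the IFM side is traced backwards through pad -> convert_to_nhwc -> read,
# and the OFM side forwards through convert_to_nhcwb16 -> write.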
|
|
from __future__ import with_statement
import sys
import os
import subprocess
from contextlib import contextmanager
from cStringIO import StringIO
from virtstrap.log import logger as main_logger
# The following function is taken from werkzeug.utils
def import_string(import_name, silent=False):
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
For better debugging we recommend the new :func:`import_module`
function to be used instead.
:param import_name: the dotted name for the object to import.
:param silent: if set to `True` import errors are ignored and
`None` is returned instead.
:return: imported object
"""
# force the import name to automatically convert to strings
if isinstance(import_name, unicode):
import_name = str(import_name)
try:
if ':' in import_name:
module, obj = import_name.split(':', 1)
elif '.' in import_name:
module, obj = import_name.rsplit('.', 1)
else:
return __import__(import_name)
# __import__ is not able to handle unicode strings in the fromlist
# if the module is a package
if isinstance(obj, unicode):
obj = obj.encode('utf-8')
try:
return getattr(__import__(module, None, None, [obj]), obj)
except (ImportError, AttributeError):
# support importing modules not yet set up by the parent module
# (or package for that matter)
modname = module + '.' + obj
__import__(modname)
return sys.modules[modname]
except ImportError, e:
if not silent:
raise ImportStringError(import_name, e), None, sys.exc_info()[2]
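# Informal examples: both of the following resolve the same attribute,
#     import_string('xml.sax.saxutils.escape')
#     import_string('xml.sax.saxutils:escape')
# and with silent=True a failed import returns None instead of raising
# ImportStringError.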
# The following class is taken from werkzeug.utils
class ImportStringError(ImportError):
"""Provides information about a failed :func:`import_string` attempt."""
#: String in dotted notation that failed to be imported.
import_name = None
#: Wrapped exception.
exception = None
def __init__(self, import_name, exception):
self.import_name = import_name
self.exception = exception
msg = (
'import_string() failed for %r. Possible reasons are:\n\n'
'- missing __init__.py in a package;\n'
'- package or module path not included in sys.path;\n'
'- duplicated package or module name taking precedence in '
'sys.path;\n'
'- missing module, class, function or variable;\n\n'
'Debugged import:\n\n%s\n\n'
'Original exception:\n\n%s: %s')
name = ''
tracked = []
for part in import_name.replace(':', '.').split('.'):
name += (name and '.') + part
imported = import_string(name, silent=True)
if imported:
tracked.append((name, getattr(imported, '__file__', None)))
else:
track = ['- %r found in %r.' % (n, i) for n, i in tracked]
track.append('- %r not found.' % name)
msg = msg % (import_name, '\n'.join(track),
exception.__class__.__name__, str(exception))
break
ImportError.__init__(self, msg)
def __repr__(self):
return '<%s(%r, %r)>' % (self.__class__.__name__, self.import_name,
self.exception)
# The following function is modified from virtualenv
def call_subprocess(cmd, show_stdout=True,
filter_stdout=None, cwd=None,
raise_on_returncode=True, extra_env=None,
remove_from_env=None, logger=None,
python_unbuffered=False,
collect_stdout=False):
collected_stdout = None
stdout_receiver = None
if collect_stdout:
stdout_receiver = StringIO()
show_stdout = False
logger = logger or main_logger
cmd_parts = []
for part in cmd:
if len(part) > 45:
part = part[:20]+"..."+part[-20:]
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
if hasattr(part, 'decode'):
try:
part = part.decode(sys.getdefaultencoding())
except UnicodeDecodeError:
part = part.decode(sys.getfilesystemencoding())
cmd_parts.append(part)
cmd_desc = ' '.join(cmd_parts)
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
logger.debug("Running command %s" % cmd_desc)
if extra_env or remove_from_env or python_unbuffered:
env = os.environ.copy()
if extra_env:
env.update(extra_env)
if remove_from_env:
for varname in remove_from_env:
env.pop(varname, None)
if python_unbuffered:
            # Set this if you'd like to process each line of output
            # from the process immediately. This only has an effect
            # for Python subprocesses.
env['PYTHONUNBUFFERED'] = 'unbuffered'
else:
env = None
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout,
cwd=cwd, env=env)
except Exception:
e = sys.exc_info()[1]
logger.critical(
"Error %s while executing command %s" % (e, cmd_desc))
raise
all_output = []
if stdout is not None:
stdout = proc.stdout
encoding = sys.getdefaultencoding()
fs_encoding = sys.getfilesystemencoding()
while 1:
line = stdout.readline()
try:
line = line.decode(encoding)
except UnicodeDecodeError:
line = line.decode(fs_encoding)
if not line:
break
line = line.rstrip()
all_output.append(line)
if filter_stdout:
level = filter_stdout(line)
if isinstance(level, tuple):
level, line = level
logger.log(level, line)
# FIXME This is virtualenv specific. We need to get rid
# of it
if not logger.stdout_level_matches(level):
logger.show_progress()
else:
                if stdout_receiver:  # Expects a file-like object.
                    stdout_receiver.write('%s\n' % line)
logger.debug(line)
else:
proc.communicate()
proc.wait()
if proc.returncode:
if raise_on_returncode:
if all_output:
logger.debug('Complete output from command %s:' % cmd_desc)
logger.debug('\n'.join(all_output) + '\n----------------------------------------')
raise OSError(
"Command %s failed with error code %s"
% (cmd_desc, proc.returncode))
else:
logger.warn(
"Command %s had error code %s"
% (cmd_desc, proc.returncode))
if stdout_receiver:
stdout_receiver.seek(0)
collected_stdout = stdout_receiver.read()
return collected_stdout
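# Informal usage sketch (the commands shown are arbitrary examples):
#     output = call_subprocess(['python', '--version'], collect_stdout=True)
#     call_subprocess(['echo', 'hello'], show_stdout=True)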
class ChangedWorkingDirectory(object):
def __init__(self, directory):
self._directory = directory
self._original_directory = os.getcwd()
def __enter__(self):
# Change the directory to the new cwd
directory = self._directory
# Change to the new directory
os.chdir(directory)
# Return the directory
return directory
def __exit__(self, ex_type, ex_value, traceback):
# Return back to normal
os.chdir(self._original_directory)
@contextmanager
def in_directory(directory):
"""Context manager for changing CWD to a directory
Don't use this if you plan on writing files to the directory.
This does not delete anything. It is purely to change the CWD
"""
with ChangedWorkingDirectory(directory) as directory:
yield directory
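# Informal usage sketch ('/tmp' is an arbitrary example path):
#     with in_directory('/tmp'):
#         pass  # os.getcwd() is '/tmp' here; the original CWD is restored after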
|
|
#!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
"""Tests for the stats classes."""
import time
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import stats
from grr.lib import test_lib
class StatsTests(test_lib.GRRBaseTest):
"""Stats collection tests."""
def Sleep(self, n):
self.mock_time += n
def setUp(self):
super(StatsTests, self).setUp()
self.mock_time = 100.0
self.time_orig = time.time
time.time = lambda: self.mock_time
def tearDown(self):
time.time = self.time_orig
def testSimpleCounter(self):
stats.STATS.RegisterCounterMetric("test_counter")
self.assertEqual(0, stats.STATS.GetMetricValue("test_counter"))
for _ in range(5):
stats.STATS.IncrementCounter("test_counter")
self.assertEqual(5, stats.STATS.GetMetricValue("test_counter"))
stats.STATS.IncrementCounter("test_counter", 2)
self.assertEqual(7, stats.STATS.GetMetricValue("test_counter"))
def testDecrementingCounterRaises(self):
stats.STATS.RegisterCounterMetric("test_counter")
self.assertRaises(ValueError,
stats.STATS.IncrementCounter, "test_counter", -1)
def testCounterWithFields(self):
stats.STATS.RegisterCounterMetric("test_counter", [("dimension", str)])
    # Test that the default value for any field value is 0.
self.assertEqual(0, stats.STATS.GetMetricValue("test_counter",
fields=["a"]))
self.assertEqual(0, stats.STATS.GetMetricValue("test_counter",
fields=["b"]))
for _ in range(5):
stats.STATS.IncrementCounter("test_counter", fields=["dimension_value_1"])
self.assertEqual(5, stats.STATS.GetMetricValue(
"test_counter", fields=["dimension_value_1"]))
stats.STATS.IncrementCounter("test_counter", 2,
fields=["dimension_value_1"])
self.assertEqual(7, stats.STATS.GetMetricValue(
"test_counter", fields=["dimension_value_1"]))
stats.STATS.IncrementCounter("test_counter", 2,
fields=["dimension_value_2"])
self.assertEqual(2, stats.STATS.GetMetricValue(
"test_counter", fields=["dimension_value_2"]))
# Check that previously set values with other fields are not affected.
self.assertEqual(7, stats.STATS.GetMetricValue(
"test_counter", fields=["dimension_value_1"]))
def testSimpleGauge(self):
stats.STATS.RegisterGaugeMetric("test_int_gauge", int)
stats.STATS.RegisterGaugeMetric("test_string_gauge", str)
self.assertEqual(0, stats.STATS.GetMetricValue("test_int_gauge"))
self.assertEqual("", stats.STATS.GetMetricValue("test_string_gauge"))
stats.STATS.SetGaugeValue("test_int_gauge", 42)
stats.STATS.SetGaugeValue("test_string_gauge", "some")
self.assertEqual(42, stats.STATS.GetMetricValue("test_int_gauge"))
self.assertEqual("some", stats.STATS.GetMetricValue("test_string_gauge"))
    # At least default Python type checking is enforced in gauges:
# we can't assign string to int
self.assertRaises(ValueError,
stats.STATS.SetGaugeValue, "test_int_gauge", "some")
# but we can assign int to string
stats.STATS.SetGaugeValue("test_string_gauge", 42)
def testGaugeWithFields(self):
stats.STATS.RegisterGaugeMetric("test_int_gauge", int,
fields=[("dimension", str)])
self.assertEqual(0, stats.STATS.GetMetricValue(
"test_int_gauge", fields=["dimension_value_1"]))
self.assertEqual(0, stats.STATS.GetMetricValue(
"test_int_gauge", fields=["dimesnioN_value_2"]))
stats.STATS.SetGaugeValue("test_int_gauge", 1,
fields=["dimension_value_1"])
stats.STATS.SetGaugeValue("test_int_gauge", 2,
fields=["dimension_value_2"])
self.assertEqual(1, stats.STATS.GetMetricValue(
"test_int_gauge", fields=["dimension_value_1"]))
self.assertEqual(2, stats.STATS.GetMetricValue(
"test_int_gauge", fields=["dimension_value_2"]))
def testGaugeWithCallback(self):
stats.STATS.RegisterGaugeMetric("test_int_gauge", int)
stats.STATS.RegisterGaugeMetric("test_string_gauge", str)
self.assertEqual(0, stats.STATS.GetMetricValue("test_int_gauge"))
self.assertEqual("", stats.STATS.GetMetricValue("test_string_gauge"))
stats.STATS.SetGaugeCallback("test_int_gauge", lambda: 42)
stats.STATS.SetGaugeCallback("test_string_gauge", lambda: "some")
self.assertEqual(42, stats.STATS.GetMetricValue("test_int_gauge"))
self.assertEqual("some", stats.STATS.GetMetricValue("test_string_gauge"))
def testSimpleEventMetric(self):
inf = float("inf")
stats.STATS.RegisterEventMetric("test_event_metric", bins=[0.0, 0.1, 0.2])
data = stats.STATS.GetMetricValue("test_event_metric")
self.assertAlmostEqual(0, data.sum)
self.assertEqual(0, data.count)
self.assertEqual([-inf, 0.0, 0.1, 0.2], data.bins)
self.assertEqual({-inf: 0, 0.0: 0, 0.1: 0, 0.2: 0}, data.bins_heights)
stats.STATS.RecordEvent("test_event_metric", 0.15)
data = stats.STATS.GetMetricValue("test_event_metric")
self.assertAlmostEqual(0.15, data.sum)
self.assertEqual(1, data.count)
self.assertEqual([-inf, 0.0, 0.1, 0.2], data.bins)
self.assertEqual({-inf: 0, 0.0: 0, 0.1: 1, 0.2: 0}, data.bins_heights)
stats.STATS.RecordEvent("test_event_metric", 0.5)
data = stats.STATS.GetMetricValue("test_event_metric")
self.assertAlmostEqual(0.65, data.sum)
self.assertEqual(2, data.count)
self.assertEqual([-inf, 0.0, 0.1, 0.2], data.bins)
self.assertEqual({-inf: 0, 0.0: 0, 0.1: 1, 0.2: 1}, data.bins_heights)
stats.STATS.RecordEvent("test_event_metric", -0.1)
data = stats.STATS.GetMetricValue("test_event_metric")
self.assertAlmostEqual(0.55, data.sum)
self.assertEqual(3, data.count)
self.assertEqual([-inf, 0.0, 0.1, 0.2], data.bins)
self.assertEqual({-inf: 1, 0.0: 0, 0.1: 1, 0.2: 1}, data.bins_heights)
def testEventMetricWithFields(self):
inf = float("inf")
stats.STATS.RegisterEventMetric("test_event_metric", bins=[0.0, 0.1, 0.2],
fields=[("dimension", str)])
data = stats.STATS.GetMetricValue("test_event_metric",
fields=["dimension_value_1"])
self.assertAlmostEqual(0, data.sum)
self.assertEqual(0, data.count)
self.assertEqual([-inf, 0.0, 0.1, 0.2], data.bins)
self.assertEqual({-inf: 0, 0.0: 0, 0.1: 0, 0.2: 0}, data.bins_heights)
stats.STATS.RecordEvent("test_event_metric", 0.15,
fields=["dimension_value_1"])
stats.STATS.RecordEvent("test_event_metric", 0.25,
fields=["dimension_value_2"])
data = stats.STATS.GetMetricValue("test_event_metric",
fields=["dimension_value_1"])
self.assertAlmostEqual(0.15, data.sum)
self.assertEqual(1, data.count)
self.assertEqual([-inf, 0.0, 0.1, 0.2], data.bins)
self.assertEqual({-inf: 0, 0.0: 0, 0.1: 1, 0.2: 0}, data.bins_heights)
data = stats.STATS.GetMetricValue("test_event_metric",
fields=["dimension_value_2"])
self.assertAlmostEqual(0.25, data.sum)
self.assertEqual(1, data.count)
self.assertEqual([-inf, 0.0, 0.1, 0.2], data.bins)
self.assertEqual({-inf: 0, 0.0: 0, 0.1: 0, 0.2: 1}, data.bins_heights)
def testRaisesOnImproperFieldsUsage1(self):
# Check for counters
stats.STATS.RegisterCounterMetric("test_counter")
self.assertRaises(ValueError,
stats.STATS.GetMetricValue, "test_counter",
fields=["a"])
# Check for gauges
stats.STATS.RegisterGaugeMetric("test_int_gauge", int)
self.assertRaises(ValueError,
stats.STATS.GetMetricValue, "test_int_gauge",
fields=["a"])
# Check for event metrics
stats.STATS.RegisterEventMetric("test_event_metric")
self.assertRaises(ValueError,
stats.STATS.GetMetricValue, "test_event_metric",
fields=["a", "b"])
def testRaisesOnImproperFieldsUsage2(self):
# Check for counters
stats.STATS.RegisterCounterMetric("test_counter",
fields=[("dimension", str)])
self.assertRaises(ValueError,
stats.STATS.GetMetricValue, "test_counter")
self.assertRaises(ValueError,
stats.STATS.GetMetricValue, "test_counter",
fields=["a", "b"])
# Check for gauges
stats.STATS.RegisterGaugeMetric("test_int_gauge", int,
fields=[("dimension", str)])
self.assertRaises(ValueError,
stats.STATS.GetMetricValue, "test_int_gauge")
self.assertRaises(ValueError,
stats.STATS.GetMetricValue, "test_int_gauge",
fields=["a", "b"])
# Check for event metrics
stats.STATS.RegisterEventMetric("test_event_metric",
fields=[("dimension", str)])
self.assertRaises(ValueError,
stats.STATS.GetMetricValue, "test_event_metric")
self.assertRaises(ValueError,
stats.STATS.GetMetricValue, "test_event_metric",
fields=["a", "b"])
def testGetAllMetricsMetadataWorksCorrectlyOnSimpleMetrics(self):
stats.STATS.RegisterCounterMetric("test_counter")
stats.STATS.RegisterGaugeMetric("test_int_gauge", int,
fields=[("dimension", str)])
stats.STATS.RegisterEventMetric("test_event_metric")
metrics = stats.STATS.GetAllMetricsMetadata()
self.assertEqual(metrics["test_counter"].metric_type,
stats.MetricType.COUNTER)
self.assertFalse(metrics["test_counter"].fields_defs)
self.assertEqual(metrics["test_int_gauge"].metric_type,
stats.MetricType.GAUGE)
self.assertEqual(metrics["test_int_gauge"].fields_defs,
[rdfvalue.MetricFieldDefinition(
field_name="dimension",
field_type=stats.MetricFieldDefinition.FieldType.STR)])
self.assertEqual(metrics["test_event_metric"].metric_type,
stats.MetricType.EVENT)
self.assertFalse(metrics["test_event_metric"].fields_defs)
def testGetMetricFieldsWorksCorrectly(self):
stats.STATS.RegisterCounterMetric(
"test_counter", fields=[("dimension1", str), ("dimension2", str)])
stats.STATS.RegisterGaugeMetric("test_int_gauge", int,
fields=[("dimension", str)])
stats.STATS.RegisterEventMetric("test_event_metric",
fields=[("dimension", str)])
stats.STATS.IncrementCounter("test_counter", fields=["b", "b"])
stats.STATS.IncrementCounter("test_counter", fields=["a", "c"])
stats.STATS.SetGaugeValue("test_int_gauge", 20, fields=["a"])
stats.STATS.SetGaugeValue("test_int_gauge", 30, fields=["b"])
stats.STATS.RecordEvent("test_event_metric", 0.1, fields=["a"])
stats.STATS.RecordEvent("test_event_metric", 0.1, fields=["b"])
fields = sorted(stats.STATS.GetMetricFields("test_counter"),
key=lambda t: t[0])
self.assertEqual([("a", "c"), ("b", "b")], fields)
fields = sorted(stats.STATS.GetMetricFields("test_int_gauge"),
key=lambda t: t[0])
self.assertEqual([("a",), ("b",)], fields)
fields = sorted(stats.STATS.GetMetricFields("test_event_metric"),
key=lambda t: t[0])
self.assertEqual([("a",), ("b",)], fields)
@stats.Counted("test_counter")
def CountedFunc(self):
pass
def testCountingDecorator(self):
"""Test function call counting."""
stats.STATS.RegisterCounterMetric("test_counter")
for _ in range(10):
self.CountedFunc()
self.assertEqual(stats.STATS.GetMetricValue("test_counter"), 10)
@stats.Timed("test_timed")
def TimedFunc(self, n):
self.Sleep(n)
def testMaps(self):
"""Test binned timings."""
stats.STATS.RegisterEventMetric("test_timed",
bins=[0.0, 0.1, 0.2])
m = stats.STATS.GetMetricValue("test_timed")
self.assertEqual(m.bins_heights[0.0], 0)
self.assertEqual(m.bins_heights[0.1], 0)
self.assertEqual(m.bins_heights[0.2], 0)
for _ in range(3):
self.TimedFunc(0)
m = stats.STATS.GetMetricValue("test_timed")
self.assertEqual(m.bins_heights[0.0], 3)
self.assertEqual(m.bins_heights[0.1], 0)
self.assertEqual(m.bins_heights[0.2], 0)
self.TimedFunc(0.11)
m = stats.STATS.GetMetricValue("test_timed")
self.assertEqual(m.bins_heights[0.0], 3)
self.assertEqual(m.bins_heights[0.1], 1)
self.assertEqual(m.bins_heights[0.2], 0)
@stats.Timed("test_timed")
@stats.Counted("test_counter")
def OverdecoratedFunc(self, n):
self.Sleep(n)
def testCombiningDecorators(self):
"""Test combining decorators."""
stats.STATS.RegisterCounterMetric("test_counter")
stats.STATS.RegisterEventMetric("test_timed",
bins=[0.0, 0.1, 0.2])
self.OverdecoratedFunc(0.02)
# Check if all vars get updated
m = stats.STATS.GetMetricValue("test_timed")
self.assertEqual(m.bins_heights[0.0], 1)
self.assertEqual(m.bins_heights[0.1], 0)
self.assertEqual(m.bins_heights[0.2], 0)
self.assertEqual(stats.STATS.GetMetricValue("test_counter"), 1)
@stats.Timed("test_timed")
@stats.Counted("test_counter")
def RaiseFunc(self, n):
self.Sleep(n)
raise Exception()
def testExceptionHandling(self):
"""Test decorators when exceptions are thrown."""
stats.STATS.RegisterCounterMetric("test_counter")
stats.STATS.RegisterEventMetric("test_timed",
bins=[0.0, 0.1, 0.2])
self.assertRaises(Exception, self.RaiseFunc, 0.11)
# Check if all vars get updated
m = stats.STATS.GetMetricValue("test_timed")
self.assertEqual(m.bins_heights[0.0], 0)
self.assertEqual(m.bins_heights[0.1], 1)
self.assertEqual(m.bins_heights[0.2], 0)
self.assertEqual(stats.STATS.GetMetricValue("test_counter"), 1)
@stats.Counted("test_multiple_count")
def Func1(self, n):
self.Sleep(n)
@stats.Counted("test_multiple_count")
def Func2(self, n):
self.Sleep(n)
@stats.Timed("test_multiple_timing")
def Func3(self, n):
self.Sleep(n)
@stats.Timed("test_multiple_timing")
def Func4(self, n):
self.Sleep(n)
def testMultipleFuncs(self):
"""Tests if multiple decorators produce aggregate stats."""
stats.STATS.RegisterCounterMetric("test_multiple_count")
stats.STATS.RegisterEventMetric("test_multiple_timing",
bins=[0, 1, 2])
self.Func1(0)
self.Func2(0)
self.assertEqual(stats.STATS.GetMetricValue("test_multiple_count"), 2)
self.Func3(0)
self.Func4(1)
m = stats.STATS.GetMetricValue("test_multiple_timing")
self.assertEqual(m.bins_heights[0.0], 1)
self.assertEqual(m.bins_heights[1], 1)
self.assertEqual(m.bins_heights[2], 0)
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TF code to decode an MEG/EEG signal.
TF models and code to predict MEG/EEG signals from their input audio features,
or vice versa.
"""
import os
import random
import re
import sys
from absl import logging
import numpy as np
from telluride_decoding import preprocess
import tensorflow.compat.v2 as tf
# User should call tf.compat.v1.enable_v2_behavior()
brain_data_print = sys.stdout # Feel free to redirect this elsewhere.
# pylint: disable=g-long-lambda
######################### Brain Data Classes ##############################
class BrainData(object):
"""Basic object describing the data we read and use for regression.
A generic class for reading brain decoding data. This class reads in the
data, adds temporal context and prepares the data as a TF dataset for
processing by the rest of the decoding system. Use the create_dataset method
to get a tf.data.Dataset object given a BrainData object.
  The resulting dataset object represents a stream of 2-tuples, consisting of:
input_dictionary, output_data
where the input dictionary has three keys
input_1, input_2 (might be empty), and attended_speaker (might be empty).
"""
def __init__(self, in_fields, out_field,
frame_rate,
pre_context=0,
post_context=0,
in2_fields=None,
in2_pre_context=0,
in2_post_context=0,
attended_field=None,
initial_batch_size=1000,
final_batch_size=1000,
repeat_count=1,
shuffle_buffer_size=1000,
data_dir=None,
data_pattern='',
train_file_pattern='',
validate_file_pattern='',
test_file_pattern=''):
"""Describes the type of data we are using in this experiment.
This class encapsulates everything we know about the dataset, so we can
later generate training, eval and testing subsets.
Args:
in_fields: A list of fields from a dataset used as input to regression.
out_field: A single field name to predict (also dataset field).
frame_rate: Sample rate of the data, needed for preprocessing filters.
pre_context: Number of input samples before the current time in
regression.
post_context: Number of input samples after the current time in
regression.
in2_fields: A second list of fields for methods that take two inputs.
      in2_pre_context: Number of samples of input 2 before the current time
        in regression.
in2_post_context: Number of samples of input 2 after the current time
in regression.
attended_field: TFRecord feature name that says which speaker is being
attended.
initial_batch_size: Number of samples to use before adding context.
Longer is better because you have fewer edge effects.
final_batch_size: Size of minibatch passed to estimator.
repeat_count: Number of times to repeat the data when streaming it out.
shuffle_buffer_size: Number of samples to accumulate before shuffling.
data_dir: Where file-based data classes look for data.
data_pattern: String that must be in all filenames used here.
train_file_pattern: A regular expression that selects the training files.
        Note, the special string "allbut" specifies that all files not selected
        for validation or testing should be used for training. Furthermore,
        specifying allbut_NN specifies that NN randomly chosen files be used
        for training (so we can test the amount of data vs. performance).
validate_file_pattern: A regular expression that selects the validation
files.
test_file_pattern: A regular expression that selects the testing files.
Raises:
ValueError for bad parameter values.
"""
logging.info('BrainData initialization: %s, %s @ %gHz -> %s',
in_fields, in2_fields, frame_rate, out_field)
if not in_fields:
raise ValueError('Must specify at least one input field.')
if not out_field:
raise ValueError('Must specify an output field.')
if frame_rate < 0:
raise ValueError('frame_rate must be >= 0')
if pre_context < 0:
raise ValueError('pre_context must be >= 0')
if post_context < 0:
raise ValueError('post_context must be >= 0')
if isinstance(in_fields, str):
in_fields = [in_fields,]
self.in1_fields = in_fields
if isinstance(in2_fields, str) and in2_fields:
in2_fields = [in2_fields,]
self.in2_fields = in2_fields
self.out_field = out_field
self.frame_rate = frame_rate
self.in1_pre_context = pre_context
self.in1_post_context = post_context
self.in2_pre_context = in2_pre_context
self.in2_post_context = in2_post_context
self.attended_field = attended_field
self.initial_batch_size = initial_batch_size
self.final_batch_size = final_batch_size
self.repeat_count = repeat_count
self.shuffle_buffer_size = shuffle_buffer_size
self.data_dir = data_dir
self.data_pattern = data_pattern
self.train_file_pattern = train_file_pattern
self.validate_file_pattern = validate_file_pattern
self.test_file_pattern = test_file_pattern
# Internal state
self.use_saved_data = False
self._cached_file_names = [] # Initialize cache for this list if needed.
self.all_files() # preload data files so we know what the data looks like.
def all_files(self, max_count=0):
"""Returns a list of files available for this class."""
if not self._cached_file_names:
# Load the potential files if we haven't already.
self._get_data_file_names()
if self._cached_file_names:
random.shuffle(self._cached_file_names) # Shuffle them once.
if max_count > 0 and len(self._cached_file_names) > max_count:
return self._cached_file_names[:max_count]
return self._cached_file_names
def set_file_patterns(self, train, validate, test):
logging.info('brain_data setting file patterns to %s, %s, %s',
train, validate, test)
self.train_file_pattern = train
self.validate_file_pattern = validate
self.test_file_pattern = test
def create_dataset(self, mode='train', temporal_context=True):
"""Creates the full TF dataset, ready to feed an estimator.
This class should be specialized. The basic flow is:
Select the file names for this mode (if needed).
Read in each file (interleaved) and do the following operations:
Parse the data.
Add temporal context.
Finalize the dataset by:
Shuffle the data.
Assemble into mini batches.
Args:
mode: One of {train, eval, test} to determine how to set up the
full stream.
temporal_context: Flag that controls whether we add temporal context to
the data. Normally true, but set to false to extract the original data
without context (for debugging and prediction.)
"""
raise NotImplementedError
def _get_data_file_names(self):
"""Get the data pathnames for this dataset.
Just a dummy list of names by default for classes that synthesize data.
Real datasets will need to specialize this function to return the real file
names.
Caches a list of file pathnames. In this generic case an empty list.
"""
self._cached_file_names = [] # No files by default
def filter_file_names(self, mode):
"""Filters all available files based on the experiment mode (train, test...)
Depending on the training/testing mode, filter the available files into
a list that we use for this stage.
Args:
mode: Arbitrary, but currently one of {train, validate, test}. This mode
determines which flag is used to provide the file_pattern.
Implied flags from the class object:
train_file_pattern, validate_file_pattern, test_file_pattern:
These are regular expressions that filter the returned names.
Returns:
A list of filenames to be used in this phase of the program.
Raises:
ValueError for bad parameter values.
"""
if mode == 'program_test':
mode = 'test'
if mode not in set(['test', 'validate', 'train']):
raise ValueError('mode must be one of test, validate or train')
filename_list = self.all_files()
if not isinstance(filename_list, list):
raise TypeError('Filename_list is a %s, not a list.' %
type(filename_list))
logging.info('Filter_file_names: filename_list: %s', filename_list)
logging.info('Filter_file_names: train_file_pattern: %s',
self.train_file_pattern)
logging.info('Filter_file_names: validate_file_pattern: %s',
self.validate_file_pattern)
logging.info('Filter_file_names: test_file_pattern: %s',
self.test_file_pattern)
if mode.startswith('test'):
pattern_re = re.compile(self.test_file_pattern)
elif mode.startswith('validate'):
pattern_re = re.compile(self.validate_file_pattern)
else: # Only valid option left is 'train'
if self.train_file_pattern == 'allbut':
pattern_re = re.compile('')
else:
pattern_re = re.compile(self.train_file_pattern)
if mode == 'train' and self.train_file_pattern.startswith('allbut'):
# Must specify some pattern for test and validate if using allbut.
if not (self.test_file_pattern and self.validate_file_pattern):
raise ValueError('Both test and validate must be specified if using '
'allbut pattern')
test_re = re.compile(self.test_file_pattern)
validate_re = re.compile(self.validate_file_pattern)
filename_list = [f for f in filename_list if not (test_re.search(f) or
validate_re.search(f))]
if self.train_file_pattern.startswith('allbut_'):
allbut = self.train_file_pattern.replace('allbut_', '', 1)
if allbut.isdigit():
count = int(allbut)
else:
raise ValueError('allbut_ spec must be an integer, not %s.' % allbut)
if count < len(filename_list):
logging.info('Reducing list of %d files to %d.',
len(filename_list), count)
filename_list = filename_list[:count]
logging.info('filter_file_names: post filename_list %s', filename_list)
else:
filename_list = [f for f in filename_list if pattern_re.search(f)]
logging.info('Using %d files for %s.', len(filename_list), mode)
logging.info(' Files for %s are: %s', mode, filename_list)
return filename_list
def final_shuffle_and_batch(self, mode, input_dataset, mixup_batch=False):
"""Does all the work we need to do prepare the dataset for serving.
Args:
mode: Train or testing mode, which determines whether data is shuffled.
input_dataset: The actual TF dataset to prepare.
mixup_batch: Whether the inputs and outputs are randomized with respect
to each other.
Returns:
The final dataset object. This dataset has two components: a dictionary
of model inputs (input_1, input_2 and attended_speaker), and a single
output field.
"""
# TODO Look into tf.data cache or snapshot
# First shuffle the data (and repeat it) for better SGD performance.
logging.info('final_shuffle_and_batch mode %s and batching %s',
mode, mixup_batch)
if mode == 'train':
repeated_dataset = input_dataset.repeat(self.repeat_count)
if self.shuffle_buffer_size > 0:
shuffled_dataset = repeated_dataset.shuffle(self.shuffle_buffer_size)
else:
shuffled_dataset = repeated_dataset
elif mode == 'program_test':
shuffled_dataset = input_dataset
else:
# Shuffle the data in test or eval mode too so we get better stats.
if self.shuffle_buffer_size > 0:
shuffled_dataset = input_dataset.shuffle(self.shuffle_buffer_size)
else:
shuffled_dataset = input_dataset
# Then batch the data into minibatches.
# Drop the remainder so testing is easier (no odd sized batches). Losing a
# few samples at the end shouldn't matter for real (big) datasets.
batched_dataset = shuffled_dataset.batch(self.final_batch_size,
drop_remainder=True)
if mixup_batch:
print('final_shuffle_and_batch: Mixing up the batches of data '
'for testing!!!!', file=brain_data_print)
logging.warning('final_shuffle_and_batch: Mixing up the batches of data '
'for testing!!!!')
def mixup_batch_function(x, x2, y, a):
"""Mixup the order of the labels so data is mismatched. For baseline."""
return x, tf.random.shuffle(x2), tf.random.shuffle(y), a
batched_dataset = batched_dataset.map(mixup_batch_function)
# Convert the four-tuple to a two-tuple: a dictionary for the inputs, and
# the output.
final_dataset = batched_dataset.map(
lambda x, x2, y, a: ({'input_1': x,
'input_2': x2,
'attended_speaker': a}, y),
num_parallel_calls=32)
logging.info('Create_dataset: the %s final_dataset is: %s',
mode, final_dataset)
return final_dataset
def add_temporal_context(self, dataset_without_context):
"""Adds context to a datstream.
Create a dataset stream from files of TFRecords, containing input and
output data. This dataset is unique because we add temporal context to the
input data, so the output depends on the input over a time window. We do
this using the dataset so we can create the context on the fly (and not
precompute it and save it in a much larger file.)
Args:
dataset_without_context: dataset to which we will add temporal context.
This dataset consists of four (unnamed) streams (input_1, input_2,
output, and attention).
External args:
self.in1_pre_context - Number of frames to prepend to the input data.
self.in1_post_context - Number of frames to append after the current
frame.
self.in2_pre_context - Number of frames to prepend to the second input.
self.in2_post_context - Number of frames to append after the second input.
Returns:
The new dataset with the desired temporal context.
Raises:
TypeError for bad parameter values.
"""
def window_one_stream_new(x, pre_context, post_context):
"""Create extra temporal context for one stream of data."""
logging.info(' Window_one_stream: adding %d and %d frames of context '
'to stream.', pre_context, post_context)
total_context = pre_context + 1 + post_context
channels = x.shape[1]
logging.info(' Window_one_stream: %s channels.', channels)
padded_x = tf.concat((tf.zeros((pre_context, channels), dtype=x.dtype),
x,
tf.zeros((post_context, channels),
dtype=x.dtype)),
axis=0)
new_data = tf.signal.frame(padded_x, total_context, frame_step=1, axis=0)
flat_data = tf.reshape(new_data, (-1, total_context*channels),
name='window_one_stream_reshape_new')
new_x = tf.data.Dataset.from_tensor_slices(flat_data)
return new_x
def window_data(x, x2, y, a, pre_context=0, post_context=0,
in2_pre_context=0, in2_post_context=0):
"""Creates extra temporal context for both input streams."""
x_with_context = window_one_stream_new(x, pre_context, post_context)
x2_with_context = window_one_stream_new(x2, in2_pre_context,
in2_post_context)
y_with_context = window_one_stream_new(y, 0, 0)
a = tf.data.Dataset.from_tensor_slices(a)
return tf.data.Dataset.zip((x_with_context, x2_with_context,
y_with_context, a))
if not isinstance(dataset_without_context, tf.data.Dataset):
raise TypeError('dataset for window_data must be a tf.data.Dataset')
additional_context = (self.in1_pre_context or self.in1_post_context or
self.in2_pre_context + self.in2_post_context)
if additional_context:
batched_dataset = dataset_without_context.batch(self.initial_batch_size)
new_dataset = batched_dataset.flat_map(
lambda x, x2, y, a: window_data(
x, x2, y, a,
pre_context=self.in1_pre_context,
post_context=self.in1_post_context,
in2_pre_context=self.in2_pre_context,
in2_post_context=self.in2_post_context))
else:
new_dataset = dataset_without_context
return new_dataset
def input_fields_width(self, input_number=1):
"""Computes the width of the input.
Sum up the width of all the fields to pass this to the estimator ---
*after* adding the temporal context.
Args:
input_number: Set to either 1 or 2, which determines whether this function
is calculating the feature width for the first or second feature data.
Returns:
An integer that counts how wide the input feature is (in float32s).
Raises:
TypeError or ValueError for bad parameter values.
"""
if input_number != 1 and input_number != 2:
raise ValueError('Only 1st or 2nd input is supported here.')
if input_number == 1:
fields = self.in1_fields
else:
fields = self.in2_fields
logging.info('input_fields_width (%d) type(in_fields) is %s with value %s',
input_number, type(fields), fields)
if isinstance(fields, str) and fields:
fields = [fields,]
if fields:
for k in fields:
if k not in list(self.features.keys()):
raise TypeError('Can\'t find **%s** in valid features: %s' %
(k, [','.join(list(self.features.keys()))]))
widths = [self.features[k].shape[0] for k in fields]
else:
widths = [1]
if input_number == 1:
return sum(widths)*(self.in1_pre_context+1+self.in1_post_context)
else:
return sum(widths)*(self.in2_pre_context+1+self.in2_post_context)
def output_field_width(self):
if self.out_field not in list(self.features.keys()):
raise ValueError('Could not find output_field **%s** in %s' %
(self.out_field, self.features.keys()))
return self.features[self.out_field].shape[0]
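# The following is an illustrative, standalone sketch (not part of the
# original pipeline): it shows where the (pre_context + 1 + post_context) *
# channels width reported by input_fields_width() comes from, by repeating
# the zero-pad + tf.signal.frame trick used in add_temporal_context() on a
# toy array.  All sizes below are arbitrary demo values.
def _demo_context_width(num_frames=10, channels=4, pre_context=2,
                        post_context=3):
  """Windows a toy (num_frames x channels) array and returns it and its width."""
  x = tf.ones((num_frames, channels), dtype=tf.float32)
  total_context = pre_context + 1 + post_context
  padded = tf.concat((tf.zeros((pre_context, channels)),
                      x,
                      tf.zeros((post_context, channels))), axis=0)
  framed = tf.signal.frame(padded, total_context, frame_step=1, axis=0)
  flat = tf.reshape(framed, (-1, total_context * channels))
  # flat.shape == (num_frames, (pre_context + 1 + post_context) * channels)
  return flat, total_context * channels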
class TestBrainData(BrainData):
"""Dataset which produces fixed (saved) values, useful for testing."""
def create_dataset(self, mode='train', temporal_context=True,
mixup_batch=False):
"""Creates the full TF dataset, ready to feed an estimator.
This is the default entry into this class, creating a dataset for training,
testing, or validation, depending on the mode.
Args:
mode: One of {train, eval, test} to determine how to set up the
full stream.
temporal_context: Flag that controls whether we add temporal context to
the data. Normally true, but set to false to extract the original data
without context (for debugging and prediction.)
mixup_batch: Boolean that specifies whether inputs and outputs are
shuffled with respect to each other to create baseline.
Returns:
The requested tf.data.Dataset object.
Raises:
ValueError for bad parameter values.
"""
if not hasattr(self, 'saved_input_data'):
raise ValueError('Must call preserve_test_data before create_dataset.')
saved_dataset = tf.data.Dataset.from_tensor_slices(
(self.saved_input_data, self.saved_input2_data,
self.saved_output_data, self.saved_attention_data))
if temporal_context and (self.in1_pre_context or self.in1_post_context or
self.in2_pre_context or self.in2_post_context):
saved_dataset = self.add_temporal_context(saved_dataset)
return self.final_shuffle_and_batch(mode, saved_dataset,
mixup_batch=mixup_batch)
def preserve_test_data(self, input_data, output_data,
input2_data=None, attention_data=None):
"""Puts some data into a dataset for testing.
Args:
input_data: data used as the input feature. (time x channel).
output_data: data used as the output data to be predicted.
input2_data: Optional second input array.
attention_data: Optional array for attention target signal.
Raises:
TypeError for bad parameter values.
"""
input_data = np.asarray(input_data)
output_data = np.asarray(output_data)
if input_data.shape[0] != output_data.shape[0]:
raise ValueError('input shape (%s) and output shape (%s) are not equal.'
% (input_data.shape, output_data.shape))
self.saved_input_data = input_data
self.saved_output_data = output_data
self.num_input_channels = input_data.shape[1]
self.num_output_channels = output_data.shape[1]
self.features = {
'input_1': tf.io.FixedLenFeature([input_data.shape[1],], tf.float32),
'output': tf.io.FixedLenFeature([output_data.shape[1],], tf.float32),
}
# Add the optional input_2.
if input2_data is None:
input2_data = np.zeros((input_data.shape[0], 1),
dtype=input_data.dtype)
input2_data = np.asarray(input2_data)
if input_data.shape[0] != input2_data.shape[0]:
raise ValueError('input shape (%s) and input2 shape (%s) are not equal.'
% (input_data.shape, input2_data.shape))
self.saved_input2_data = input2_data
self.features['input_2'] = tf.io.FixedLenFeature([input2_data.shape[1],],
tf.float32)
# Add the optional attention signal.
if attention_data is None:
attention_data = np.zeros((input_data.shape[0], 1),
dtype=input_data.dtype)
attention_data = np.asarray(attention_data)
if input_data.shape[0] != attention_data.shape[0]:
raise ValueError('input shape (%s) and attention shape (%s) '
'are not equal.'
% (input_data.shape, attention_data.shape))
self.saved_attention_data = attention_data
self.features['attention'] = tf.io.FixedLenFeature(
[attention_data.shape[1],], tf.float32)
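# Illustrative, hedged sketch of how TestBrainData is typically driven.  The
# 'input_1'/'output' field names, the 100 Hz rate, the batch size and the
# context sizes are arbitrary demo values, and the constructor keywords are
# assumed to mirror the ones forwarded by create_brain_dataset() below.
def _demo_test_brain_data():
  """Feeds random data through TestBrainData and returns one batch's shapes."""
  demo_in = np.random.randn(1000, 4).astype(np.float32)
  demo_out = np.random.randn(1000, 1).astype(np.float32)
  test_data = TestBrainData('input_1', 'output', 100,
                            pre_context=2, post_context=2,
                            final_batch_size=128)
  test_data.preserve_test_data(demo_in, demo_out)
  dataset = test_data.create_dataset(mode='train')
  for inputs, output in dataset.take(1):
    # input_1 is widened to (2 + 1 + 2) * 4 = 20 columns by the context.
    return inputs['input_1'].shape, output.shape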
class TFExampleData(BrainData):
"""Generic dataset consisting of TFExamples in multiple files."""
def _get_data_file_names(self):
"""Gets the files in data_dir ending with .tfrecords and have data_pattern.
Walk the directory tree, grabbing all the files that and in ".tfrecords" and
contain the string indicated by self.data_pattern. We'll filter them into
training, validation and testing sets later.
Returns:
A list of path names to the desired data.
"""
if not self.data_dir:
raise ValueError('Missing data_dir in TFExampleData initialization. '
'Must specify the source of the data (FLAGS.tfrecords).')
logging.info('Reading TFExample data from %s, filtering for **%s**',
self.data_dir, self.data_pattern)
if not isinstance(self.data_dir, str):
raise TypeError('data_dir must be a string, not a %s (**%s**)' %
(type(self.data_dir), self.data_dir))
self._cached_file_names = []
exp_data_dir = self.data_dir
for (path, _, files) in tf.io.gfile.walk(exp_data_dir):
# pylint: disable=g-complex-comprehension
self._cached_file_names += [
os.path.join(path, f)
for f in files
if (f.endswith('.tfrecords') and
'-bad-' not in f and
self.data_pattern in f)
]
logging.info('Found %d files for TFExample data analysis.',
len(self._cached_file_names))
if not self._cached_file_names:
raise ValueError('Should not have an empty list of data files from %s.' %
exp_data_dir)
self.features = discover_feature_shapes(self._cached_file_names[0])
logging.info('Discover_feature_shapes found: %s', self.features)
def create_dataset(self, mode='train', temporal_context=True,
mixup_batch=False):
"""Create the full TF dataset, ready to feed an estimator.
This is the default entry into this class, creating a dataset for training,
testing, or validation, depending on the mode.
Args:
mode: One of {train, eval, test} to determine how to set up the
full stream.
temporal_context: Flag that controls whether we add temporal context to
the data. Normally true, but set to false to extract the original data
without context (for debugging and prediction.)
mixup_batch: Whether the inputs and outputs are randomized with respect
to each other.
Returns:
The requested tf.data.dataset object.
Raises:
ValueError for bad parameter values.
"""
filename_list = self.filter_file_names(mode)
if not filename_list:
raise ValueError('No files to process in mode %s from %s' %
(mode, self.data_dir))
filename_dataset = tf.data.Dataset.from_tensor_slices(filename_list)
# Map over all the filenames (strings) using interleave so we get some extra
# randomness. And each read_data_into_dataset call only applies to one
# file, so we don't extend the temporal context across files.
interleaved_dataset = filename_dataset.interleave(
lambda x: self.read_data_into_dataset(
x, temporal_context=temporal_context),
len(filename_list))
return self.final_shuffle_and_batch(mode, interleaved_dataset,
mixup_batch=mixup_batch)
def read_data_into_dataset(self, filenames, temporal_context=True):
"""Prepares a specific example of data for this dataset.
Dataset creation function that takes filename(s) and outputs the proper
fields from the dataset (no context yet). This base method is only useful
when reading/parsing TFRecord data. Otherwise, specialize.
Args:
filenames: a tensor containing one (usual case due to interleave) or more
filenames from which to read the data.
temporal_context: Should we add the temporal context to the input data?
Returns:
A two-stream dataset, one for input and the other the labels. Batch size
of 1 at this point.
Raises:
TypeError for bad parameter values.
"""
if not isinstance(filenames, tf.Tensor):
raise TypeError('filenames must be a tensor')
filename_dataset = tf.data.Dataset.from_tensors(filenames)
raw_proto_dataset = tf.data.TFRecordDataset(filename_dataset,
num_parallel_reads=32)
parsed_data = raw_proto_dataset.map(self.parse_and_select_from_tfrecord,
num_parallel_calls=32)
if temporal_context and (self.in1_pre_context or self.in1_post_context or
self.in2_pre_context or self.in2_post_context):
parsed_data = self.add_temporal_context(parsed_data)
return parsed_data
def preprocess_list(self, name_params_list, frame_rate):
if not name_params_list:
return []
pp_list = []
for name_param in name_params_list:
pp_list.append(preprocess.Preprocessor(name_param, frame_rate,
frame_rate))
return pp_list
def parse_and_select_from_tfrecord(self, raw_proto):
"""Dataset map function that parses a TFRecord example and select fields.
Note, this routine has a special hack to create a field called "ones" which
is always one, and used for cases like CCA which have no output, just two
inputs.
Args:
raw_proto: An example of a TFRecord, in proto format.
Returns:
A 4-tuple consisting of the parsed input, input2, output data, and attended
direction (if supplied) as Tensors.
Each tensor consists of one sample point (shape[0]), but the width of each
tensor depends on the user's input data request (in1_fields, in2_fields,
out_field, and attended_field).
"""
# https://stackoverflow.com/questions/41951433/tensorflow-valueerror-shape-must-be-rank-1-but-is-rank-0-for-parseexample-pa
parsed_features = tf.io.parse_example([raw_proto], self.features)
if set(self.in1_fields) - set(parsed_features.keys()):
raise ValueError('Could not find all desired features (%s) in data (%s)' %
(self.in1_fields, parsed_features.keys()))
in_data = tf.concat([parsed_features[k] for k in self.in1_fields], axis=1)
in_data = tf.reshape(in_data, (-1,), name='input_reshape')
if self.out_field == 'ones':
logging.info('Selecting ones from %s', in_data)
out_data = in_data[0:1]*0.0 + 1
else:
out_data = parsed_features[self.out_field]
out_data = tf.reshape(out_data, (-1,), name='output_reshape')
if self.in2_fields:
for k in self.in2_fields:
if k not in parsed_features:
raise ValueError('Could not find %s in parsed_features[%s]' %
(k, parsed_features.keys()))
in2_data = tf.concat([parsed_features[k] for k in self.in2_fields],
axis=1)
in2_data = tf.reshape(in2_data, (-1,), name='input2_reshape')
else:
# Fill in dummy data so the dataset maps that follow don't fail.
# Only need first data element, replicated across batches later.
# This will need to be done by hand when feeding saved models.
logging.info('Did not find %s field for input2, so synthesizing one.',
self.in2_fields)
in2_data = in_data[0:1]
if self.attended_field:
attended_data = parsed_features[self.attended_field]
attended_data = tf.reshape(attended_data, (-1,), name='attended_reshape')
else:
logging.info('Did not find %s field for attention, so synthesizing one.',
self.attended_field)
# Placeholder. Just get some 0/1 data into this field. Keep it as a float
# since the original attend field is a float.
attended_data = tf.cast(in_data[0:1] > 0, tf.float32)
return in_data, in2_data, out_data, attended_data
# TODO Switch to this new parse function so we can do pre-
# processing on the fly. Right now it doesn't work yet.
def parse_and_select_from_tfrecord2(self, raw_proto):
"""Dataset map function that parses a TFRecord example and select fields."""
# https://stackoverflow.com/questions/41951433/tensorflow-valueerror-shape-must-be-rank-1-but-is-rank-0-for-parseexample-pa
parsed_features = tf.io.parse_example([raw_proto], self.features)
self._in1_preprocessors = self.preprocess_list(self.in1_fields,
self.frame_rate)
# pylint: disable=g-complex-comprehension
in_data = tf.concat([tf.py_function(pp.process,
inp=[parsed_features[pp.name]],
Tout=tf.float32)
for pp in self._in1_preprocessors], axis=1)
in_data = tf.reshape(in_data, (-1,), name='input1_reshape')
if self.in2_fields:
self._in2_preprocessors = self.preprocess_list(self.in2_fields,
self.frame_rate)
# pylint: disable=g-complex-comprehension
in2_data = tf.concat([tf.py_function(pp.process,
inp=[parsed_features[pp.name]],
Tout=tf.float32)
for pp in self._in2_preprocessors], axis=1)
in2_data = tf.reshape(in2_data, (-1,), name='input2_reshape')
else:
in2_data = in_data[0:1]
self._out_preprocessors = self.preprocess_list([self.out_field],
self.frame_rate)
# pylint: disable=g-complex-comprehension
out_data = tf.concat([tf.py_function(pp.process,
inp=[parsed_features[pp.name]],
Tout=tf.float32)
for pp in self._out_preprocessors], axis=1)
out_data = tf.reshape(out_data, (-1,), name='output_reshape')
if self.attended_direction:
attended_data = parsed_features[self.attended_direction]
attended_data = tf.reshape(attended_data, (-1,), name='attended_reshape')
else:
attended_data = None
return in_data, in2_data, out_data, attended_data
def discover_feature_shapes(tfrecord_file_name):
"""Reads a TFRecord file, parse one TFExample, and return the structure.
Args:
tfrecord_file_name: Where to read the data (just one needed).
Returns:
A dictionary of names and tf.io.FixedLenFeatures suitable for
tf.io.parse_example.
Raises:
TypeError for bad parameter values.
"""
if not isinstance(tfrecord_file_name, str):
raise TypeError('discover_feature_shapes: input must be a string filename.')
dataset = tf.data.TFRecordDataset(tfrecord_file_name)
for a_record in dataset:
an_example = tf.train.Example.FromString(a_record.numpy())
break
if not isinstance(an_example, tf.train.Example):
raise TypeError('record from %s should be a tf.train.Example, not %s.' %
(tfrecord_file_name, type(an_example)))
feature_keys = list(an_example.features.feature.keys())
shapes = {}
for k in feature_keys:
feature_list = an_example.features.feature[k]
if feature_list.float_list.value:
dimensionality = len(feature_list.float_list.value)
feature_type = tf.float32
elif feature_list.int64_list.value:
dimensionality = len(feature_list.int64_list.value)
feature_type = tf.int64
elif feature_list.bytes_list.value:
dimensionality = len(feature_list.bytes_list.value)
feature_type = tf.string
shapes[k] = tf.io.FixedLenFeature([dimensionality,], feature_type)
return shapes
def count_tfrecords(tfrecord_file_name):
"""Counts and validates the number of TFRecords in an input file.
Args:
tfrecord_file_name: File to check.
Returns:
A tuple of (number of valid records, whether a parsing exception was hit).
Raises:
TypeError for bad parameter values.
"""
if not isinstance(tfrecord_file_name, str):
raise TypeError('tfrecord_file_name must be a string.')
dataset = tf.data.TFRecordDataset(tfrecord_file_name)
record_count = 0
for a_record in dataset:
try:
an_example = tf.train.Example.FromString(a_record.numpy())
if not isinstance(an_example, tf.train.Example):
raise TypeError('record from %s should be a tf.train.Example, not %s.' %
(tfrecord_file_name, type(an_example)))
record_count += 1
except: # pylint: disable=bare-except
return record_count, True
return record_count, False
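# Illustrative, hedged sketch exercising the two helpers above.  The path and
# the 'eeg'/'intensity' feature names are demo assumptions, not part of any
# real recording format.
def _demo_tfrecord_roundtrip(path='/tmp/demo.tfrecords'):
  """Writes two tiny TFExamples, then rediscovers their shapes and count."""
  with tf.io.TFRecordWriter(path) as writer:
    for _ in range(2):
      example = tf.train.Example(features=tf.train.Features(feature={
          'eeg': tf.train.Feature(
              float_list=tf.train.FloatList(value=[0.0] * 4)),
          'intensity': tf.train.Feature(
              float_list=tf.train.FloatList(value=[1.0])),
      }))
      writer.write(example.SerializeToString())
  shapes = discover_feature_shapes(path)    # e.g. 'eeg' -> FixedLenFeature([4])
  count, had_error = count_tfrecords(path)  # (2, False)
  return shapes, count, had_error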
def create_brain_dataset(data_type, in_fields, out_field, frame_rate,
pre_context=0,
post_context=0,
in2_fields=None,
in2_pre_context=0,
in2_post_context=0,
attended_field=None,
initial_batch_size=1000,
final_batch_size=1000,
repeat_count=1,
shuffle_buffer_size=1000,
data_dir=None,
data_pattern='',
train_file_pattern=None,
validate_file_pattern=None,
test_file_pattern=None):
"""Creates any of the brain datasets that we know about.
Args:
data_type: Desired type of dataset.
in_fields: A list of fields from a dataset used as input to regression.
out_field: A single field name to predict (also dataset field).
frame_rate: Sample rate of the data, needed for preprocessing filters.
pre_context: Number of input samples before the current time in regression.
post_context: Number of input samples after the current time in regression.
in2_fields: A second list of fields for methods that take two inputs.
in2_pre_context: Number of samples of input 2 before the current time
in regression.
in2_post_context: Number of samples of input 2 after the current time
in regression.
attended_field: Where is the subject attending? This signal is passed
through the pipeline and is not used until verifying the performance.
initial_batch_size: Number of samples to use before adding context.
final_batch_size: Size of minibatch passed to estimator.
repeat_count: Number of times to repeat the data when streaming it out.
shuffle_buffer_size: Number of samples to accumulate before shuffling.
data_dir: Where file-based data classes look for data.
data_pattern: String that must be in the filename.
train_file_pattern: A regular expression that selects the training files.
validate_file_pattern: A regular expression that selects the validation
files.
test_file_pattern: A regular expression that selects the testing files.
Returns:
The desired type of BrainData
"""
if not isinstance(data_type, str):
raise TypeError('create_brain_dataset type must be a string.')
if frame_rate <= 0:
raise ValueError('frame_rate must be greater than 0.')
if (data_type == 'tfrecord' or data_type == 'tfrecords' or
data_type == 'tfexample'):
return TFExampleData(in_fields, out_field, frame_rate,
pre_context=pre_context,
post_context=post_context,
in2_fields=in2_fields,
in2_pre_context=in2_pre_context,
in2_post_context=in2_post_context,
attended_field=attended_field,
initial_batch_size=initial_batch_size,
final_batch_size=final_batch_size,
repeat_count=repeat_count,
shuffle_buffer_size=shuffle_buffer_size,
data_dir=data_dir,
data_pattern=data_pattern,
train_file_pattern=train_file_pattern,
validate_file_pattern=validate_file_pattern,
test_file_pattern=test_file_pattern)
if data_type == 'test':
return TestBrainData(in_fields, out_field, frame_rate,
pre_context=pre_context,
post_context=post_context,
in2_fields=in2_fields,
in2_pre_context=in2_pre_context,
in2_post_context=in2_post_context,
initial_batch_size=initial_batch_size,
final_batch_size=final_batch_size,
repeat_count=repeat_count,
shuffle_buffer_size=shuffle_buffer_size,
data_dir=data_dir,
data_pattern=data_pattern,
train_file_pattern=train_file_pattern,
validate_file_pattern=validate_file_pattern,
test_file_pattern=test_file_pattern)
raise TypeError('create_brain_dataset unknown data type %s' % data_type)
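# Illustrative, hedged sketch of the 'tfrecord' branch of the factory above,
# reusing the tiny file written by _demo_tfrecord_roundtrip().  The directory,
# patterns and field names are demo assumptions; an empty train_file_pattern
# matches every file, and final_batch_size=1 keeps drop_remainder from
# discarding the two-record demo file.
def _demo_tfexample_factory(data_dir='/tmp/brain_demo'):
  """Builds a TFExampleData training dataset from a throwaway TFRecord file."""
  tf.io.gfile.makedirs(data_dir)
  _demo_tfrecord_roundtrip(os.path.join(data_dir, 'demo.tfrecords'))
  brain_data = create_brain_dataset('tfrecord', ['eeg'], 'intensity',
                                    frame_rate=100,
                                    final_batch_size=1,
                                    data_dir=data_dir,
                                    data_pattern='demo',
                                    train_file_pattern='',
                                    validate_file_pattern='',
                                    test_file_pattern='')
  return brain_data.create_dataset(mode='train')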
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import threading
from concurrent import futures as _futures
from concurrent.futures import process as _process
import six
from six.moves import queue as compat_queue
from futurist import _green
from futurist import _thread
from futurist import _utils
TimeoutError = _futures.TimeoutError
CancelledError = _futures.CancelledError
class RejectedSubmission(Exception):
"""Exception raised when a submitted call is rejected (for some reason)."""
# NOTE(harlowja): Allows for simpler access to this type...
Future = _futures.Future
class _Gatherer(object):
def __init__(self, submit_func, lock_factory, start_before_submit=False):
self._submit_func = submit_func
self._stats_lock = lock_factory()
self._stats = ExecutorStatistics()
self._start_before_submit = start_before_submit
@property
def statistics(self):
return self._stats
def clear(self):
with self._stats_lock:
self._stats = ExecutorStatistics()
def _capture_stats(self, started_at, fut):
"""Capture statistics
:param started_at: when the activity performed by the future
was started
:param fut: future object
"""
# If time somehow goes backwards, make sure we cap it at 0.0 instead
# of having negative elapsed time...
elapsed = max(0.0, _utils.now() - started_at)
with self._stats_lock:
# Use a new collection and lock so that all mutations are seen as
# atomic and not overlapping and corrupting with other
# mutations (the clone ensures that others reading the current
# values will not see a mutated/corrupted one). Since futures may
# be completed by different threads we need to be extra careful to
# gather this data in a way that is thread-safe...
(failures, executed, runtime, cancelled) = (self._stats.failures,
self._stats.executed,
self._stats.runtime,
self._stats.cancelled)
if fut.cancelled():
cancelled += 1
else:
executed += 1
if fut.exception() is not None:
failures += 1
runtime += elapsed
self._stats = ExecutorStatistics(failures=failures,
executed=executed,
runtime=runtime,
cancelled=cancelled)
def submit(self, fn, *args, **kwargs):
"""Submit work to be executed and capture statistics."""
if self._start_before_submit:
started_at = _utils.now()
fut = self._submit_func(fn, *args, **kwargs)
if not self._start_before_submit:
started_at = _utils.now()
fut.add_done_callback(functools.partial(self._capture_stats,
started_at))
return fut
class ThreadPoolExecutor(_futures.Executor):
"""Executor that uses a thread pool to execute calls asynchronously.
It gathers statistics about the submissions executed for post-analysis...
See: https://docs.python.org/dev/library/concurrent.futures.html
"""
threading = _thread.Threading()
def __init__(self, max_workers=None, check_and_reject=None):
"""Initializes a thread pool executor.
:param max_workers: maximum number of workers that can be
simultaneously active at the same time, further
submitted work will be queued up when this limit
is reached.
:type max_workers: int
:param check_and_reject: a callback function that will be provided
two positional arguments, the first argument
will be this executor instance, and the second
will be the number of currently queued work
items in this executor's backlog; the callback
should raise a :py:class:`.RejectedSubmission`
exception if it wants to have this submission
rejected.
:type check_and_reject: callback
"""
if max_workers is None:
max_workers = _utils.get_optimal_thread_count()
if max_workers <= 0:
raise ValueError("Max workers must be greater than zero")
self._max_workers = max_workers
self._work_queue = compat_queue.Queue()
self._shutdown_lock = threading.RLock()
self._shutdown = False
self._workers = []
self._check_and_reject = check_and_reject or (lambda e, waiting: None)
self._gatherer = _Gatherer(self._submit, self.threading.lock_object)
@property
def statistics(self):
""":class:`.ExecutorStatistics` about the executors executions."""
return self._gatherer.statistics
@property
def alive(self):
"""Accessor to determine if the executor is alive/active."""
return not self._shutdown
def _maybe_spin_up(self):
"""Spin up a worker if needed."""
# Do more advanced idle checks and/or reaping of very idle
# threads in the future....
if (not self._workers or
len(self._workers) < self._max_workers):
w = _thread.ThreadWorker.create_and_register(
self, self._work_queue)
# Always save it before we start (so that even if we fail
# starting it we can correctly join on it).
self._workers.append(w)
w.start()
def shutdown(self, wait=True):
with self._shutdown_lock:
if not self._shutdown:
self._shutdown = True
for w in self._workers:
w.stop()
if wait:
for w in self._workers:
_thread.join_thread(w)
def _submit(self, fn, *args, **kwargs):
f = Future()
self._maybe_spin_up()
self._work_queue.put(_utils.WorkItem(f, fn, args, kwargs))
return f
def submit(self, fn, *args, **kwargs):
"""Submit some work to be executed (and gather statistics)."""
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError('Can not schedule new futures'
' after being shutdown')
self._check_and_reject(self, self._work_queue.qsize())
return self._gatherer.submit(fn, *args, **kwargs)
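# Illustrative, hedged usage sketch (demo values only): exercises the
# rejection hook and the gathered statistics of the thread pool executor
# defined above.  The backlog limit of 128 is arbitrary and is never hit in
# this tiny run; exceeding it would raise RejectedSubmission out of submit().
def _demo_thread_pool():
    def _reject_when_backlogged(executor, backlog):
        if backlog >= 128:
            raise RejectedSubmission("backlog of %s is too large" % backlog)

    executor = ThreadPoolExecutor(max_workers=2,
                                  check_and_reject=_reject_when_backlogged)
    futs = [executor.submit(pow, 2, n) for n in range(10)]
    executor.shutdown(wait=True)
    results = [f.result() for f in futs]
    stats = executor.statistics
    return results, stats.executed, stats.failures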
class ProcessPoolExecutor(_process.ProcessPoolExecutor):
"""Executor that uses a process pool to execute calls asynchronously.
It gathers statistics about the submissions executed for post-analysis...
See: https://docs.python.org/dev/library/concurrent.futures.html
"""
threading = _thread.Threading()
def __init__(self, max_workers=None):
if max_workers is None:
max_workers = _utils.get_optimal_process_count()
super(ProcessPoolExecutor, self).__init__(max_workers=max_workers)
if self._max_workers <= 0:
raise ValueError("Max workers must be greater than zero")
self._gatherer = _Gatherer(
# Since our submit will use this gatherer we have to reference
# the parent submit, bound to this instance (which is what we
# really want to use anyway).
super(ProcessPoolExecutor, self).submit,
self.threading.lock_object)
@property
def alive(self):
"""Accessor to determine if the executor is alive/active."""
return not self._shutdown_thread
@property
def statistics(self):
""":class:`.ExecutorStatistics` about the executors executions."""
return self._gatherer.statistics
def submit(self, fn, *args, **kwargs):
"""Submit some work to be executed (and gather statistics)."""
return self._gatherer.submit(fn, *args, **kwargs)
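# Illustrative, hedged usage sketch: the process pool variant shares the
# submit()/statistics surface shown above, but the callable and its
# arguments must be picklable because they cross process boundaries.
def _demo_process_pool():
    executor = ProcessPoolExecutor(max_workers=2)
    fut = executor.submit(pow, 2, 10)
    result = fut.result()          # 1024
    executor.shutdown(wait=True)
    return result, executor.statistics.executed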
class SynchronousExecutor(_futures.Executor):
"""Executor that uses the caller to execute calls synchronously.
This provides an interface to a caller that looks like an executor but
will execute the calls inside the caller thread instead of executing it
in an external process/thread for when this type of functionality is
useful to provide...
It gathers statistics about the submissions executed for post-analysis...
"""
threading = _thread.Threading()
def __init__(self, green=False, run_work_func=lambda work: work.run()):
"""Synchronous executor constructor.
:param green: when enabled this forces the usage of greened lock
classes and green futures (so that the internals of this
object operate correctly under eventlet)
:type green: bool
:param run_work_func: callable that takes a single work item and
runs it (typically in a blocking manner)
:type run_work_func: callable
"""
if green and not _utils.EVENTLET_AVAILABLE:
raise RuntimeError('Eventlet is needed to use a green'
' synchronous executor')
if not six.callable(run_work_func):
raise ValueError("Run work parameter expected to be callable")
self._run_work_func = run_work_func
self._shutoff = False
if green:
self.threading = _green.threading
self._future_cls = GreenFuture
else:
self._future_cls = Future
self._gatherer = _Gatherer(self._submit,
self.threading.lock_object,
start_before_submit=True)
@property
def alive(self):
"""Accessor to determine if the executor is alive/active."""
return not self._shutoff
def shutdown(self, wait=True):
self._shutoff = True
def restart(self):
"""Restarts this executor (*iff* previously shutoff/shutdown).
NOTE(harlowja): clears any previously gathered statistics.
"""
if self._shutoff:
self._shutoff = False
self._gatherer.clear()
@property
def statistics(self):
""":class:`.ExecutorStatistics` about the executors executions."""
return self._gatherer.statistics
def submit(self, fn, *args, **kwargs):
"""Submit some work to be executed (and gather statistics)."""
if self._shutoff:
raise RuntimeError('Can not schedule new futures'
' after being shutdown')
return self._gatherer.submit(fn, *args, **kwargs)
def _submit(self, fn, *args, **kwargs):
fut = self._future_cls()
self._run_work_func(_utils.WorkItem(fut, fn, args, kwargs))
return fut
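# Illustrative, hedged usage sketch: the synchronous executor runs the
# callable inline in the caller's thread, so the future is already resolved
# when submit() returns; restart() clears the gathered statistics after a
# shutdown, as documented above.
def _demo_synchronous_executor():
    executor = SynchronousExecutor()
    fut = executor.submit(sum, [1, 2, 3])
    executed_before = executor.statistics.executed    # 1, it already ran
    executor.shutdown()
    executor.restart()    # usable again; statistics are cleared
    return fut.result(), executed_before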
class GreenFuture(Future):
__doc__ = Future.__doc__
def __init__(self):
super(GreenFuture, self).__init__()
if not _utils.EVENTLET_AVAILABLE:
raise RuntimeError('Eventlet is needed to use a green future')
# NOTE(harlowja): replace the built-in condition with a greenthread
# compatible one so that when getting the result of this future the
# functions will correctly yield to eventlet. If this is not done then
# waiting on the future never actually causes the greenthreads to run
# and thus you wait for infinity.
if not _green.is_monkey_patched('thread'):
self._condition = _green.threading.condition_object()
class GreenThreadPoolExecutor(_futures.Executor):
"""Executor that uses a green thread pool to execute calls asynchronously.
See: https://docs.python.org/dev/library/concurrent.futures.html
and http://eventlet.net/doc/modules/greenpool.html for information on
how this works.
It gathers statistics about the submissions executed for post-analysis...
"""
threading = _green.threading
def __init__(self, max_workers=1000, check_and_reject=None):
"""Initializes a green thread pool executor.
:param max_workers: maximum number of workers that can be
simultaneously active at the same time, further
submitted work will be queued up when this limit
is reached.
:type max_workers: int
:param check_and_reject: a callback function that will be provided
two positional arguments, the first argument
will be this executor instance, and the second
will be the number of currently queued work
items in this executor's backlog; the callback
should raise a :py:class:`.RejectedSubmission`
exception if it wants to have this submission
rejected.
:type check_and_reject: callback
"""
if not _utils.EVENTLET_AVAILABLE:
raise RuntimeError('Eventlet is needed to use a green executor')
if max_workers <= 0:
raise ValueError("Max workers must be greater than zero")
self._max_workers = max_workers
self._pool = _green.Pool(self._max_workers)
self._delayed_work = _green.Queue()
self._check_and_reject = check_and_reject or (lambda e, waiting: None)
self._shutdown_lock = self.threading.lock_object()
self._shutdown = False
self._gatherer = _Gatherer(self._submit,
self.threading.lock_object)
@property
def alive(self):
"""Accessor to determine if the executor is alive/active."""
return not self._shutdown
@property
def statistics(self):
""":class:`.ExecutorStatistics` about the executors executions."""
return self._gatherer.statistics
def submit(self, fn, *args, **kwargs):
"""Submit some work to be executed (and gather statistics).
:param args: non-keyworded arguments
:type args: list
:param kwargs: key-value arguments
:type kwargs: dictionary
"""
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError('Can not schedule new futures'
' after being shutdown')
self._check_and_reject(self, self._delayed_work.qsize())
return self._gatherer.submit(fn, *args, **kwargs)
def _submit(self, fn, *args, **kwargs):
f = GreenFuture()
work = _utils.WorkItem(f, fn, args, kwargs)
if not self._spin_up(work):
self._delayed_work.put(work)
return f
def _spin_up(self, work):
"""Spin up a greenworker if less than max_workers.
:param work: work to be given to the greenworker
:returns: whether a green worker was spun up or not
:rtype: boolean
"""
alive = self._pool.running() + self._pool.waiting()
if alive < self._max_workers:
self._pool.spawn_n(_green.GreenWorker(work, self._delayed_work))
return True
return False
def shutdown(self, wait=True):
with self._shutdown_lock:
if not self._shutdown:
self._shutdown = True
shutoff = True
else:
shutoff = False
if wait and shutoff:
self._delayed_work.join()
self._pool.waitall()
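# Illustrative, hedged usage sketch (only meaningful where eventlet is
# installed and, ideally, monkey patched): the green executor mirrors the
# thread pool API but backs submissions with an eventlet GreenPool and
# returns GreenFuture objects.
def _demo_green_pool():
    if not _utils.EVENTLET_AVAILABLE:
        return None
    executor = GreenThreadPoolExecutor(max_workers=10)
    fut = executor.submit(pow, 3, 4)
    result = fut.result()    # 81
    executor.shutdown(wait=True)
    return result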
class ExecutorStatistics(object):
"""Holds *immutable* information about a executors executions."""
__slots__ = ['_failures', '_executed', '_runtime', '_cancelled']
_REPR_MSG_TPL = ("<ExecutorStatistics object at 0x%(ident)x"
" (failures=%(failures)s,"
" executed=%(executed)s, runtime=%(runtime)0.2f,"
" cancelled=%(cancelled)s)>")
def __init__(self, failures=0, executed=0, runtime=0.0, cancelled=0):
self._failures = failures
self._executed = executed
self._runtime = runtime
self._cancelled = cancelled
@property
def failures(self):
"""How many submissions ended up raising exceptions.
:returns: how many submissions ended up raising exceptions
:rtype: number
"""
return self._failures
@property
def executed(self):
"""How many submissions were executed (failed or not).
:returns: how many submissions were executed
:rtype: number
"""
return self._executed
@property
def runtime(self):
"""Total runtime of all submissions executed (failed or not).
:returns: total runtime of all submissions executed
:rtype: number
"""
return self._runtime
@property
def cancelled(self):
"""How many submissions were cancelled before executing.
:returns: how many submissions were cancelled before executing
:rtype: number
"""
return self._cancelled
@property
def average_runtime(self):
"""The average runtime of all submissions executed.
:returns: average runtime of all submissions executed
:rtype: number
:raises: ZeroDivisionError when no executions have occurred.
"""
return self._runtime / self._executed
def __repr__(self):
return self._REPR_MSG_TPL % ({
'ident': id(self),
'failures': self._failures,
'executed': self._executed,
'runtime': self._runtime,
'cancelled': self._cancelled,
})
|
|
import os
from itertools import chain, product
import copy
from tkinter import *
from tkinter.messagebox import showerror, showinfo, askokcancel
from tkinter.filedialog import asksaveasfile
from PIL import ImageTk, Image, ImageOps
# These are in Utils folder
import sys
sys.path.append(os.path.join('.', 'Utils' ))
import writesvl
import writetxt
# These are in current path
import DanceAnno_PlotSignals # functions to plot the signals
from DanceAnno_AnnFunctions import PlotAnnotation
from DanceAnno_BeatsLines import BeatsLines
import DanceAnno_PlayLine
import DanceAnno_MainGUI_layout # Layout configuration
import DanceAnno_ResizingCanvases
class DanceAnno:
def __init__(self, myLoader):
"""
:param myLoader: The data stems from DataAnno_Loader.py
myLoader
.signalsSelected[key] : Dictionary of selection of signals {'Neck':1, 'Torso':0,...}, where 1 is selected
.signals_wrapper : Dictionary of signals {'Neck':[...], 'Torso':[....],...}
.Fs : Kinect sampling rate
:return:
"""
# construct the root frame and its widgets
DanceAnno_MainGUI_layout.layout(self)
# time zoom
self.tzoom = 1
# data object
self.myLoader = myLoader
# current frame that the playline is indicating
self.currFrame = 0
# Status variables
self.isPlaying = False
self.isPaused = False
self.isRewinded = False
# Number of signals = 3 * selected signals
self.nSignals = 3 * sum(v.get() == 1 for v in myLoader.signalsSelected.values())
# Names of signals ['Left foot', 'Right foot']
self.sgNames = [s for s in self.myLoader.signalsSelected.keys() if self.myLoader.signalsSelected[s].get() == 1]
# Frame containing the images
self.frame_video = DanceAnno_ResizingCanvases.ResizingVideoCanvas( self.myframe, self.mirrorizeFrame )
# Canvas Widget for each signal
self.canvas_SG = self.nSignals*[0]
for i in range(self.nSignals):
self.canvas_SG[i] = DanceAnno_ResizingCanvases.ResizingSignalCanvas(self.myframe, width = 400, height = 60,
bg="white", highlightthickness=0)
self.canvas_SG[i].configure(scrollregion = self.canvas_SG[i].bbox("all_resize"))
self.canvas_SG[i].bind("<ButtonPress-1>", self.scroll_start)
self.canvas_SG[i].bind("<ButtonRelease-1>", self.scroll_stop)
self.canvas_SG[i].bind("<B1-Motion>", self.scroll_move)
self.canvas_SG[i].bind("<MouseWheel>", self.onWheel)
self.canvas_SG[i].bind("<Left>", self.leftarrowpress_callback)
self.canvas_SG[i].bind("<Right>", self.rightarrowpress_callback)
self.canvas_SG[i].bind("<Up>", self.uparrowpress_callback)
self.canvas_SG[i].bind("<Down>", self.downarrowpress_callback)
self.canvas_SG[i].bind('<Motion>', self.mousemotion)
self.canvas_SG[0].focus_set() # This enables the buttons
# Names of the axes ['left foot x','l f y','l f z','r f x','r f y','r f z']
self.labels_axes = list(map(' '.join, chain(product(self.sgNames,['x','y','z']))))
# Place the widgets in the window
DanceAnno_MainGUI_layout.placement(self)
# Wait a little and then initialize the variables
self.root.after(200, self.initializeGUIVars)
# Ignite GUI
self.root.mainloop()
# = Initialize GUI variables =
def initializeGUIVars(self):
# Handler for Playline widget
self.playLine = self.nSignals*[0]
# Handler for Music Beats Lines (widget); build independent inner lists to avoid aliasing
self.beatsLines = [self.myLoader.nBeats*[0] for _ in range(self.nSignals)]
# Minimum of each signal in Y axis
self.MinSG = self.nSignals*[0]
# Maximum of each signal in Y axis
self.MaxSG = self.nSignals*[0]
# x y z line colors repeated for each joint
self.colors = self.nSignals*['red','green','blue']
# Number of video frames in total
self.nTotalFrames = len(self.myLoader.indexFrames)
# Plot signals and axis labels
for i in range(self.nSignals):
# length of the signal in samples
self.Ls = len(self.myLoader.signals_wrapper[self.sgNames[i//3]][0])
# Plot signal
self.MinSG[i], self.MaxSG[i] = DanceAnno_PlotSignals.plotSignalJointDim(self.canvas_SG[i],
self.myLoader.signals_wrapper[self.sgNames[i//3]][i % 3],
self.colors[i], self.sgNames[i//3])
# Plot labels for x and y
DanceAnno_PlotSignals.plotXLabels(self.canvas_SG[i], self.Ls, self.myLoader.Fs, i, self.nTotalFrames)
DanceAnno_PlotSignals.plotYLabels(self.canvas_SG[i], self.MinSG[i], self.MaxSG[i])
# Handler for Play lines (array each per signal canvas)
self.myPlayLine = DanceAnno_PlayLine.PlayLine(self.root, self, self.canvas_SG, self.playLine,
self.myLoader.indexFrames, self.myLoader.length_signal_samples)
# Update also the video
if self.nTotalFrames > 0:
self.updateVideoFrame(0)
# Init first level annotation if available (color, button to generate annotation, level indicator)
self.myPlotAnnotationA = PlotAnnotation(self, '#0f00af', "<ButtonPress-3>", 'A')
# Init second level annotation if available
self.myPlotAnnotationB = PlotAnnotation(self, '#0ff00f', "b", 'B')
# Now plot them
self.myPlotAnnotationA.plot(self.myLoader.annotationSecs, self.myLoader.labels, self.canvas_SG,
self.myLoader.Fs, self.root, self.myLoader.length_signal_samples)
self.myPlotAnnotationB.plot(self.myLoader.annotationSecsB, self.myLoader.labelsB, self.canvas_SG,
self.myLoader.Fs, self.root, self.myLoader.length_signal_samples)
# Plot also the music beats lines if any given
if self.myLoader.nBeats > 0:
self.myBeatsLines = BeatsLines(self, '#777777')
self.myBeatsLines.plot(self.root, self.canvas_SG,
self.myLoader.beats,
self.myLoader.Fs,
self.myLoader.length_signal_samples)
# Set the bounding box for scrolling
for i in range(self.nSignals):
self.canvas_SG[i].config(scrollregion = self.canvas_SG[i].bbox("all_resize"))
# = Update the frame in Video frame widget =
def updateVideoFrame(self, iFrame):
if iFrame >= len(self.myLoader.indexFrames):
return
try:
# Show the frame number and the time stamp of the frame
self.str_time_info.set( str(self.myLoader.indexFrames[iFrame]) + " Frame" + "\n" + str(self.myLoader.indexFrames[iFrame]/25) + " secs" )
# set global current frame to the frame of the video
self.currFrame = iFrame
# construct the image filename by concatenation
fileiter = os.path.join(self.myLoader.dname, self.myLoader.prefixname +
str(self.myLoader.indexFrames[iFrame]) + self.myLoader.videof_ext)
# load the image
self.frame_video.original = Image.open(fileiter)
# image size
size = (self.frame_video.winfo_width(), self.frame_video.winfo_height())
# resize to current window size
resized_image = self.frame_video.original.resize(size, Image.ANTIALIAS)
# mirrorize the image if user wishes to
if self.mirrorizeFrame.get() == 1:
resized_image = ImageOps.mirror(resized_image)
# convert image to suitable format for Tk
self.frame_video.image = ImageTk.PhotoImage(resized_image)
self.frame_video.aspect = size[1] / size[0]
# put the image to the widget
self.frame_video.displayCanvas.create_image(0, 0, image = self.frame_video.image, anchor=NW, tags="IMG")
# force to update the widget
self.frame_video.update()
except Exception as e:
print("Unexpected update videoFrame error:", sys.exc_info()[0], sys.exc_info(), " indexFrame:", self.myLoader.indexFrames[iFrame],
" currFrame", self.currFrame,
" n", len(self.myLoader.indexFrames), " last", self.myLoader.indexFrames[-1])
# = Play button =
def playForwardFunctionality(self):
self.play(1)
def playBackwardFunctionality(self):
self.play(-1)
def play(self, step_frame):
if self.isPlaying:
self.pauseFunctionality()
return
self.isPlaying = True
self.isPaused = False
self.isRewinded = False
if step_frame == 1:
end_frame = len(self.myLoader.indexFrames)
elif step_frame == -1:
end_frame = -1
start_frame = copy.deepcopy(self.currFrame)
for i in range(start_frame, end_frame, step_frame):
if self.isPlaying:
self.currFrame = i
self.updateVideoFrame(i)
self.myPlayLine.updatePlayLine(i)
#time.sleep(1/Fs)
return
# Stop button
def stopFunctionality(self):
self.isPlaying = False
self.isPaused = False
self.isRewinded= True
self.currFrame = 0
self.updateVideoFrame(self.currFrame)
self.myPlayLine.updatePlayLine(self.currFrame)
return
# Pause button
def pauseFunctionality(self):
self.isPlaying = False
self.isPaused = True
self.isRewinded= False
# Frame Left
def frameleftFunctionality(self):
self.bt_frameleft.config(state=DISABLED)
self.isPlaying = True
self.isPaused = True
self.isRewinded= False
if self.currFrame > 0:
self.currFrame = self.currFrame - 1
self.updateVideoFrame(self.currFrame)
self.myPlayLine.updatePlayLine(self.currFrame)
self.bt_frameleft.config(state=NORMAL)
return
#-------- Frame Right --------
def framerightFunctionality(self):
self.bt_frameright.config( state = DISABLED )
self.isPlaying = True
self.isPaused = True
self.isRewinded= False
if self.currFrame < len(self.myLoader.indexFrames) -1:
self.currFrame = self.currFrame + 1
self.updateVideoFrame(self.currFrame)
self.myPlayLine.updatePlayLine(self.currFrame)
self.bt_frameright.config(state=NORMAL)
return
# - Scroll start -
def scroll_start(self,event):
for dim in range(self.nSignals):
self.canvas_SG[dim].scan_mark(event.x, 0)
# - Scroll stop -
def scroll_stop(self, event):
return
# - Scroll move -
def scroll_move(self,event):
for dim in range(self.nSignals):
self.canvas_SG[dim].scan_dragto(event.x, 0, gain=1)
#--------- on Wheel -------------------------------
def onWheel(self,event):
d = event.delta
id_el = self.canvas_SG[0].find_withtag('ENDLINE')
x_ENDLINE = self.canvas_SG[0].coords(id_el)[0]
id_sl = self.canvas_SG[0].find_withtag('STARTLINE')
x_STARTLINE = self.canvas_SG[0].coords(id_sl)[0]
# prevent the canvas coordinate width from becoming smaller than the canvas window width
if x_ENDLINE - x_STARTLINE < self.canvas_SG[0].winfo_width() and d <= 0:
return
else:
if d < 0:
amt = 0.95
else:
amt = 1.05
for dim in range(self.nSignals):
self.canvas_SG[dim].scale("all_resize", self.canvas_SG[dim].canvasx(self.mouse_x), 0, amt, 1)
self.canvas_SG[dim].config(scrollregion = self.canvas_SG[dim].bbox("all_resize"))
#----- pan left --------
def panLeft(self):
for dim in range(self.nSignals):
self.canvas_SG[dim].xview_scroll(-1, UNITS)
return
#----- pan right --------
def panRight(self):
for dim in range(self.nSignals):
self.canvas_SG[dim].xview_scroll(1, UNITS)
return
#----- zoom in -----------
def zoomIn(self):
amt = 1.05
for dim in range(self.nSignals):
self.canvas_SG[dim].scale("all_resize", 0, 0, amt, 1)
return
#------ zoom out ---------
def zoomOut(self):
amt = 1/1.05
for dim in range(self.nSignals):
self.canvas_SG[dim].scale("all_resize", 0, 0, amt, 1)
return
# Keypress callbacks --------
# up arrow = frame left
def uparrowpress_callback(self, event):
self.frameleftFunctionality()
# down arrow = frame right
def downarrowpress_callback(self, event):
self.framerightFunctionality()
# right arrow = play forward
def rightarrowpress_callback(self, event):
if self.isPlaying:
self.pauseFunctionality()
else:
self.playForwardFunctionality()
# left arrow = play backward
def leftarrowpress_callback(self, event):
if self.isPlaying:
self.pauseFunctionality()
else:
self.playBackwardFunctionality()
# Unbinding/binding buttons to functionalities is useful because sometimes functionalities overlap
# Bind the keyboard and mouse keys to functionalities
def bindButtons(self):
for dim in range(self.nSignals):
self.canvas_SG[dim].bind("<ButtonPress-1>", self.scroll_start)
self.canvas_SG[dim].bind("<ButtonRelease-1>", self.scroll_stop)
self.canvas_SG[dim].bind("<B1-Motion>", self.scroll_move)
self.canvas_SG[dim].bind("<MouseWheel>", self.onWheel)
self.canvas_SG[dim].bind("<Left>", self.leftarrowpress_callback)
self.canvas_SG[dim].bind("<Right>", self.rightarrowpress_callback)
self.canvas_SG[dim].bind("<Up>", self.uparrowpress_callback)
self.canvas_SG[dim].bind("<Down>", self.downarrowpress_callback)
self.canvas_SG[dim].bind('<Motion>', self.mousemotion)
# Unbind the buttons from the functionalities
def unbindButtons(self):
for dim in range(self.nSignals):
self.canvas_SG[dim].unbind("<ButtonPress-1>")
self.canvas_SG[dim].unbind("<ButtonRelease-1>")
self.canvas_SG[dim].unbind("<B1-Motion>")
self.canvas_SG[dim].unbind("<MouseWheel>")
self.canvas_SG[dim].unbind("<Left>")
self.canvas_SG[dim].unbind("<Right>")
self.canvas_SG[dim].unbind("<Up>")
self.canvas_SG[dim].unbind("<Down>")
self.canvas_SG[dim].unbind('<Motion>')
# register mouse position so that zoom in or out (by mouse wheel) is done with respect to the current mouse position
def mousemotion(self, event):
self.mouse_x = event.x
self.mouse_y = event.y
# Open another performance
def newSession(self):
if askokcancel("Close", "Are you sure?"):
self.root.destroy()
os.system("python DanceAnno_Application.py")
return
# Exit
def close_window(self):
if askokcancel("Exit", "Are you sure?"):
self.root.destroy()
# Instant image update for the mirrorize frame functionality
def refreshVideoFrame(self):
self.updateVideoFrame(self.currFrame)
# Show help window
def showHelp(self):
showinfo("Help", open('Graphics/help.txt').read())
# Save Annotation Functionality
# TODO: change tags so that there are not so many text comparisons
def saveAnnotation(self):
annotation_result = []
# Canvas x coordinate for the starting and ending line
x_STARTLINE = self.canvas_SG[0].coords(self.canvas_SG[0].find_withtag('STARTLINE'))[0]
x_ENDLINE = self.canvas_SG[0].coords(self.canvas_SG[0].find_withtag('ENDLINE'))[0]
# iterate through all annotation objects (segmentation lines and texts)
for item in self.canvas_SG[0].find_withtag("anntoken"):
# get all tags for this item
tags = self.canvas_SG[0].gettags(item)
# if the item is a segmentation line
if any("_line" in s for s in tags): # A and B might have 1_line tag
sequential_segmentation_index = tags[1][0:tags[1].rfind('_')] # from 5_line get 5
# Canvas x coordinate for this item
x_incanvas = self.canvas_SG[0].coords(item)[0]
# Convert x coordinate to frame index
v = int( (x_incanvas -x_STARTLINE) / (x_ENDLINE - x_STARTLINE) * self.Ls)
# A for first level annotation, B for second level annotation
levelId = tags[2]
# Find the text tag for the current line item
text_items = self.canvas_SG[0].find_withtag( sequential_segmentation_index + '_text' )
# iterate all text items containing 5_text (there may be one or two depending on the annotation levels)
for itemPerLevel in text_items:
# if the item refers to the current annotation level then get the tag that is its label
if self.canvas_SG[0].gettags(itemPerLevel)[2] == levelId:
label = self.canvas_SG[0].gettags(itemPerLevel)[3]
# append sample index, label, and annotation level indicator to a list of lists
annotation_result.append([v, label, levelId])
annotation_result = sorted(annotation_result)
# print annotation result
print("\n")
for row in annotation_result:
print(row)
debug_Flag = False
if debug_Flag:
print("not saving in debug mode")
else:
# Dialogue for selecting file
candidateSaveName = self.myLoader.dname[self.myLoader.dname.rfind("\\")+1:-8] + 'DanceAnnotationTool'
candidateSaveName = candidateSaveName[candidateSaveName.rfind("/")+1:]
candidateSaveName = candidateSaveName[0].upper() + candidateSaveName[1:]
if self.myLoader.db == 'salsa':
fhandler_saveanno = asksaveasfile(mode='w', initialdir="Data\\SVL", initialfile=candidateSaveName, defaultextension=".svl",
filetypes=(
("SVL (only one level of annotation)", "*.svl"),
("Raw txt", "*.txt"),
("All Files", "*.*")
)
)
elif self.myLoader.db == 'calus':
fhandler_saveanno = asksaveasfile(mode='w', initialdir="Data\\Calus", initialfile=candidateSaveName, defaultextension=".txt",
filetypes=(
("Raw txt", "*.txt"),
("SVL (only one level of annotation)", "*.svl"),
("All Files", "*.*")
)
)
# Save to file
if fhandler_saveanno is None: # asksaveasfile return `None` if dialog closed with "cancel".
return #showerror("Message", "No such file")
else:
dummy, fextension = os.path.splitext(fhandler_saveanno.name)
if fextension == '.txt':
writetxt.convertData_and_Save(fhandler_saveanno, annotation_result)
fhandler_saveanno.close()
elif fextension == '.svl':
writesvl.convertData_and_Save(fhandler_saveanno, annotation_result, self.myLoader.Fs)
fhandler_saveanno.close()
else:
showerror("Error","Unsupported file extension for output")
return
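# Illustrative, standalone helper (not wired into the GUI above): the canvas-x
# to sample-index mapping applied inside saveAnnotation(), written out as a
# pure function so the conversion is easy to check by hand.
def _canvas_x_to_sample_index(x_incanvas, x_startline, x_endline, n_samples):
    """Maps a canvas x coordinate between START/ENDLINE to a signal sample index."""
    return int((x_incanvas - x_startline) / (x_endline - x_startline) * n_samples)
# Example: a segmentation line halfway along a 1000-sample signal:
# _canvas_x_to_sample_index(250, 0, 500, 1000) == 500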
|
|
# Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
ssl() -- secure socket layer support (only available if configured)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
Integer constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
from functools import partial
from types import MethodType
try:
import _ssl
except ImportError:
# no SSL support
pass
else:
def ssl(sock, keyfile=None, certfile=None):
# we do an internal import here because the ssl
# module imports the socket module
import ssl as _realssl
warnings.warn("socket.ssl() is deprecated. Use ssl.wrap_socket() instead.",
DeprecationWarning, stacklevel=2)
return _realssl.sslwrap_simple(sock, keyfile, certfile)
# we need to import the same constants we used to...
from _ssl import SSLError as sslerror
from _ssl import \
RAND_add, \
RAND_egd, \
RAND_status, \
SSL_ERROR_ZERO_RETURN, \
SSL_ERROR_WANT_READ, \
SSL_ERROR_WANT_WRITE, \
SSL_ERROR_WANT_X509_LOOKUP, \
SSL_ERROR_SYSCALL, \
SSL_ERROR_SSL, \
SSL_ERROR_WANT_CONNECT, \
SSL_ERROR_EOF, \
SSL_ERROR_INVALID_ERROR_CODE
import os, sys, warnings
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import errno
except ImportError:
errno = None
EBADF = getattr(errno, 'EBADF', 9)
EINTR = getattr(errno, 'EINTR', 4)
__all__ = ["getfqdn", "create_connection"]
__all__.extend(os._get_exports_list(_socket))
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
errorTab = {}
errorTab[10004] = "The operation was interrupted."
errorTab[10009] = "A bad file handle was passed."
errorTab[10013] = "Permission denied."
    errorTab[10014] = "A fault occurred on the network." # WSAEFAULT
errorTab[10022] = "An invalid operation was attempted."
errorTab[10035] = "The socket operation would block"
errorTab[10036] = "A blocking operation is already in progress."
errorTab[10048] = "The network address is in use."
errorTab[10054] = "The connection has been reset."
errorTab[10058] = "The network has been shut down."
errorTab[10060] = "The operation timed out."
errorTab[10061] = "Connection refused."
errorTab[10063] = "The name is too long."
errorTab[10064] = "The host is down."
errorTab[10065] = "The host is unreachable."
__all__.append("errorTab")
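# Override of the C-level gethostbyname imported from _socket above: the name is
# resolved with dnspython against Google's public resolver (8.8.8.8) and the
# address of the first record in the answer is returned.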
def gethostbyname(name=''):
import dns.resolver
my_res = dns.resolver.Resolver()
my_res.nameservers=['8.8.8.8']
answer = my_res.query(name)
host=answer.rrset.items[0].address
return host
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned.
"""
name = name.strip()
if not name or name == '0.0.0.0':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases:
if '.' in name:
break
else:
name = hostname
return name
_socketmethods = (
'bind', 'connect', 'connect_ex', 'fileno', 'listen',
'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
'sendall', 'setblocking',
'settimeout', 'gettimeout', 'shutdown')
if os.name == "nt":
_socketmethods = _socketmethods + ('ioctl',)
if sys.platform == "riscos":
_socketmethods = _socketmethods + ('sleeptaskw',)
# All the method names that must be delegated to either the real socket
# object or the _closedsocket object.
_delegate_methods = ("recv", "recvfrom", "recv_into", "recvfrom_into",
"send", "sendto")
class _closedsocket(object):
__slots__ = []
def _dummy(*args):
raise error(EBADF, 'Bad file descriptor')
# All _delegate_methods must also be initialized here.
send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy
__getattr__ = _dummy
# Wrapper around platform socket objects. This implements
# a platform-independent dup() functionality. The
# implementation currently relies on reference counting
# to close the underlying socket object.
class _socketobject(object):
__doc__ = _realsocket.__doc__
__slots__ = ["_sock", "__weakref__"] + list(_delegate_methods)
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
if _sock is None:
_sock = _realsocket(family, type, proto)
self._sock = _sock
for method in _delegate_methods:
setattr(self, method, getattr(_sock, method))
def close(self, _closedsocket=_closedsocket,
_delegate_methods=_delegate_methods, setattr=setattr):
# This function should not reference any globals. See issue #808164.
self._sock = _closedsocket()
dummy = self._sock._dummy
for method in _delegate_methods:
setattr(self, method, dummy)
close.__doc__ = _realsocket.close.__doc__
def accept(self):
sock, addr = self._sock.accept()
return _socketobject(_sock=sock), addr
accept.__doc__ = _realsocket.accept.__doc__
def dup(self):
"""dup() -> socket object
Return a new socket object connected to the same system resource."""
return _socketobject(_sock=self._sock)
def makefile(self, mode='r', bufsize=-1):
"""makefile([mode[, bufsize]]) -> file object
Return a regular file object corresponding to the socket. The mode
and bufsize arguments are as for the built-in open() function."""
return _fileobject(self._sock, mode, bufsize)
family = property(lambda self: self._sock.family, doc="the socket family")
type = property(lambda self: self._sock.type, doc="the socket type")
proto = property(lambda self: self._sock.proto, doc="the socket protocol")
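# The loop below attaches every name in _socketmethods to _socketobject as a thin
# wrapper that forwards the call to the wrapped _sock object and reuses the real
# socket method's docstring.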
def meth(name,self,*args):
return getattr(self._sock,name)(*args)
for _m in _socketmethods:
p = partial(meth,_m)
p.__name__ = _m
p.__doc__ = getattr(_realsocket,_m).__doc__
m = MethodType(p,None,_socketobject)
setattr(_socketobject,_m,m)
socket = SocketType = _socketobject
class _fileobject(object):
"""Faux file object attached to a socket object."""
default_bufsize = 8192
name = "<socket>"
__slots__ = ["mode", "bufsize", "softspace",
# "closed" is a property, see below
"_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf", "_wbuf_len",
"_close"]
def __init__(self, sock, mode='rb', bufsize=-1, close=False):
self._sock = sock
self.mode = mode # Not actually used in this version
if bufsize < 0:
bufsize = self.default_bufsize
self.bufsize = bufsize
self.softspace = False
# _rbufsize is the suggested recv buffer size. It is *strictly*
# obeyed within readline() for recv calls. If it is larger than
# default_bufsize it will be used for recv calls within read().
if bufsize == 0:
self._rbufsize = 1
elif bufsize == 1:
self._rbufsize = self.default_bufsize
else:
self._rbufsize = bufsize
self._wbufsize = bufsize
# We use StringIO for the read buffer to avoid holding a list
# of variously sized string objects which have been known to
# fragment the heap due to how they are malloc()ed and often
# realloc()ed down much smaller than their original allocation.
self._rbuf = StringIO()
self._wbuf = [] # A list of strings
self._wbuf_len = 0
self._close = close
def _getclosed(self):
return self._sock is None
closed = property(_getclosed, doc="True if the file is closed")
def close(self):
try:
if self._sock:
self.flush()
finally:
if self._close:
self._sock.close()
self._sock = None
def __del__(self):
try:
self.close()
except:
# close() may fail if __init__ didn't complete
pass
def flush(self):
if self._wbuf:
data = "".join(self._wbuf)
self._wbuf = []
self._wbuf_len = 0
buffer_size = max(self._rbufsize, self.default_bufsize)
data_size = len(data)
write_offset = 0
view = memoryview(data)
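            # Send the buffered data in buffer_size chunks; if sendall() fails part
            # way through, the finally block re-buffers the unsent remainder so it is
            # not lost and can be retried by a later flush().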
try:
while write_offset < data_size:
self._sock.sendall(view[write_offset:write_offset+buffer_size])
write_offset += buffer_size
finally:
if write_offset < data_size:
remainder = data[write_offset:]
del view, data # explicit free
self._wbuf.append(remainder)
self._wbuf_len = len(remainder)
def fileno(self):
return self._sock.fileno()
def write(self, data):
data = str(data) # XXX Should really reject non-string non-buffers
if not data:
return
self._wbuf.append(data)
self._wbuf_len += len(data)
if (self._wbufsize == 0 or
self._wbufsize == 1 and '\n' in data or
self._wbuf_len >= self._wbufsize):
self.flush()
def writelines(self, list):
# XXX We could do better here for very long lists
# XXX Should really reject non-string non-buffers
lines = filter(None, map(str, list))
self._wbuf_len += sum(map(len, lines))
self._wbuf.extend(lines)
if (self._wbufsize <= 1 or
self._wbuf_len >= self._wbufsize):
self.flush()
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(rbufsize)
except error, e:
if e.args[0] == EINTR:
continue
raise
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
try:
data = self._sock.recv(left)
except error, e:
if e.args[0] == EINTR:
continue
raise
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self._sock.recv
while True:
try:
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
except error, e:
# The try..except to catch EINTR was moved outside the
# recv loop to avoid the per byte overhead.
if e.args[0] == EINTR:
continue
raise
break
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except error, e:
if e.args[0] == EINTR:
continue
raise
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except error, e:
if e.args[0] == EINTR:
continue
raise
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
def readlines(self, sizehint=0):
total = 0
list = []
while True:
line = self.readline()
if not line:
break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
# Iterator protocols
def __iter__(self):
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise error("getaddrinfo returns an empty list")
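# Illustrative use of create_connection (host, port and request are assumptions,
# not part of this module):
#   conn = create_connection(("example.com", 80), timeout=5)
#   conn.sendall("GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")
#   reply = conn.makefile("rb").read()
#   conn.close()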
|
|
from argparse import ArgumentParser
from multiprocessing import set_start_method
from re import split as re_split
from fabric.api import cd, local, run
from logger import logger
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.helpers.rest import RestHelper
from perfrunner.remote.context import all_clients
from perfrunner.settings import ClusterSpec, TestConfig
set_start_method("fork")
LIBCOUCHBASE_BASE_URL = "https://github.com/couchbase/libcouchbase/releases/download"
LIBCOUCHBASE_PACKAGES = [{"version": "2.9.0",
"os": "ubuntu",
"package": "libcouchbase-2.9.0_ubuntu1804_amd64",
"package_path": "libcouchbase-2.9.0_ubuntu1804_amd64",
"format": "tar",
"install_cmds": [
"grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase2-core_2.9.0-1_amd64.deb "
"libcouchbase2-libevent_2.9.0-1_amd64.deb "
"libcouchbase-dev_2.9.0-1_amd64.deb "
"libcouchbase2-bin_2.9.0-1_amd64.deb"]},
{"version": "2.9.3",
"os": "ubuntu",
"package": "libcouchbase-2.9.3_ubuntu1804_amd64",
"package_path": "libcouchbase-2.9.3_ubuntu1804_amd64",
"format": "tar",
"install_cmds": [
"grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase2-core_2.9.3-1_amd64.deb "
"libcouchbase2-libevent_2.9.3-1_amd64.deb "
"libcouchbase-dev_2.9.3-1_amd64.deb "
"libcouchbase2-bin_2.9.3-1_amd64.deb"]},
{"version": "2.9.5",
"os": "ubuntu",
"package": "libcouchbase-2.9.5_ubuntu1804_amd64",
"package_path": "libcouchbase-2.9.5_ubuntu1804_amd64",
"format": "tar",
"install_cmds": [
"grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase2-core_2.9.5-1_amd64.deb "
"libcouchbase2-libevent_2.9.5-1_amd64.deb "
"libcouchbase-dev_2.9.5-1_amd64.deb "
"libcouchbase2-bin_2.9.5-1_amd64.deb"]},
{"version": "2.10.0",
"os": "ubuntu",
"package": "libcouchbase-2.10.0_ubuntu1804_amd64",
"package_path": "libcouchbase-2.10.0_ubuntu1804_amd64",
"format": "tar",
"install_cmds": [
"grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase2-core_2.10.0-1_amd64.deb "
"libcouchbase2-libevent_2.10.0-1_amd64.deb "
"libcouchbase-dev_2.10.0-1_amd64.deb "
"libcouchbase2-bin_2.10.0-1_amd64.deb"]},
{"version": "2.10.1",
"os": "ubuntu",
"package": "libcouchbase-2.10.1_ubuntu1804_amd64",
"package_path": "libcouchbase-2.10.1_ubuntu1804_amd64",
"format": "tar",
"install_cmds": [
"grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase2-core_2.10.1-1_amd64.deb "
"libcouchbase2-libevent_2.10.1-1_amd64.deb "
"libcouchbase-dev_2.10.1-1_amd64.deb "
"libcouchbase2-bin_2.10.1-1_amd64.deb"]},
{"version": "2.10.3",
"os": "ubuntu",
"package": "libcouchbase-2.10.3_ubuntu1804_amd64",
"package_path": "libcouchbase-2.10.3_ubuntu1804_amd64",
"format": "tar",
"install_cmds": [
"grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase2-core_2.10.3-1_amd64.deb "
"libcouchbase2-libevent_2.10.3-1_amd64.deb "
"libcouchbase-dev_2.10.3-1_amd64.deb "
"libcouchbase2-bin_2.10.3-1_amd64.deb"]},
{"version": "2.10.4",
"os": "ubuntu",
"package": "libcouchbase-2.10.4_ubuntu1804_amd64",
"package_path": "libcouchbase-2.10.4_ubuntu1804_amd64",
"format": "tar",
"install_cmds": [
"grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase2-core_2.10.4-1_amd64.deb "
"libcouchbase2-libevent_2.10.4-1_amd64.deb "
"libcouchbase-dev_2.10.4-1_amd64.deb "
"libcouchbase2-bin_2.10.4-1_amd64.deb"]},
{"version": "2.10.5",
"os": "ubuntu",
"package": "libcouchbase-2.10.5_ubuntu1804_amd64",
"package_path": "libcouchbase-2.10.5_ubuntu1804_amd64",
"format": "tar",
"install_cmds": [
"grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase2-core_2.10.5-1_amd64.deb "
"libcouchbase2-libevent_2.10.5-1_amd64.deb "
"libcouchbase-dev_2.10.5-1_amd64.deb "
"libcouchbase2-bin_2.10.5-1_amd64.deb"]},
{"version": "3.0.0",
"os": "ubuntu",
"package": "libcouchbase-3.0.0_ubuntu1804_bionic_amd64",
"package_path": "libcouchbase-3.0.0_ubuntu1804_bionic_amd64",
"format": "tar",
"install_cmds":
["grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase3_3.0.0-1_amd64.deb "
"libcouchbase3-libevent_3.0.0-1_amd64.deb "
"libcouchbase-dbg_3.0.0-1_amd64.deb "
"libcouchbase3-libev_3.0.0-1_amd64.deb "
"libcouchbase3-tools_3.0.0-1_amd64.deb "
"libcouchbase-dev_3.0.0-1_amd64.deb"]},
{"version": "3.0.1",
"os": "ubuntu",
"package": "libcouchbase-3.0.1_ubuntu1804_bionic_amd64",
"package_path": "libcouchbase-3.0.1_ubuntu1804_bionic_amd64",
"format": "tar",
"install_cmds":
["grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase3_3.0.1-1_amd64.deb "
"libcouchbase3-libevent_3.0.1-1_amd64.deb "
"libcouchbase-dbg_3.0.1-1_amd64.deb "
"libcouchbase3-libev_3.0.1-1_amd64.deb "
"libcouchbase3-tools_3.0.1-1_amd64.deb "
"libcouchbase-dev_3.0.1-1_amd64.deb"]},
{"version": "3.0.2",
"os": "ubuntu",
"package": "libcouchbase-3.0.2_ubuntu1804_bionic_amd64",
"package_path": "libcouchbase-3.0.2_ubuntu1804_bionic_amd64",
"format": "tar",
"install_cmds":
["grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase3_3.0.2-1_amd64.deb "
"libcouchbase3-libevent_3.0.2-1_amd64.deb "
"libcouchbase-dbg_3.0.2-1_amd64.deb "
"libcouchbase3-libev_3.0.2-1_amd64.deb "
"libcouchbase3-tools_3.0.2-1_amd64.deb "
"libcouchbase-dev_3.0.2-1_amd64.deb"]},
{"version": "3.0.7",
"os": "ubuntu",
"package": "libcouchbase-3.0.7_ubuntu1804_bionic_amd64",
"package_path": "libcouchbase-3.0.7_ubuntu1804_bionic_amd64",
"format": "tar",
"install_cmds":
["grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase3_3.0.7-1_amd64.deb "
"libcouchbase3-libevent_3.0.7-1_amd64.deb "
"libcouchbase-dbg_3.0.7-1_amd64.deb "
"libcouchbase3-libev_3.0.7-1_amd64.deb "
"libcouchbase3-tools_3.0.7-1_amd64.deb "
"libcouchbase-dev_3.0.7-1_amd64.deb"]},
{"version": "3.2.0",
"os": "ubuntu",
"package": "libcouchbase-3.2.0_ubuntu1804_bionic_amd64",
"package_path": "libcouchbase-3.2.0_ubuntu1804_bionic_amd64",
"format": "tar",
"install_cmds":
["grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase3_3.2.0-1_amd64.deb "
"libcouchbase3-libevent_3.2.0-1_amd64.deb "
"libcouchbase-dbg_3.2.0-1_amd64.deb "
"libcouchbase3-libev_3.2.0-1_amd64.deb "
"libcouchbase3-tools_3.2.0-1_amd64.deb "
"libcouchbase-dev_3.2.0-1_amd64.deb"]},
{"version": "3.2.2",
"os": "ubuntu",
"package": "libcouchbase-3.2.2_ubuntu1804_bionic_amd64",
"package_path": "libcouchbase-3.2.2_ubuntu1804_bionic_amd64",
"format": "tar",
"install_cmds":
["grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase3_3.2.2-1_amd64.deb "
"libcouchbase3-libevent_3.2.2-1_amd64.deb "
"libcouchbase-dbg_3.2.2-1_amd64.deb "
"libcouchbase3-libev_3.2.2-1_amd64.deb "
"libcouchbase3-tools_3.2.2-1_amd64.deb "
"libcouchbase-dev_3.2.2-1_amd64.deb"]},
{"version": "3.2.3",
"os": "ubuntu",
"package": "libcouchbase-3.2.3_ubuntu1804_bionic_amd64",
"package_path": "libcouchbase-3.2.3_ubuntu1804_bionic_amd64",
"format": "tar",
"install_cmds":
["grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase3_3.2.3-1_amd64.deb "
"libcouchbase3-libevent_3.2.3-1_amd64.deb "
"libcouchbase-dbg_3.2.3-1_amd64.deb "
"libcouchbase3-libev_3.2.3-1_amd64.deb "
"libcouchbase3-tools_3.2.3-1_amd64.deb "
"libcouchbase-dev_3.2.3-1_amd64.deb"]},
{"version": "3.2.4",
"os": "ubuntu",
"package": "libcouchbase-3.2.4_ubuntu1804_bionic_amd64",
"package_path": "libcouchbase-3.2.4_ubuntu1804_bionic_amd64",
"format": "tar",
"install_cmds":
["grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/ bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install libevent-core-2.1 libev4 -y ",
"sudo dpkg -i libcouchbase3_3.2.4-1_amd64.deb "
"libcouchbase3-libevent_3.2.4-1_amd64.deb "
"libcouchbase-dbg_3.2.4-1_amd64.deb "
"libcouchbase3-libev_3.2.4-1_amd64.deb "
"libcouchbase3-tools_3.2.4-1_amd64.deb "
"libcouchbase-dev_3.2.4-1_amd64.deb"]}
]
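# Extra OS packages that must be installed before building libcouchbase from a
# specific commit (used by ClientInstaller.install_lcb_from_commit below).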
LCB_CUSTOM_DEPS = {
'3.0.0':
{'ubuntu': ["grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/"
" bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/"
" bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install "
"libevent-core-2.1 libev4 -y "]
},
'3.2.0':
{'ubuntu': ["grep -qxF "
"'deb http://us.archive.ubuntu.com/ubuntu/"
" bionic main restricted' "
"/etc/apt/sources.list || echo "
"'deb http://us.archive.ubuntu.com/ubuntu/"
" bionic main restricted' "
">> /etc/apt/sources.list",
"sudo apt-get update -y",
"sudo apt-get install "
"libevent-core-2.1 libev4 -y "]
}
}
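# Convert a dotted/dashed version string such as '7.1.0-1745' into a tuple of ints
# so that versions can be compared numerically.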
def version_tuple(version: str):
return tuple(int(n) for n in re_split('\\.|-', version))
class ClientInstaller:
def __init__(self, cluster_spec, test_config, options):
self.test_config = test_config
self.cluster_spec = cluster_spec
self.client_settings = self.test_config.client_settings.__dict__
self.options = options
self.remote = RemoteHelper(self.cluster_spec, options.verbose)
self.client_os = RemoteHelper.detect_client_os(self.cluster_spec.workers[0],
self.cluster_spec).lower()
self.rest = RestHelper(self.cluster_spec, self.test_config, options.verbose)
self.cb_version = version_tuple(self.rest.get_version(host=next(self.cluster_spec.masters)))
@all_clients
def detect_libcouchbase_versions(self):
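        # Parse the installed libcouchbase version from the second line of
        # `cbc version` output; @all_clients runs this on every client host and
        # returns a mapping of host to result.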
return run("cbc version 2>&1 | head -n 2 | tail -n 1 | "
"awk -F ' ' '{ print $2 }' | "
"awk -F '=' '{ print $2 }' | "
"awk -F ',' '{ print $1 }'")
def detect_python_client_version(self):
return local("env/bin/pip freeze | grep ^couchbase | awk -F '==|@' '{ print $2 }'",
capture=True)
@all_clients
def uninstall_lcb(self):
# if any libcouchbase packages are installed, uninstall them; otherwise do nothing
run("(dpkg-query -l | grep -q libcouchbase) && apt-get remove 'libcouchbase*' -y || :")
@all_clients
def install_libcouchbase(self, version: str):
client_package_info = None
for package_info in LIBCOUCHBASE_PACKAGES:
if package_info["version"] == version and package_info["os"] == self.client_os:
client_package_info = package_info
if client_package_info is None:
raise Exception("invalid client version or os")
package = client_package_info['package']
package_path = client_package_info['package_path']
package_format = client_package_info['format']
package_version = client_package_info['version']
install_cmds = client_package_info['install_cmds']
os_version = run('cat /etc/os-release | grep UBUNTU_CODENAME')
os_version = os_version.split('=')[1]
if os_version == 'bionic':
package = package.replace('ubuntu1604', 'ubuntu1804')
package = package.replace('xenial', 'bionic')
package_path = package_path.replace('ubuntu1604', 'ubuntu1804')
package_path = package_path.replace('xenial', 'bionic')
with cd('/tmp'):
run("rm -rf {}*".format(package))
run("wget {}/{}/{}.{}".format(LIBCOUCHBASE_BASE_URL, package_version, package,
package_format))
run("tar xf {}.{}".format(package, package_format))
with cd("/tmp/{}".format(package_path)):
for cmd in install_cmds:
run(cmd)
@all_clients
def install_lcb_from_commit(self, version: str):
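        # `version` is expected as three colon-separated fields, e.g.
        # 'commit:<lcb_version>:<commit_id>'; the first field is discarded.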
_, version, commit_id = version.split(":")
dep_cmds = LCB_CUSTOM_DEPS[version][self.client_os]
for cmd in dep_cmds:
run(cmd)
with cd('/tmp'):
run("rm -rf libcouchbase_custom")
run("mkdir libcouchbase_custom")
with cd('/tmp/libcouchbase_custom'):
run('git clone https://github.com/couchbase/libcouchbase.git')
with cd('/tmp/libcouchbase_custom/libcouchbase'):
run('git checkout {}'.format(commit_id))
run('mkdir build')
with cd('/tmp/libcouchbase_custom/libcouchbase/build'):
run('apt-get install cmake libevent-dev libevent-core-2.1 libev4 -y')
run('../cmake/configure')
run('make')
def install_python_client(self, version: str):
if not ('review.couchbase.org' in version or 'github.com' in version):
version = "couchbase=={}".format(version)
local("env/bin/pip install {} --no-cache-dir".format(version))
def install(self):
lcb_version = self.client_settings['libcouchbase']
py_version = self.client_settings['python_client']
logger.info("Desired clients: lcb={}, py={}".format(lcb_version, py_version))
mb45563_is_hit = self.cb_version >= (7, 1, 0, 1745) and self.cb_version < (7, 1, 0, 1807)
if not py_version:
logger.info("No python SDK version provided. "
"Defaulting to version specified in requirements.txt")
elif mb45563_is_hit and version_tuple(py_version) < (3, 2, 0):
# SDK compatibility changed with 7.1.0-1745
# see https://issues.couchbase.com/browse/MB-45563
logger.warn("python SDK >= 3.2.0 required for Couchbase Server builds "
"7.1.0-1745 <= (build) < 7.1.0-1807. "
"Upgrading python SDK version to 3.2.3")
py_version = "3.2.3"
if not lcb_version:
logger.info("No libcouchbase version provided. Uninstalling libcouchbase.")
self.uninstall_lcb()
elif mb45563_is_hit and version_tuple(lcb_version) < (3, 2, 0):
# SDK compatibility changed with 7.1.0-1745
# see https://issues.couchbase.com/browse/MB-45563
logger.warn("libcouchbase >= 3.2.0 required for Couchbase Server builds "
"7.1.0-1745 <= (build) < 7.1.0-1807. "
"Upgrading libcouchbase version to 3.2.3")
lcb_version = "3.2.3"
if py_version and py_version.split('.')[0] == "2" and \
lcb_version and lcb_version.split('.')[0] != "2":
raise Exception("libcouchbase version 2.x.x must be specified when python_client=2.x.x")
# Install LCB
if lcb_version:
installed_versions = self.detect_libcouchbase_versions()
if any(v != lcb_version for v in installed_versions.values()):
if any(installed_versions.values()):
logger.info("Uninstalling libcouchbase")
self.uninstall_lcb()
else:
logger.info("libcouchbase is not installed")
logger.info("Installing libcouchbase {}".format(lcb_version))
if 'commit' in lcb_version:
self.install_lcb_from_commit(lcb_version)
else:
self.install_libcouchbase(lcb_version)
else:
logger.info("Clients already have desired libcouchbase versions.")
detected = self.detect_libcouchbase_versions()
for ip, version in detected.items():
logger.info("\t{}:\t{}".format(ip, version))
# Install Python SDK
if py_version:
logger.info("Installing python_client {}".format(py_version))
self.install_python_client(py_version)
detected = self.detect_python_client_version()
logger.info("Python client detected (pip freeze): {}"
.format(detected))
def get_args():
parser = ArgumentParser()
parser.add_argument('-c', '--cluster', dest='cluster_spec_fname',
required=True,
help='path to the cluster specification file')
parser.add_argument('-t', '--test', dest='test_config_fname',
required=True,
                        help='path to the test configuration file')
parser.add_argument('--verbose', dest='verbose',
action='store_true',
help='enable verbose logging')
parser.add_argument('override',
nargs='*',
help='custom cluster settings')
return parser.parse_args()
def main():
args = get_args()
cluster_spec = ClusterSpec()
cluster_spec.parse(args.cluster_spec_fname, override=args.override)
test_config = TestConfig()
test_config.parse(args.test_config_fname, override=args.override)
client_installer = ClientInstaller(cluster_spec, test_config, args)
client_installer.install()
if __name__ == '__main__':
main()
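# Example invocation (the script and file names below are illustrative):
#   python client_installer.py -c cluster.spec -t test.test --verbose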
|
|
#!/usr/bin/env ambari-python-wrap
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from math import ceil
from stack_advisor import DefaultStackAdvisor
class BaseBIGTOP08StackAdvisor(DefaultStackAdvisor):
def getComponentLayoutValidations(self, services, hosts):
"""Returns array of Validation objects about issues with hostnames components assigned to"""
items = []
# Validating NAMENODE and SECONDARY_NAMENODE are on different hosts if possible
hostsSet = set(super(BaseBIGTOP08StackAdvisor, self).getActiveHosts([host["Hosts"] for host in hosts["items"]]))
hostsCount = len(hostsSet)
componentsListList = [service["components"] for service in services["services"]]
componentsList = [item for sublist in componentsListList for item in sublist]
nameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "NAMENODE"]
secondaryNameNodeHosts = [component["StackServiceComponents"]["hostnames"] for component in componentsList if component["StackServiceComponents"]["component_name"] == "SECONDARY_NAMENODE"]
# Validating cardinality
for component in componentsList:
if component["StackServiceComponents"]["cardinality"] is not None:
componentName = component["StackServiceComponents"]["component_name"]
componentDisplayName = component["StackServiceComponents"]["display_name"]
componentHosts = []
if component["StackServiceComponents"]["hostnames"] is not None:
componentHosts = [componentHost for componentHost in component["StackServiceComponents"]["hostnames"] if componentHost in hostsSet]
componentHostsCount = len(componentHosts)
cardinality = str(component["StackServiceComponents"]["cardinality"])
# cardinality types: null, 1+, 1-2, 1, ALL
message = None
if "+" in cardinality:
hostsMin = int(cardinality[:-1])
if componentHostsCount < hostsMin:
message = "At least {0} {1} components should be installed in cluster.".format(hostsMin, componentDisplayName)
elif "-" in cardinality:
nums = cardinality.split("-")
hostsMin = int(nums[0])
hostsMax = int(nums[1])
if componentHostsCount > hostsMax or componentHostsCount < hostsMin:
message = "Between {0} and {1} {2} components should be installed in cluster.".format(hostsMin, hostsMax, componentDisplayName)
elif "ALL" == cardinality:
if componentHostsCount != hostsCount:
message = "{0} component should be installed on all hosts in cluster.".format(componentDisplayName)
else:
if componentHostsCount != int(cardinality):
message = "Exactly {0} {1} components should be installed in cluster.".format(int(cardinality), componentDisplayName)
if message is not None:
items.append({"type": 'host-component', "level": 'ERROR', "message": message, "component-name": componentName})
# Validating host-usage
usedHostsListList = [component["StackServiceComponents"]["hostnames"] for component in componentsList if not self.isComponentNotValuable(component)]
usedHostsList = [item for sublist in usedHostsListList for item in sublist]
nonUsedHostsList = [item for item in hostsSet if item not in usedHostsList]
for host in nonUsedHostsList:
items.append( { "type": 'host-component', "level": 'ERROR', "message": 'Host is not used', "host": str(host) } )
return items
def getServiceConfigurationRecommenderDict(self):
return {
"YARN": self.recommendYARNConfigurations,
"MAPREDUCE2": self.recommendMapReduce2Configurations
}
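  # putProperty initializes config[configType] and returns a setter closure that
  # the recommend* methods use to write recommended property values.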
def putProperty(self, config, configType):
config[configType] = {"properties": {}}
def appendProperty(key, value):
config[configType]["properties"][key] = str(value)
return appendProperty
def recommendYARNConfigurations(self, configurations, clusterData):
putYarnProperty = self.putProperty(configurations, "yarn-site")
putYarnProperty('yarn.nodemanager.resource.memory-mb', int(round(clusterData['containers'] * clusterData['ramPerContainer'])))
putYarnProperty('yarn.scheduler.minimum-allocation-mb', int(clusterData['ramPerContainer']))
putYarnProperty('yarn.scheduler.maximum-allocation-mb', int(round(clusterData['containers'] * clusterData['ramPerContainer'])))
def recommendMapReduce2Configurations(self, configurations, clusterData):
putMapredProperty = self.putProperty(configurations, "mapred-site")
putMapredProperty('yarn.app.mapreduce.am.resource.mb', int(clusterData['amMemory']))
putMapredProperty('yarn.app.mapreduce.am.command-opts', "-Xmx" + str(int(round(0.8 * clusterData['amMemory']))) + "m")
putMapredProperty('mapreduce.map.memory.mb', clusterData['mapMemory'])
putMapredProperty('mapreduce.reduce.memory.mb', int(clusterData['reduceMemory']))
putMapredProperty('mapreduce.map.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['mapMemory']))) + "m")
putMapredProperty('mapreduce.reduce.java.opts', "-Xmx" + str(int(round(0.8 * clusterData['reduceMemory']))) + "m")
putMapredProperty('mapreduce.task.io.sort.mb', min(int(round(0.4 * clusterData['mapMemory'])), 1024))
def getConfigurationClusterSummary(self, servicesList, hosts, components, services):
hBaseInstalled = False
if 'HBASE' in servicesList:
hBaseInstalled = True
cluster = {
"cpu": 0,
"disk": 0,
"ram": 0,
"hBaseInstalled": hBaseInstalled,
"components": components
}
if len(hosts["items"]) > 0:
host = hosts["items"][0]["Hosts"]
cluster["cpu"] = host["cpu_count"]
cluster["disk"] = len(host["disk_info"])
cluster["ram"] = int(host["total_mem"] / (1024 * 1024))
ramRecommendations = [
{"os":1, "hbase":1},
{"os":2, "hbase":1},
{"os":2, "hbase":2},
{"os":4, "hbase":4},
{"os":6, "hbase":8},
{"os":8, "hbase":8},
{"os":8, "hbase":8},
{"os":12, "hbase":16},
{"os":24, "hbase":24},
{"os":32, "hbase":32},
{"os":64, "hbase":64}
]
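    # Pick one row of ramRecommendations via a dict keyed by mutually exclusive
    # range checks: exactly one key evaluates to True, and since True == 1,
    # indexing the dict with [1] yields the matching row index.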
index = {
cluster["ram"] <= 4: 0,
4 < cluster["ram"] <= 8: 1,
8 < cluster["ram"] <= 16: 2,
16 < cluster["ram"] <= 24: 3,
24 < cluster["ram"] <= 48: 4,
48 < cluster["ram"] <= 64: 5,
64 < cluster["ram"] <= 72: 6,
72 < cluster["ram"] <= 96: 7,
96 < cluster["ram"] <= 128: 8,
128 < cluster["ram"] <= 256: 9,
256 < cluster["ram"]: 10
}[1]
cluster["reservedRam"] = ramRecommendations[index]["os"]
cluster["hbaseRam"] = ramRecommendations[index]["hbase"]
cluster["minContainerSize"] = {
cluster["ram"] <= 4: 256,
4 < cluster["ram"] <= 8: 512,
8 < cluster["ram"] <= 24: 1024,
24 < cluster["ram"]: 2048
}[1]
totalAvailableRam = cluster["ram"] - cluster["reservedRam"]
if cluster["hBaseInstalled"]:
totalAvailableRam -= cluster["hbaseRam"]
cluster["totalAvailableRam"] = max(2048, totalAvailableRam * 1024)
'''containers = max(3, min (2*cores,min (1.8*DISKS,(Total available RAM) / MIN_CONTAINER_SIZE))))'''
cluster["containers"] = round(max(3,
min(2 * cluster["cpu"],
min(ceil(1.8 * cluster["disk"]),
cluster["totalAvailableRam"] / cluster["minContainerSize"]))))
'''ramPerContainers = max(2GB, RAM - reservedRam - hBaseRam) / containers'''
cluster["ramPerContainer"] = abs(cluster["totalAvailableRam"] / cluster["containers"])
'''If greater than 1GB, value will be in multiples of 512.'''
if cluster["ramPerContainer"] > 1024:
cluster["ramPerContainer"] = int(cluster["ramPerContainer"] / 512) * 512
cluster["mapMemory"] = int(cluster["ramPerContainer"])
cluster["reduceMemory"] = cluster["ramPerContainer"]
cluster["amMemory"] = max(cluster["mapMemory"], cluster["reduceMemory"])
return cluster
def getConfigurationsValidationItems(self, services, hosts):
"""Returns array of Validation objects about issues with configuration values provided in services"""
items = []
recommendations = self.recommendConfigurations(services, hosts)
recommendedDefaults = recommendations["recommendations"]["blueprint"]["configurations"]
configurations = services["configurations"]
for service in services["services"]:
serviceName = service["StackServices"]["service_name"]
validator = self.validateServiceConfigurations(serviceName)
if validator is not None:
siteName = validator[0]
method = validator[1]
if siteName in recommendedDefaults:
siteProperties = getSiteProperties(configurations, siteName)
if siteProperties is not None:
resultItems = method(siteProperties, recommendedDefaults[siteName]["properties"], configurations)
items.extend(resultItems)
return items
def getServiceConfigurationValidators(self):
return {
"MAPREDUCE2": ["mapred-site", self.validateMapReduce2Configurations],
"YARN": ["yarn-site", self.validateYARNConfigurations]
}
def validateServiceConfigurations(self, serviceName):
return self.getServiceConfigurationValidators().get(serviceName, None)
def toConfigurationValidationProblems(self, validationProblems, siteName):
result = []
for validationProblem in validationProblems:
validationItem = validationProblem.get("item", None)
if validationItem is not None:
problem = {"type": 'configuration', "level": validationItem["level"], "message": validationItem["message"],
"config-type": siteName, "config-name": validationProblem["config-name"] }
result.append(problem)
return result
def getWarnItem(self, message):
return {"level": "WARN", "message": message}
def getErrorItem(self, message):
return {"level": "ERROR", "message": message}
def validatorLessThenDefaultValue(self, properties, recommendedDefaults, propertyName):
    if propertyName not in properties:
return self.getErrorItem("Value should be set")
value = to_number(properties[propertyName])
if value is None:
return self.getErrorItem("Value should be integer")
defaultValue = to_number(recommendedDefaults[propertyName])
if defaultValue is None:
return None
if value < defaultValue:
return self.getWarnItem("Value is less than the recommended default of {0}".format(defaultValue))
return None
def validateXmxValue(self, properties, recommendedDefaults, propertyName):
    if propertyName not in properties:
return self.getErrorItem("Value should be set")
value = properties[propertyName]
defaultValue = recommendedDefaults[propertyName]
if defaultValue is None:
return self.getErrorItem("Config's default value can't be null or undefined")
if not checkXmxValueFormat(value):
return self.getErrorItem('Invalid value format')
valueInt = formatXmxSizeToBytes(getXmxSize(value))
defaultValueXmx = getXmxSize(defaultValue)
defaultValueInt = formatXmxSizeToBytes(defaultValueXmx)
if valueInt < defaultValueInt:
return self.getWarnItem("Value is less than the recommended default of -Xmx" + defaultValueXmx)
return None
def validateMapReduce2Configurations(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [ {"config-name": 'mapreduce.map.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.map.java.opts')},
{"config-name": 'mapreduce.reduce.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.reduce.java.opts')},
{"config-name": 'mapreduce.task.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.task.io.sort.mb')},
{"config-name": 'mapreduce.map.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.map.memory.mb')},
{"config-name": 'mapreduce.reduce.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.reduce.memory.mb')},
{"config-name": 'yarn.app.mapreduce.am.resource.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.resource.mb')},
{"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.command-opts')} ]
return self.toConfigurationValidationProblems(validationItems, "mapred-site")
def validateYARNConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [ {"config-name": 'yarn.nodemanager.resource.memory-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.nodemanager.resource.memory-mb')},
{"config-name": 'yarn.scheduler.minimum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.minimum-allocation-mb')},
{"config-name": 'yarn.scheduler.maximum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.maximum-allocation-mb')} ]
return self.toConfigurationValidationProblems(validationItems, "yarn-site")
def getMastersWithMultipleInstances(self):
return ['ZOOKEEPER_SERVER', 'HBASE_MASTER']
def getNotValuableComponents(self):
return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR']
def getNotPreferableOnServerComponents(self):
return ['GANGLIA_SERVER']
def getCardinalitiesDict(self, hosts):
return {
'ZOOKEEPER_SERVER': {"min": 3},
'HBASE_MASTER': {"min": 1},
}
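  # As interpreted by DefaultStackAdvisor, keys are host-count thresholds and
  # values are the preferred host index for the component; "else" covers larger
  # clusters.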
def getComponentLayoutSchemes(self):
return {
'NAMENODE': {"else": 0},
'SECONDARY_NAMENODE': {"else": 1},
'HBASE_MASTER': {6: 0, 31: 2, "else": 3},
'HISTORYSERVER': {31: 1, "else": 2},
'RESOURCEMANAGER': {31: 1, "else": 2},
'OOZIE_SERVER': {6: 1, 31: 2, "else": 3},
'HIVE_SERVER': {6: 1, 31: 2, "else": 4},
'HIVE_METASTORE': {6: 1, 31: 2, "else": 4},
'WEBHCAT_SERVER': {6: 1, 31: 2, "else": 4},
}
class BIGTOP08StackAdvisor(BaseBIGTOP08StackAdvisor):
def getServiceConfigurationRecommenderDict(self):
parentRecommendConfDict = super(BIGTOP08StackAdvisor, self).getServiceConfigurationRecommenderDict()
childRecommendConfDict = {
"OOZIE": self.recommendOozieConfigurations,
"HIVE": self.recommendHiveConfigurations,
"TEZ": self.recommendTezConfigurations
}
parentRecommendConfDict.update(childRecommendConfDict)
return parentRecommendConfDict
def recommendOozieConfigurations(self, configurations, clusterData, services, hosts):
if "FALCON_SERVER" in clusterData["components"]:
putMapredProperty = self.putProperty(configurations, "oozie-site")
putMapredProperty("oozie.services.ext",
"org.apache.oozie.service.JMSAccessorService," +
"org.apache.oozie.service.PartitionDependencyManagerService," +
"org.apache.oozie.service.HCatAccessorService")
def recommendHiveConfigurations(self, configurations, clusterData, services, hosts):
containerSize = clusterData['mapMemory'] if clusterData['mapMemory'] > 2048 else int(clusterData['reduceMemory'])
containerSize = min(clusterData['containers'] * clusterData['ramPerContainer'], containerSize)
putHiveProperty = self.putProperty(configurations, "hive-site")
putHiveProperty('hive.auto.convert.join.noconditionaltask.size', int(round(containerSize / 3)) * 1048576)
putHiveProperty('hive.tez.java.opts', "-server -Xmx" + str(int(round((0.8 * containerSize) + 0.5)))
+ "m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA -XX:+UseParallelGC")
putHiveProperty('hive.tez.container.size', containerSize)
def recommendTezConfigurations(self, configurations, clusterData, services, hosts):
putTezProperty = self.putProperty(configurations, "tez-site")
putTezProperty("tez.am.resource.memory.mb", int(clusterData['amMemory']))
putTezProperty("tez.am.java.opts",
"-server -Xmx" + str(int(0.8 * clusterData["amMemory"]))
+ "m -Djava.net.preferIPv4Stack=true -XX:+UseNUMA -XX:+UseParallelGC")
def getNotPreferableOnServerComponents(self):
return ['STORM_UI_SERVER', 'DRPC_SERVER', 'STORM_REST_API', 'NIMBUS', 'GANGLIA_SERVER']
def getNotValuableComponents(self):
return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR', 'APP_TIMELINE_SERVER']
def getComponentLayoutSchemes(self):
parentSchemes = super(BIGTOP08StackAdvisor, self).getComponentLayoutSchemes()
childSchemes = {
'APP_TIMELINE_SERVER': {31: 1, "else": 2},
'FALCON_SERVER': {6: 1, 31: 2, "else": 3}
}
parentSchemes.update(childSchemes)
return parentSchemes
def getServiceConfigurationValidators(self):
parentValidators = super(BIGTOP08StackAdvisor, self).getServiceConfigurationValidators()
childValidators = {
"HIVE": ["hive-site", self.validateHiveConfigurations],
"TEZ": ["tez-site", self.validateTezConfigurations]
}
parentValidators.update(childValidators)
return parentValidators
def validateHiveConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [ {"config-name": 'hive.tez.container.size', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hive.tez.container.size')},
{"config-name": 'hive.tez.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'hive.tez.java.opts')},
{"config-name": 'hive.auto.convert.join.noconditionaltask.size', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hive.auto.convert.join.noconditionaltask.size')} ]
return self.toConfigurationValidationProblems(validationItems, "hive-site")
def validateTezConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [ {"config-name": 'tez.am.resource.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'tez.am.resource.memory.mb')},
{"config-name": 'tez.am.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'tez.am.java.opts')} ]
return self.toConfigurationValidationProblems(validationItems, "tez-site")
# Validation helper methods
def getSiteProperties(configurations, siteName):
siteConfig = configurations.get(siteName)
if siteConfig is None:
return None
return siteConfig.get("properties")
def to_number(s):
try:
return int(re.sub("\D", "", s))
except ValueError:
return None
def checkXmxValueFormat(value):
p = re.compile('-Xmx(\d+)(b|k|m|g|p|t|B|K|M|G|P|T)?')
matches = p.findall(value)
return len(matches) == 1
def getXmxSize(value):
p = re.compile("-Xmx(\d+)(.?)")
result = p.findall(value)[0]
if len(result) > 1:
# result[1] - is a space or size formatter (b|k|m|g etc)
return result[0] + result[1].lower()
return result[0]
def formatXmxSizeToBytes(value):
value = value.lower()
if len(value) == 0:
return 0
modifier = value[-1]
if modifier == ' ' or modifier in "0123456789":
modifier = 'b'
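  # Same boolean-keyed dict trick as in getConfigurationClusterSummary: the single
  # True key (True == 1) selects the byte multiplier for the size suffix.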
m = {
modifier == 'b': 1,
modifier == 'k': 1024,
modifier == 'm': 1024 * 1024,
modifier == 'g': 1024 * 1024 * 1024,
modifier == 't': 1024 * 1024 * 1024 * 1024,
modifier == 'p': 1024 * 1024 * 1024 * 1024 * 1024
}[1]
return to_number(value) * m
def getPort(address):
"""
  Extracts the port from an address like 0.0.0.0:1019
"""
if address is None:
return None
m = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
if m is not None:
return int(m.group(2))
else:
return None
def isSecurePort(port):
"""
  Returns True if the port is a privileged (root-only) port on *nix systems
"""
if port is not None:
return port < 1024
else:
return False
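# Illustrative behaviour of the Xmx helpers above (example values):
#   checkXmxValueFormat("-Xmx2048m")            -> True
#   formatXmxSizeToBytes(getXmxSize("-Xmx2g"))  -> 2147483648 (2 GiB)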
|
|
import unittest
from unittest import mock
import tethys_cli.manage_commands as manage_commands
from tethys_cli.manage_commands import (
MANAGE_START,
MANAGE_COLLECTSTATIC,
MANAGE_COLLECTWORKSPACES,
MANAGE_COLLECT,
MANAGE_CREATESUPERUSER
)
class TestManageCommands(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@mock.patch('tethys_cli.manage_commands.run_process')
def test_manage_command_manage_start(self, mock_run_process):
# mock the input args
args = mock.MagicMock(manage='', command=MANAGE_START, port='8080')
# call the testing method with the mock args
manage_commands.manage_command(args)
# get the call arguments for the run process mock method
process_call_args = mock_run_process.call_args_list
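        # call_args_list[i][0][0] is the command list passed as the first
        # positional argument of the i-th run_process call.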
# check the values from the argument list
self.assertEqual('python', process_call_args[0][0][0][0])
self.assertIn('manage.py', process_call_args[0][0][0][1])
self.assertEqual('runserver', process_call_args[0][0][0][2])
self.assertEqual('8080', process_call_args[0][0][0][3])
@mock.patch('tethys_cli.manage_commands.run_process')
def test_manage_command_manage_start_with_no_port(self, mock_run_process):
# mock the input args
args = mock.MagicMock(manage='', command=MANAGE_START, port='')
# call the testing method with the mock args
manage_commands.manage_command(args)
# get the call arguments for the run process mock method
process_call_args = mock_run_process.call_args_list
# check the values from the argument list
self.assertEqual('python', process_call_args[0][0][0][0])
self.assertIn('manage.py', process_call_args[0][0][0][1])
self.assertEqual('runserver', process_call_args[0][0][0][2])
@mock.patch('tethys_cli.manage_commands.run_process')
def test_manage_command_manage_manage_collectstatic(self, mock_run_process):
# mock the input args
args = mock.MagicMock(manage='', command=MANAGE_COLLECTSTATIC, port='8080', noinput=False)
# call the testing method with the mock args
manage_commands.manage_command(args)
# get the call arguments for the run process mock method
process_call_args = mock_run_process.call_args_list
# intermediate process
self.assertEqual('python', process_call_args[0][0][0][0])
self.assertIn('manage.py', process_call_args[0][0][0][1])
self.assertEqual('pre_collectstatic', process_call_args[0][0][0][2])
# primary process
self.assertEqual('python', process_call_args[1][0][0][0])
self.assertIn('manage.py', process_call_args[1][0][0][1])
self.assertEqual('collectstatic', process_call_args[1][0][0][2])
self.assertNotIn('--noinput', process_call_args[1][0][0])
@mock.patch('tethys_cli.manage_commands.run_process')
def test_manage_command_manage_manage_collectstatic_with_no_input(self, mock_run_process):
# mock the input args
args = mock.MagicMock(manage='', command=MANAGE_COLLECTSTATIC, port='8080', noinput=True)
# call the testing method with the mock args
manage_commands.manage_command(args)
# get the call arguments for the run process mock method
process_call_args = mock_run_process.call_args_list
# intermediate process
self.assertEqual('python', process_call_args[0][0][0][0])
self.assertIn('manage.py', process_call_args[0][0][0][1])
self.assertEqual('pre_collectstatic', process_call_args[0][0][0][2])
# primary process
self.assertEqual('python', process_call_args[1][0][0][0])
self.assertIn('manage.py', process_call_args[1][0][0][1])
self.assertEqual('collectstatic', process_call_args[1][0][0][2])
self.assertEqual('--noinput', process_call_args[1][0][0][3])
@mock.patch('tethys_cli.manage_commands.run_process')
def test_manage_command_manage_manage_collect_workspace(self, mock_run_process):
# mock the input args
args = mock.MagicMock(manage='', command=MANAGE_COLLECTWORKSPACES, port='8080', force=True)
# call the testing method with the mock args
manage_commands.manage_command(args)
# get the call arguments for the run process mock method
process_call_args = mock_run_process.call_args_list
# check the values from the argument list
self.assertEqual('python', process_call_args[0][0][0][0])
self.assertIn('manage.py', process_call_args[0][0][0][1])
self.assertEqual('collectworkspaces', process_call_args[0][0][0][2])
self.assertEqual('--force', process_call_args[0][0][0][3])
@mock.patch('tethys_cli.manage_commands.run_process')
def test_manage_command_manage_manage_collect_workspace_with_no_force(self, mock_run_process):
# mock the input args
args = mock.MagicMock(manage='', command=MANAGE_COLLECTWORKSPACES, force=False)
# call the testing method with the mock args
manage_commands.manage_command(args)
# get the call arguments for the run process mock method
process_call_args = mock_run_process.call_args_list
# check the values from the argument list
self.assertEqual('python', process_call_args[0][0][0][0])
self.assertIn('manage.py', process_call_args[0][0][0][1])
self.assertEqual('collectworkspaces', process_call_args[0][0][0][2])
self.assertNotIn('--force', process_call_args[0][0][0])
@mock.patch('tethys_cli.manage_commands.run_process')
def test_manage_command_manage_manage_collect(self, mock_run_process):
# mock the input args
args = mock.MagicMock(manage='', command=MANAGE_COLLECT, port='8080', noinput=False)
# call the testing method with the mock args
manage_commands.manage_command(args)
# get the call arguments for the run process mock method
process_call_args = mock_run_process.call_args_list
# pre_collectstatic
self.assertEqual('python', process_call_args[0][0][0][0])
self.assertIn('manage.py', process_call_args[0][0][0][1])
self.assertEqual('pre_collectstatic', process_call_args[0][0][0][2])
# collectstatic
self.assertEqual('python', process_call_args[1][0][0][0])
self.assertIn('manage.py', process_call_args[1][0][0][1])
self.assertEqual('collectstatic', process_call_args[1][0][0][2])
self.assertNotIn('--noinput', process_call_args[1][0][0])
# collectworkspaces
self.assertEqual('python', process_call_args[2][0][0][0])
self.assertIn('manage.py', process_call_args[2][0][0][1])
self.assertEqual('collectworkspaces', process_call_args[2][0][0][2])
@mock.patch('tethys_cli.manage_commands.run_process')
def test_manage_command_manage_manage_collect_no_input(self, mock_run_process):
# mock the input args
args = mock.MagicMock(manage='', command=MANAGE_COLLECT, port='8080', noinput=True)
# call the testing method with the mock args
manage_commands.manage_command(args)
# get the call arguments for the run process mock method
process_call_args = mock_run_process.call_args_list
# pre_collectstatic
self.assertEqual('python', process_call_args[0][0][0][0])
self.assertIn('manage.py', process_call_args[0][0][0][1])
self.assertEqual('pre_collectstatic', process_call_args[0][0][0][2])
# collectstatic
self.assertEqual('python', process_call_args[1][0][0][0])
self.assertIn('manage.py', process_call_args[1][0][0][1])
self.assertEqual('collectstatic', process_call_args[1][0][0][2])
self.assertEqual('--noinput', process_call_args[1][0][0][3])
# collectworkspaces
self.assertEqual('python', process_call_args[2][0][0][0])
self.assertIn('manage.py', process_call_args[2][0][0][1])
self.assertEqual('collectworkspaces', process_call_args[2][0][0][2])
@mock.patch('tethys_cli.manage_commands.run_process')
def test_manage_command_manage_manage_create_super_user(self, mock_run_process):
# mock the input args
args = mock.MagicMock(manage='', command=MANAGE_CREATESUPERUSER, port='8080')
# call the testing method with the mock args
manage_commands.manage_command(args)
# get the call arguments for the run process mock method
process_call_args = mock_run_process.call_args_list
# check the values from the argument list
self.assertEqual('python', process_call_args[0][0][0][0])
self.assertIn('manage.py', process_call_args[0][0][0][1])
self.assertEqual('createsuperuser', process_call_args[0][0][0][2])
|
|
# Copyright (c) 2013 Marion Zepf
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Python export tool """
import ast
from gettext import gettext as _
from os import linesep
from os import path, pardir
import re
import traceback
import util.codegen as codegen
# from ast_pprint import * # only used for debugging, safe to comment out
from talogo import LogoCode
from taprimitive import (ast_yield_true, Primitive, PyExportError,
value_to_ast)
from tautils import (find_group, find_top_block, get_stack_name)
from tawindow import plugins_in_use
_SETUP_CODE_START = """\
#!/usr/bin/env python
# -*- coding: utf-8 -*-
_INSTALL_PATH = '/usr/share/sugar/activities/TurtleArt.activity'
_ALTERNATIVE_INSTALL_PATH = \\
'/usr/local/share/sugar/activities/TurtleArt.activity'
import os, sys, dbus
paths = []
paths.append('../%s.activity')
paths.append(os.path.expanduser('~') + '/Activities/%s.activity')
paths.append('/usr/share/sugar/activities/%s.activity')
paths.append('/usr/local/share/sugar/activities/%s.activity')
paths.append(
'/home/broot/sugar-build/build/install/share/sugar/activities/%s.activity')
""" + \
"paths.append('%s')" % \
path.abspath(path.join(path.dirname(__file__), pardir)) + \
"""\
flag = False
for path in paths:
for activity in ['TurtleBots', 'TurtleBlocks']:
p = (path % activity) if "%" in path else path
if os.path.exists(p) and p not in sys.path:
flag = True
sys.path.insert(0, p)
if not flag:
    print 'This code requires the Turtle Blocks/Bots activity to be installed.'
exit(1)
from time import *
from random import uniform
from math import *
from pyexported.window_setup import *
tw = get_tw()
BOX = {}
ACTION = {}
global_objects = None
turtles = None
canvas = None
logo = None
"""
_SETUP_CODE_END = """\
if __name__ == '__main__':
tw.lc.start_time = time()
tw.lc.icall(start)
gobject.idle_add(tw.lc.doevalstep)
gtk.main()
"""
_ACTION_STACK_START = """\
def %s():
"""
_START_STACK_START_ADD = """\
tw.start_plugins()
global global_objects,turtles,canvas,logo
global_objects = tw.get_global_objects()
turtles = tw.turtles
canvas = tw.canvas
logo = tw.lc
logo.boxes = BOX
"""
_ACTION_STACK_PREAMBLE = """\
turtle = turtles.get_active_turtle()
"""
_ACTION_STACK_END = """\
ACTION["%s"] = %s
"""
# character that is illegal in a Python identifier
PAT_IDENTIFIER_ILLEGAL_CHAR = re.compile("[^A-Za-z0-9_]")
def save_python(tw):
""" Find all the action stacks and turn each into Python code """
all_blocks = tw.just_blocks()
blocks_name = []
for block in all_blocks:
blocks_name.append(block.name)
if 'start' not in blocks_name:
return None
blocks_covered = set()
tops_of_stacks = []
for block in all_blocks:
if block not in blocks_covered:
top = find_top_block(block)
tops_of_stacks.append(top)
block_stack = find_group(top)
blocks_covered.update(set(block_stack))
snippets = [_SETUP_CODE_START]
for k in plugins_in_use:
snippets.append('%s = None\n' % (k.lower(),))
snippets.append('\n')
for block in tops_of_stacks:
stack_name = get_stack_name(block)
if stack_name:
pythoncode = _action_stack_to_python(block, tw, name=stack_name)
snippets.append(pythoncode)
snippets.append(linesep)
snippets.append(_SETUP_CODE_END)
return ''.join(snippets)
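# The module assembled above looks roughly like this (illustrative sketch):
#
#     <_SETUP_CODE_START>           # sys.path setup, imports, tw = get_tw(),
#                                   # BOX = {}, ACTION = {}
#     someplugin = None             # one '<name> = None' line per plugin in use
#
#     def start():
#         tw.start_plugins()        # start-stack-only preamble (globals,
#                                   # logo.boxes = BOX)
#         turtle = turtles.get_active_turtle()
#         ...                       # code generated from the block stack
#         yield True
#     ACTION["start"] = start
#
#     <_SETUP_CODE_END>             # tw.lc.icall(start) + gobject/gtk main loop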
def _action_stack_to_python(block, tw, name='start'):
""" Turn a stack of blocks into Python code
name -- the name of the action stack (defaults to "start") """
if isinstance(name, int):
name = float(name)
if not isinstance(name, basestring):
name = str(name)
# traverse the block stack and get the AST for every block
ast_list = _walk_action_stack(block, tw.lc)
if not ast_list or not isinstance(ast_list[-1], ast.Yield):
ast_list.append(ast_yield_true())
action_stack_ast = ast.Module(body=ast_list)
# serialize the ASTs into python code
generated_code = codegen.to_source(action_stack_ast)
# wrap the action stack setup code around everything
name_id = _make_identifier(name)
if name == 'start':
pre_preamble = _START_STACK_START_ADD
for k in plugins_in_use:
pre_preamble += ' global %s\n' % (k.lower(),)
pre_preamble += " %s = global_objects['%s']\n" % (k.lower(), k)
else:
pre_preamble = ''
generated_code = _indent(generated_code, 1)
if generated_code.endswith(linesep):
newline = ''
else:
newline = linesep
snippets = [_ACTION_STACK_START % (name_id),
pre_preamble,
_ACTION_STACK_PREAMBLE,
generated_code,
newline,
_ACTION_STACK_END % (name, name_id)]
return ''.join(snippets)
def _walk_action_stack(top_block, lc, convert_me=True):
""" Turn a stack of blocks into a list of ASTs
convert_me -- convert values and Primitives to ASTs or return them
unconverted? """
block = top_block
# value blocks don't have a primitive
# (but constant blocks (colors, screen dimensions, etc.) do)
if block.is_value_block():
raw_value = block.get_value(add_type_prefix=False)
if convert_me:
value_ast = value_to_ast(raw_value)
if value_ast is not None:
return [value_ast]
else:
return []
else:
if raw_value is not None:
return [raw_value]
else:
return []
def _get_prim(block):
prim = lc.get_prim_callable(block.primitive)
# fail gracefully if primitive is not a Primitive object
if not isinstance(prim, Primitive):
raise PyExportError(_('block is not exportable'), block=block)
return prim
prim = _get_prim(block)
ast_list = []
arg_asts = []
def _finish_off(block, prim=None):
""" Convert block to an AST and add it to the ast_list. Raise a
PyExportError on failure. """
if prim is None:
prim = _get_prim(block)
if convert_me:
if prim.export_me:
try:
new_ast = prim.get_ast(*arg_asts)
except ValueError:
traceback.print_exc()
raise PyExportError(_('error while exporting block'),
block=block)
if isinstance(new_ast, (list, tuple)):
ast_list.extend(new_ast)
elif new_ast is not None:
ast_list.append(new_ast)
elif arg_asts: # TODO do we ever get here?
new_ast = ast.List(elts=arg_asts, ctx=ast.Load)
ast_list.append(new_ast)
else:
ast_list.append((prim, ) + tuple(arg_asts))
    # skip the very first dock/connection - it's either the previous block or
# the return value of this block
dock_queue = block.docks[1:]
conn_queue = block.connections[1:]
while dock_queue and conn_queue:
dock = dock_queue.pop(0)
conn = conn_queue.pop(0)
if conn is None or dock[0] == 'unavailable':
continue
elif not dock_queue and dock[0] == 'flow':
# finish off this block
_finish_off(block, prim)
arg_asts = []
# next block
block = conn
prim = _get_prim(block)
dock_queue = block.docks[1:]
conn_queue = block.connections[1:]
else:
# embedded stack of blocks (body of conditional or loop) or
# argument block
if dock[0] == 'flow':
# body of conditional or loop
new_arg_asts = _walk_action_stack(conn, lc,
convert_me=convert_me)
if (prim == LogoCode.prim_loop and
not isinstance(new_arg_asts[-1], ast.Yield)):
new_arg_asts.append(ast_yield_true())
arg_asts.append(new_arg_asts)
else:
# argument block
new_arg_asts = _walk_action_stack(conn, lc, convert_me=False)
arg_asts.append(*new_arg_asts)
# finish off last block
_finish_off(block, prim)
return ast_list
def _make_identifier(name):
""" Turn name into a Python identifier name by replacing illegal
characters """
replaced = re.sub(PAT_IDENTIFIER_ILLEGAL_CHAR, '_', name)
# TODO find better strategy to avoid number at beginning
if re.match('[0-9]', replaced):
replaced = '_' + replaced
return replaced
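# For example, _make_identifier('stack #1') returns 'stack__1' and
# _make_identifier('2nd stack') returns '_2nd_stack'.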
def _indent(code, num_levels=1):
""" Indent each line of code with num_levels * 4 spaces
code -- some python code as a (multi-line) string """
indentation = " " * (4 * num_levels)
line_list = code.split(linesep)
new_line_list = []
for line in line_list:
new_line_list.append(indentation + line)
return linesep.join(new_line_list)
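# For example, _indent('a = 1' + linesep + 'b = 2') returns
# '    a = 1' + linesep + '    b = 2'.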
|
|
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import shlex
import string
import subprocess
import tempfile
import sys
import os
from cloudify.exceptions import CommandExecutionException
from cloudify.constants import LOCAL_IP_KEY, MANAGER_IP_KEY, \
MANAGER_REST_PORT_KEY, MANAGER_FILE_SERVER_BLUEPRINTS_ROOT_URL_KEY, \
MANAGER_FILE_SERVER_URL_KEY
def setup_logger(logger_name, logger_level=logging.DEBUG, handlers=None,
remove_existing_handlers=True):
"""
:param logger_name: Name of the logger.
:param logger_level: Level for the logger (not for specific handler).
:param handlers: An optional list of handlers (formatter will be
overridden); If None, only a StreamHandler for
sys.stdout will be used.
:param remove_existing_handlers: Determines whether to remove existing
handlers before adding new ones
:return: A logger instance.
:rtype: Logger
"""
logger = logging.getLogger(logger_name)
if remove_existing_handlers:
for handler in logger.handlers:
logger.removeHandler(handler)
if not handlers:
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
handlers = [handler]
formatter = logging.Formatter(fmt='%(asctime)s [%(levelname)s] '
'[%(name)s] %(message)s',
datefmt='%H:%M:%S')
for handler in handlers:
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logger_level)
return logger
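# Example usage (a minimal sketch; the logger name is arbitrary):
#
#   logger = setup_logger('cloudify.example')
#   logger.info('hello')   # -> "12:00:00 [INFO] [cloudify.example] hello"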
def get_local_ip():
"""
    Return the IP address used to connect to this machine by the management
    machine.
"""
return os.environ[LOCAL_IP_KEY]
def get_manager_ip(bracketed_ipv6=True):
"""
Returns the IP address of manager inside the management network.
"""
ip = os.environ[MANAGER_IP_KEY]
if bracketed_ipv6:
if ip.count(':') >= 2:
if not ip.startswith('['):
ip = '[{ip}'.format(ip=ip)
if not ip.endswith(']'):
ip = '{ip}]'.format(ip=ip)
else:
ip = ip.strip('[]')
return ip
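# For example, when os.environ[MANAGER_IP_KEY] is 'fd12:3456::1' this returns
# '[fd12:3456::1]' (an already bracketed address is left untouched), an IPv4
# address such as '10.0.0.5' is returned as-is, and with bracketed_ipv6=False
# any surrounding brackets are stripped instead.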
def get_manager_file_server_blueprints_root_url():
"""
Returns the blueprints root url in the file server.
"""
return os.environ[MANAGER_FILE_SERVER_BLUEPRINTS_ROOT_URL_KEY]
def get_manager_file_server_url():
"""
Returns the manager file server base url.
"""
return os.environ[MANAGER_FILE_SERVER_URL_KEY]
def get_manager_rest_service_port():
"""
Returns the port the manager REST service is running on.
"""
return int(os.environ[MANAGER_REST_PORT_KEY])
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
"""
Generate and return a random string using upper case letters and digits.
"""
return ''.join(random.choice(chars) for x in range(size))
def create_temp_folder():
"""
Create a temporary folder.
"""
path_join = os.path.join(tempfile.gettempdir(), id_generator(5))
os.makedirs(path_join)
return path_join
def get_cosmo_properties():
return {
"management_ip": get_manager_ip(),
"ip": get_local_ip()
}
def find_type_in_kwargs(cls, all_args):
result = [v for v in all_args if isinstance(v, cls)]
if not result:
return None
if len(result) > 1:
raise RuntimeError(
"Expected to find exactly one instance of {0} in "
"kwargs but found {1}".format(cls, len(result)))
return result[0]
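# For example, find_type_in_kwargs(dict, ['x', {'a': 1}, None]) returns
# {'a': 1}; it returns None when no instance of the class is present and
# raises RuntimeError when more than one is found.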
class LocalCommandRunner(object):
def __init__(self, logger=None, host='localhost'):
"""
:param logger: This logger will be used for
printing the output and the command.
"""
logger = logger or setup_logger('LocalCommandRunner')
self.logger = logger
self.host = host
def run(self, command,
exit_on_failure=True,
stdout_pipe=True,
stderr_pipe=True):
"""
Runs local commands.
:param command: The command to execute.
:param exit_on_failure: False to ignore failures.
:param stdout_pipe: False to not pipe the standard output.
:param stderr_pipe: False to not pipe the standard error.
:return: A wrapper object for all valuable info from the execution.
:rtype: CommandExecutionResponse
"""
self.logger.info('[{0}] run: {1}'.format(self.host, command))
shlex_split = shlex.split(command)
stdout = subprocess.PIPE if stdout_pipe else None
stderr = subprocess.PIPE if stderr_pipe else None
p = subprocess.Popen(shlex_split, stdout=stdout,
stderr=stderr)
out, err = p.communicate()
if p.returncode == 0:
if out:
self.logger.info('[{0}] out: {1}'.format(self.host, out))
else:
error = CommandExecutionException(
command=command,
code=p.returncode,
error=err,
output=out)
self.logger.error(error)
if exit_on_failure:
raise error
return CommandExecutionResponse(command=command,
std_out=out,
std_err=err,
return_code=p.returncode)
class CommandExecutionResponse(object):
"""
Wrapper object for info returned when running commands.
:param command: The command that was executed.
:param std_out: The output from the execution.
:param std_err: The error message from the execution.
:param return_code: The return code from the execution.
"""
def __init__(self, command, std_out, std_err, return_code):
self.command = command
self.std_out = std_out
self.std_err = std_err
self.return_code = return_code
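# Example usage (a minimal sketch; the command is arbitrary):
#
#   runner = LocalCommandRunner()
#   response = runner.run('uname -a')
#   # response.std_out holds the command output and response.return_code is 0
#   # on success; a non-zero exit raises CommandExecutionException unless
#   # exit_on_failure=False is passed.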
|
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.db import models as djangomodels
from django.utils import translation
from django.utils.functional import lazy
from django.utils import six
import base64
"""
Translation methods
"""
def _get_details_in_lang(field, lang):
if field.translation_cache.get(lang):
return field.translation_cache[lang]
else:
settings.odoo_models[field.details['model']].cache_translation(lang)
try:
return field.translation_cache[lang]
        except KeyError:
return field.details
def field_translate(field, key):
lang = translation.get_language() or "en-us"
details = _get_details_in_lang(field, lang)
res = details.get(key, "")
if isinstance(res, six.binary_type):
res = six.text_type(res)
return res
_ = lazy(field_translate, six.text_type)
def selection_translate(field):
def trans(val):
lang = translation.get_language() or "en-us"
details = _get_details_in_lang(field, lang)
return dict(details['selection'])[val]
trans_lazy = lazy(trans, six.text_type)
res = []
for val, _label in field.details.get('selection'):
res.append((val, trans_lazy(val)))
return tuple(res)
# TODO: default values
# TODO: domains
FIELDS_CONV = {
"char": "CharField",
"boolean": "BooleanField",
"integer": "IntegerField",
"text": "TextField",
"float": "DecimalField",
"date": "DateField",
"datetime": "DateTimeField",
"time": "TimeField",
"binary": "BinaryField",
"selection": "CharField",
"many2one": "ForeignKey",
"one2many": None,
# "many2many": "",
# "function": "",
# "related": "",
}
class OdooField(object):
def __init__(self, details):
self.details = details
self.translatable = details.get("translate")
self.django_field = False
self.translation_cache = {} # translations cache
return super(OdooField, self).__init__()
def to_django(self, **kwargs):
kwargs.update({
"verbose_name": _(self, 'string'),
"help_text": _(self, 'help'),
"blank": not(self.details.get("required")),
"editable": not(self.details.get("readonly")),
})
django_field = getattr(djangomodels, FIELDS_CONV[self.details["type"]])(**kwargs)
django_field.odoo_field = self
self.django_field = django_field
return django_field
def convert_data(self, data):
return data or None
def convert_back(self, data):
return data or False
class TextField(OdooField):
def to_django(self, **kwargs):
if self.details.get("required"):
kwargs["default"] = ""
kwargs["null"] = not(self.details.get("required"))
return super(TextField, self).to_django(**kwargs)
class CharField(TextField):
def to_django(self, **kwargs):
kwargs['max_length'] = self.details.get('size') or 512
return super(CharField, self).to_django(**kwargs)
class BooleanField(OdooField):
def to_django(self, **kwargs):
kwargs["default"] = False
return super(BooleanField, self).to_django(**kwargs)
def convert_data(self, data):
return data or False
class IntegerField(OdooField):
def to_django(self, **kwargs):
if self.details.get("required"):
kwargs["default"] = 0
kwargs["null"] = not(self.details.get("required"))
return super(IntegerField, self).to_django(**kwargs)
class FloatField(IntegerField):
def to_django(self, **kwargs):
if self.details.get("digits"):
kwargs["max_digits"] = self.details["digits"][0]
kwargs["decimal_places"] = self.details["digits"][1]
kwargs["null"] = not(self.details.get("required"))
return super(FloatField, self).to_django(**kwargs)
class DateField(OdooField):
def to_django(self, **kwargs):
kwargs["null"] = not(self.details.get("required"))
if self.details.get("required"):
kwargs["auto_now_add"] = True
return super(DateField, self).to_django(**kwargs)
class DateTimeField(DateField):
pass
class TimeField(DateField):
pass
class BinaryField(OdooField):
def to_django(self, **kwargs):
kwargs["null"] = not(self.details.get("required"))
return super(BinaryField, self).to_django(**kwargs)
def convert_data(self, data):
"""Odoo data is a b64-encoded string"""
return base64.b64decode(data) if data else None
def convert_back(self, data):
return base64.b64encode(data).decode("utf-8") if data else False
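# For example, a BinaryField converts the Odoo value 'aGVsbG8=' into b'hello'
# and converts b'hello' back into 'aGVsbG8='.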
class SelectionField(CharField):
def to_django(self, **kwargs):
kwargs["choices"] = selection_translate(self)
return super(SelectionField, self).to_django(**kwargs)
class Many2OneField(OdooField):
"""
    If the model identified by details['relation'] already exists in Django,
    then we can create the field directly. Otherwise, we delay the field
    creation until that model is (possibly) created later.
"""
def __new__(cls, details):
if details['relation'] in settings.odoo_models:
return OdooField.__new__(cls)
else:
settings.deferred_m2o[details['relation']] = settings.deferred_m2o.get(details['relation'], [])
settings.deferred_m2o[details['relation']].append(details)
return None
def to_django(self, **kwargs):
kwargs["null"] = not(self.details.get("required"))
if self.details['relation'] == self.details['model']:
kwargs["to"] = "self"
else:
to_model = settings.odoo_models[self.details['relation']]
kwargs["to"] = to_model
return super(Many2OneField, self).to_django(**kwargs)
def convert_data(self, data):
"""
Odoo data is a pair (id, label)
        We look in the target model for an instance whose odoo_id equals the
        first element of the pair; if not found, we load it from Odoo.
:param (tuple or False) data: the value to convert
:return (OdooModel or False): the object instance linked to this m2o field
"""
if data and isinstance(data, (list, tuple)) and len(data) == 2:
to_model = settings.odoo_models[self.details['relation']]
targets = to_model.objects.filter(odoo_id=data[0])
if targets:
return targets[0]
else:
return to_model.odoo_load([data[0]])[0]
return data or None
def convert_back(self, data):
"""
        Django data is either None or a Django instance.
        We transform it into False or an integer by reading the odoo_id of the instance.
        :todo: if the target object has no odoo_id, first push it to Odoo
        :param (OdooModel or False) data: the value to convert
        :return (integer or False): the id of the object in Odoo
"""
from .models import OdooModel
if data and isinstance(data, OdooModel) and hasattr(data, 'odoo_id'):
return data.odoo_id
# elif isinstance(data, (int, long)):
# return data
else:
return False
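# For example, an Odoo value of (42, 'Agrolait') is converted into the local
# instance whose odoo_id is 42 (loading it from Odoo on a cache miss), and
# convert_back() on that instance returns 42.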
class One2ManyField(OdooField):
"""
    There is no one2many field in Django, so we simply set the "related_name"
    of the ForeignKey field encoding the opposite relationship (identified by
    details['relation_field']) so that it bears the name of this one2many field.
"""
def __new__(cls, details):
if details['relation'] in settings.odoo_models:
relation = settings.odoo_models[details['relation']]
            for field in relation._meta.fields:
if field.name == details['relation_field']:
field.related_name = details['name']
else:
settings.deferred_o2m[details['relation']] = settings.deferred_o2m.get(details['relation'], [])
settings.deferred_o2m[details['relation']].append(details)
def convert_field(details):
    if details['type'] not in FIELDS_CONV:
        return None
    # "datetime".title() yields "Datetime", so special-case it to match the
    # DateTimeField class defined above.
    if details['type'] == 'datetime':
        return DateTimeField(details)
    return eval(details["type"].title() + "Field")(details)
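# Illustrative sketch (the field description below is hypothetical): a
# description such as
#   {'type': 'char', 'string': 'Name', 'size': 128, 'required': True,
#    'model': 'res.partner', 'translate': False}
# is routed to CharField; calling to_django() on the result yields a Django
# CharField with max_length=128, blank=False, null=False and a lazily
# translated verbose_name/help_text.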
|
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import datetime
import inspect
import os
import re
import urllib
import uuid as uuid_lib
import mock
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import timeutils
import testtools
from nova.api.metadata import password
from nova.api.openstack.compute.contrib import fping
from nova.api.openstack.compute import extensions
from nova.cells import utils as cells_utils
# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.cloudpipe import pipelib
from nova.compute import api as compute_api
from nova.compute import cells_api as cells_api
from nova.conductor import manager as conductor_manager
from nova.console import manager as console_manager # noqa - only for cfg
from nova import db
from nova.network import api as network_api
from nova.network.neutronv2 import api as neutron_api # noqa - only for cfg
from nova import objects
from nova.servicegroup import api as service_group_api
from nova import test
from nova.tests.functional import api_samples_test_base
from nova.tests.functional import integrated_helpers
from nova.tests.unit.api.openstack.compute.contrib import test_fping
from nova.tests.unit.api.openstack.compute.contrib import test_services
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_network
from nova.tests.unit import fake_utils
from nova.tests.unit.image import fake
from nova import utils
from nova.volume import cinder
CONF = cfg.CONF
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
CONF.import_opt('enable_network_quota',
'nova.api.openstack.compute.contrib.os_tenant_networks')
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
CONF.import_opt('osapi_compute_link_prefix', 'nova.api.openstack.common')
CONF.import_opt('osapi_glance_link_prefix', 'nova.api.openstack.common')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('cell_type', 'nova.cells.opts', group='cells')
CONF.import_opt('db_check_interval', 'nova.cells.state', group='cells')
LOG = logging.getLogger(__name__)
class ApiSampleTestBaseV2(api_samples_test_base.ApiSampleTestBase):
_api_version = 'v2'
def setUp(self):
extends = []
self.flags(use_ipv6=False,
osapi_compute_link_prefix=self._get_host(),
osapi_glance_link_prefix=self._get_glance_host())
if not self.all_extensions:
if hasattr(self, 'extends_name'):
extends = [self.extends_name]
ext = [self.extension_name] if self.extension_name else []
self.flags(osapi_compute_extension=ext + extends)
super(ApiSampleTestBaseV2, self).setUp()
self.useFixture(test.SampleNetworks(host=self.network.host))
fake_network.stub_compute_with_ips(self.stubs)
fake_utils.stub_out_utils_spawn_n(self.stubs)
self.generate_samples = os.getenv('GENERATE_SAMPLES') is not None
class ApiSamplesTrap(ApiSampleTestBaseV2):
"""Make sure extensions don't get added without tests."""
all_extensions = True
def _get_extensions_tested(self):
tests = []
for attr in globals().values():
if not inspect.isclass(attr):
continue # Skip non-class objects
if not issubclass(attr, integrated_helpers._IntegratedTestBase):
continue # Skip non-test classes
if attr.extension_name is None:
continue # Skip base tests
cls = importutils.import_class(attr.extension_name)
tests.append(cls.alias)
return tests
def _get_extensions(self):
extensions = []
response = self._do_get('extensions')
for extension in jsonutils.loads(response.content)['extensions']:
extensions.append(str(extension['alias']))
return extensions
def test_all_extensions_have_samples(self):
# NOTE(danms): This is a list of extensions which are currently
# in the tree but that don't (yet) have tests. This list should
# NOT be allowed to grow, and should shrink to zero (and be
# removed) soon.
# TODO(gmann): skip this tests as merging of sample tests for v2
# and v2.1 are in progress. After merging all tests, this tests
# need to implement in different way.
raise testtools.TestCase.skipException('Merging of v2 and v2.1 '
'sample tests is in progress. '
'This test will be enabled '
'after all tests gets merged.')
do_not_approve_additions = []
do_not_approve_additions.append('os-create-server-ext')
do_not_approve_additions.append('os-baremetal-ext-status')
tests = self._get_extensions_tested()
extensions = self._get_extensions()
missing_tests = []
for extension in extensions:
# NOTE(danms): if you add tests, remove it from the
# exclusions list
self.assertFalse(extension in do_not_approve_additions and
extension in tests)
# NOTE(danms): if you add an extension, it must come with
# api_samples tests!
if (extension not in tests and
extension not in do_not_approve_additions):
missing_tests.append(extension)
if missing_tests:
LOG.error("Extensions are missing tests: %s" % missing_tests)
self.assertEqual(missing_tests, [])
class VersionsSampleJsonTest(ApiSampleTestBaseV2):
sample_dir = 'versions'
def test_versions_get(self):
response = self._do_get('', strip_version=True)
subs = self._get_regexes()
self._verify_response('versions-get-resp', subs, response, 200)
class ServersSampleBase(ApiSampleTestBaseV2):
def _post_server(self, use_common_server_api_samples=True):
        # param use_common_server_api_samples: boolean controlling whether the
        # test uses the common sample files for the server POST request and
        # response. Default is True, which means _get_sample_path will fetch
        # the common server sample files. Set it to False if a test needs to
        # use extension-specific sample files.
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
}
orig_value = self.__class__._use_common_server_api_samples
try:
self.__class__._use_common_server_api_samples = (
use_common_server_api_samples)
response = self._do_post('servers', 'server-post-req', subs)
subs = self._get_regexes()
status = self._verify_response('server-post-resp', subs,
response, 202)
return status
finally:
self.__class__._use_common_server_api_samples = orig_value
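# The pattern above is reused throughout these tests: build a substitution
# dict, issue the request against the fake API, then let _verify_response
# match the response against the stored sample template, treating the
# substitutions as regexes.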
class ServersSampleMultiStatusJsonTest(ServersSampleBase):
extension_name = '.'.join(('nova.api.openstack.compute.contrib',
'server_list_multi_status',
'Server_list_multi_status'))
def test_servers_list(self):
uuid = self._post_server()
response = self._do_get('servers?status=active&status=error')
subs = self._get_regexes()
subs['id'] = uuid
self._verify_response('servers-list-resp', subs, response, 200)
class ServersMetadataJsonTest(ServersSampleBase):
sample_dir = 'servers'
def _create_and_set(self, subs):
uuid = self._post_server()
response = self._do_put('servers/%s/metadata' % uuid,
'server-metadata-all-req',
subs)
self._verify_response('server-metadata-all-resp', subs, response, 200)
return uuid
def generalize_subs(self, subs, vanilla_regexes):
subs['value'] = '(Foo|Bar) Value'
return subs
def test_metadata_put_all(self):
# Test setting all metadata for a server.
subs = {'value': 'Foo Value'}
self._create_and_set(subs)
def test_metadata_post_all(self):
# Test updating all metadata for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
subs['value'] = 'Bar Value'
response = self._do_post('servers/%s/metadata' % uuid,
'server-metadata-all-req',
subs)
self._verify_response('server-metadata-all-resp', subs, response, 200)
def test_metadata_get_all(self):
# Test getting all metadata for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_get('servers/%s/metadata' % uuid)
self._verify_response('server-metadata-all-resp', subs, response, 200)
def test_metadata_put(self):
# Test putting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
subs['value'] = 'Bar Value'
response = self._do_put('servers/%s/metadata/foo' % uuid,
'server-metadata-req',
subs)
self._verify_response('server-metadata-resp', subs, response, 200)
def test_metadata_get(self):
# Test getting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_get('servers/%s/metadata/foo' % uuid)
self._verify_response('server-metadata-resp', subs, response, 200)
def test_metadata_delete(self):
# Test deleting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_delete('servers/%s/metadata/foo' % uuid)
self.assertEqual(response.status_code, 204)
self.assertEqual(response.content, '')
class ExtensionsSampleJsonTest(ApiSampleTestBaseV2):
all_extensions = True
def test_extensions_get(self):
response = self._do_get('extensions')
subs = self._get_regexes()
self._verify_response('extensions-get-resp', subs, response, 200)
class FlavorsSampleJsonTest(ApiSampleTestBaseV2):
sample_dir = 'flavors'
def test_flavors_get(self):
response = self._do_get('flavors/1')
subs = self._get_regexes()
self._verify_response('flavor-get-resp', subs, response, 200)
def test_flavors_list(self):
response = self._do_get('flavors')
subs = self._get_regexes()
self._verify_response('flavors-list-resp', subs, response, 200)
class FlavorsSampleAllExtensionJsonTest(FlavorsSampleJsonTest):
all_extensions = True
class LimitsSampleJsonTest(ApiSampleTestBaseV2):
sample_dir = 'limits'
def test_limits_get(self):
response = self._do_get('limits')
subs = self._get_regexes()
self._verify_response('limit-get-resp', subs, response, 200)
class ServersActionsJsonTest(ServersSampleBase):
sample_dir = 'servers'
def _test_server_action(self, uuid, action,
subs=None, resp_tpl=None, code=202):
subs = subs or {}
subs.update({'action': action})
response = self._do_post('servers/%s/action' % uuid,
'server-action-%s' % action.lower(),
subs)
if resp_tpl:
subs.update(self._get_regexes())
self._verify_response(resp_tpl, subs, response, code)
else:
self.assertEqual(response.status_code, code)
self.assertEqual(response.content, "")
def test_server_password(self):
uuid = self._post_server()
self._test_server_action(uuid, "changePassword",
{"password": "foo"})
class UserDataJsonTest(ApiSampleTestBaseV2):
extension_name = "nova.api.openstack.compute.contrib.user_data.User_data"
def test_user_data_post(self):
user_data_contents = '#!/bin/bash\n/bin/su\necho "I am in you!"\n'
user_data = base64.b64encode(user_data_contents)
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
'user_data': user_data
}
response = self._do_post('servers', 'userdata-post-req', subs)
subs.update(self._get_regexes())
self._verify_response('userdata-post-resp', subs, response, 202)
class SecurityGroupsSampleJsonTest(ServersSampleBase):
extension_name = "nova.api.openstack.compute.contrib" + \
".security_groups.Security_groups"
def _get_create_subs(self):
return {
'group_name': 'test',
"description": "description",
}
def _create_security_group(self):
subs = self._get_create_subs()
return self._do_post('os-security-groups',
'security-group-post-req', subs)
def _add_group(self, uuid):
subs = {
'group_name': 'test'
}
return self._do_post('servers/%s/action' % uuid,
'security-group-add-post-req', subs)
def test_security_group_create(self):
response = self._create_security_group()
subs = self._get_create_subs()
self._verify_response('security-groups-create-resp', subs,
response, 200)
def test_security_groups_list(self):
# Get api sample of security groups get list request.
response = self._do_get('os-security-groups')
subs = self._get_regexes()
self._verify_response('security-groups-list-get-resp',
subs, response, 200)
def test_security_groups_get(self):
# Get api sample of security groups get request.
security_group_id = '1'
response = self._do_get('os-security-groups/%s' % security_group_id)
subs = self._get_regexes()
self._verify_response('security-groups-get-resp', subs, response, 200)
def test_security_groups_list_server(self):
# Get api sample of security groups for a specific server.
uuid = self._post_server(use_common_server_api_samples=False)
response = self._do_get('servers/%s/os-security-groups' % uuid)
subs = self._get_regexes()
self._verify_response('server-security-groups-list-resp',
subs, response, 200)
def test_security_groups_add(self):
self._create_security_group()
uuid = self._post_server(use_common_server_api_samples=False)
response = self._add_group(uuid)
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
def test_security_groups_remove(self):
self._create_security_group()
uuid = self._post_server(use_common_server_api_samples=False)
self._add_group(uuid)
subs = {
'group_name': 'test'
}
response = self._do_post('servers/%s/action' % uuid,
'security-group-remove-post-req', subs)
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
class SchedulerHintsJsonTest(ApiSampleTestBaseV2):
extension_name = ("nova.api.openstack.compute.contrib.scheduler_hints."
"Scheduler_hints")
def test_scheduler_hints_post(self):
# Get api sample of scheduler hint post request.
hints = {'image_id': fake.get_valid_image_id(),
'image_near': str(uuid_lib.uuid4())
}
response = self._do_post('servers', 'scheduler-hints-post-req',
hints)
subs = self._get_regexes()
self._verify_response('scheduler-hints-post-resp', subs, response, 202)
class KeyPairsSampleJsonTest(ApiSampleTestBaseV2):
extension_name = "nova.api.openstack.compute.contrib.keypairs.Keypairs"
def generalize_subs(self, subs, vanilla_regexes):
subs['keypair_name'] = 'keypair-[0-9a-f-]+'
return subs
def test_keypairs_post(self, public_key=None):
"""Get api sample of key pairs post request."""
key_name = 'keypair-' + str(uuid_lib.uuid4())
response = self._do_post('os-keypairs', 'keypairs-post-req',
{'keypair_name': key_name})
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self._verify_response('keypairs-post-resp', subs, response, 200)
        # NOTE(maurosr): returning the key_name is necessary because the
        # verification returns the label of the last compared piece of
        # information in the response, not necessarily the key name.
return key_name
def test_keypairs_import_key_post(self):
# Get api sample of key pairs post to import user's key.
key_name = 'keypair-' + str(uuid_lib.uuid4())
subs = {
'keypair_name': key_name,
'public_key': "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGg"
"B4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0l"
"RE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv"
"9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYc"
"pSxsIbECHw== Generated-by-Nova"
}
response = self._do_post('os-keypairs', 'keypairs-import-post-req',
subs)
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self._verify_response('keypairs-import-post-resp', subs, response, 200)
def test_keypairs_list(self):
# Get api sample of key pairs list request.
key_name = self.test_keypairs_post()
response = self._do_get('os-keypairs')
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self._verify_response('keypairs-list-resp', subs, response, 200)
def test_keypairs_get(self):
# Get api sample of key pairs get request.
key_name = self.test_keypairs_post()
response = self._do_get('os-keypairs/%s' % key_name)
subs = self._get_regexes()
subs['keypair_name'] = '(%s)' % key_name
self._verify_response('keypairs-get-resp', subs, response, 200)
class RescueJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".rescue.Rescue")
def _rescue(self, uuid):
req_subs = {
'password': 'MySecretPass'
}
response = self._do_post('servers/%s/action' % uuid,
'server-rescue-req', req_subs)
self._verify_response('server-rescue', req_subs, response, 200)
def _unrescue(self, uuid):
response = self._do_post('servers/%s/action' % uuid,
'server-unrescue-req', {})
self.assertEqual(response.status_code, 202)
def test_server_rescue(self):
uuid = self._post_server()
self._rescue(uuid)
# Do a server get to make sure that the 'RESCUE' state is set
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['status'] = 'RESCUE'
self._verify_response('server-get-resp-rescue', subs, response, 200)
def test_server_unrescue(self):
uuid = self._post_server()
self._rescue(uuid)
self._unrescue(uuid)
# Do a server get to make sure that the 'ACTIVE' state is back
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['status'] = 'ACTIVE'
self._verify_response('server-get-resp-unrescue', subs, response, 200)
class ExtendedRescueWithImageJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".extended_rescue_with_image.Extended_rescue_with_image")
def _get_flags(self):
f = super(ExtendedRescueWithImageJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# ExtendedRescueWithImage extension also needs Rescue to be loaded.
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.rescue.Rescue')
return f
def _rescue(self, uuid):
req_subs = {
'password': 'MySecretPass',
'rescue_image_ref': fake.get_valid_image_id()
}
response = self._do_post('servers/%s/action' % uuid,
'server-rescue-req', req_subs)
self._verify_response('server-rescue', req_subs, response, 200)
def test_server_rescue(self):
uuid = self._post_server()
self._rescue(uuid)
# Do a server get to make sure that the 'RESCUE' state is set
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['status'] = 'RESCUE'
self._verify_response('server-get-resp-rescue', subs, response, 200)
class VirtualInterfacesJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".virtual_interfaces.Virtual_interfaces")
def test_vifs_list(self):
uuid = self._post_server()
response = self._do_get('servers/%s/os-virtual-interfaces' % uuid)
subs = self._get_regexes()
subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
self._verify_response('vifs-list-resp', subs, response, 200)
class CloudPipeSampleJsonTest(ApiSampleTestBaseV2):
ADMIN_API = True
extension_name = "nova.api.openstack.compute.contrib.cloudpipe.Cloudpipe"
def setUp(self):
super(CloudPipeSampleJsonTest, self).setUp()
def get_user_data(self, project_id):
"""Stub method to generate user data for cloudpipe tests."""
return "VVNFUiBEQVRB\n"
def network_api_get(self, context, network_uuid):
"""Stub to get a valid network and its information."""
return {'vpn_public_address': '127.0.0.1',
'vpn_public_port': 22}
self.stubs.Set(pipelib.CloudPipe, 'get_encoded_zip', get_user_data)
self.stubs.Set(network_api.API, "get",
network_api_get)
def generalize_subs(self, subs, vanilla_regexes):
subs['project_id'] = 'cloudpipe-[0-9a-f-]+'
return subs
def test_cloud_pipe_create(self):
# Get api samples of cloud pipe extension creation.
self.flags(vpn_image_id=fake.get_valid_image_id())
project = {'project_id': 'cloudpipe-' + str(uuid_lib.uuid4())}
response = self._do_post('os-cloudpipe', 'cloud-pipe-create-req',
project)
subs = self._get_regexes()
subs.update(project)
subs['image_id'] = CONF.vpn_image_id
self._verify_response('cloud-pipe-create-resp', subs, response, 200)
return project
def test_cloud_pipe_list(self):
# Get api samples of cloud pipe extension get request.
project = self.test_cloud_pipe_create()
response = self._do_get('os-cloudpipe')
subs = self._get_regexes()
subs.update(project)
subs['image_id'] = CONF.vpn_image_id
self._verify_response('cloud-pipe-get-resp', subs, response, 200)
class CloudPipeUpdateJsonTest(ApiSampleTestBaseV2):
ADMIN_API = True
extension_name = ("nova.api.openstack.compute.contrib"
".cloudpipe_update.Cloudpipe_update")
def _get_flags(self):
f = super(CloudPipeUpdateJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# Cloudpipe_update also needs cloudpipe to be loaded
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.cloudpipe.Cloudpipe')
return f
def test_cloud_pipe_update(self):
subs = {'vpn_ip': '192.168.1.1',
'vpn_port': 2000}
response = self._do_put('os-cloudpipe/configure-project',
'cloud-pipe-update-req',
subs)
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
class UsedLimitsSamplesJsonTest(ApiSampleTestBaseV2):
extension_name = ("nova.api.openstack.compute.contrib.used_limits."
"Used_limits")
def test_get_used_limits(self):
# Get api sample to used limits.
response = self._do_get('limits')
subs = self._get_regexes()
self._verify_response('usedlimits-get-resp', subs, response, 200)
class UsedLimitsForAdminSamplesJsonTest(ApiSampleTestBaseV2):
ADMIN_API = True
extends_name = ("nova.api.openstack.compute.contrib.used_limits."
"Used_limits")
extension_name = (
"nova.api.openstack.compute.contrib.used_limits_for_admin."
"Used_limits_for_admin")
def test_get_used_limits_for_admin(self):
tenant_id = 'openstack'
response = self._do_get('limits?tenant_id=%s' % tenant_id)
subs = self._get_regexes()
return self._verify_response('usedlimitsforadmin-get-resp', subs,
response, 200)
class ServicesJsonTest(ApiSampleTestBaseV2):
extension_name = "nova.api.openstack.compute.contrib.services.Services"
ADMIN_API = True
def setUp(self):
super(ServicesJsonTest, self).setUp()
self.stubs.Set(db, "service_get_all",
test_services.fake_db_api_service_get_all)
self.stubs.Set(timeutils, "utcnow", test_services.fake_utcnow)
self.stubs.Set(timeutils, "utcnow_ts", test_services.fake_utcnow_ts)
self.stubs.Set(db, "service_get_by_host_and_binary",
test_services.fake_service_get_by_host_binary)
self.stubs.Set(db, "service_update",
test_services.fake_service_update)
def tearDown(self):
super(ServicesJsonTest, self).tearDown()
timeutils.clear_time_override()
def fake_load(self, service_name):
return service_name == 'os-extended-services'
def test_services_list(self):
"""Return a list of all agent builds."""
response = self._do_get('os-services')
subs = {'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'status': 'disabled',
'state': 'up'}
subs.update(self._get_regexes())
self._verify_response('services-list-get-resp', subs, response, 200)
def test_service_enable(self):
"""Enable an existing agent build."""
subs = {"host": "host1",
'binary': 'nova-compute'}
response = self._do_put('os-services/enable',
'service-enable-put-req', subs)
subs = {"host": "host1",
"binary": "nova-compute"}
self._verify_response('service-enable-put-resp', subs, response, 200)
def test_service_disable(self):
"""Disable an existing agent build."""
subs = {"host": "host1",
'binary': 'nova-compute'}
response = self._do_put('os-services/disable',
'service-disable-put-req', subs)
subs = {"host": "host1",
"binary": "nova-compute"}
self._verify_response('service-disable-put-resp', subs, response, 200)
def test_service_detail(self):
"""Return a list of all running services with the disable reason
information if that exists.
"""
self.stubs.Set(extensions.ExtensionManager, "is_loaded",
self.fake_load)
response = self._do_get('os-services')
self.assertEqual(response.status_code, 200)
subs = {'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'status': 'disabled',
'state': 'up'}
subs.update(self._get_regexes())
self._verify_response('services-get-resp',
subs, response, 200)
def test_service_disable_log_reason(self):
"""Disable an existing service and log the reason."""
self.stubs.Set(extensions.ExtensionManager, "is_loaded",
self.fake_load)
subs = {"host": "host1",
'binary': 'nova-compute',
'disabled_reason': 'test2'}
response = self._do_put('os-services/disable-log-reason',
'service-disable-log-put-req', subs)
return self._verify_response('service-disable-log-put-resp',
subs, response, 200)
class ExtendedServicesJsonTest(ApiSampleTestBaseV2):
"""This extension is extending the functionalities of the
Services extension so the funcionalities introduced by this extension
are tested in the ServicesJsonTest and ServicesXmlTest classes.
"""
ADMIN_API = True
extension_name = ("nova.api.openstack.compute.contrib."
"extended_services.Extended_services")
@mock.patch.object(db, 'service_get_all',
side_effect=test_services.fake_db_api_service_get_all)
@mock.patch.object(db, 'service_get_by_host_and_binary',
side_effect=test_services.fake_service_get_by_host_binary)
class ExtendedServicesDeleteJsonTest(ApiSampleTestBaseV2):
ADMIN_API = True
extends_name = ("nova.api.openstack.compute.contrib.services.Services")
extension_name = ("nova.api.openstack.compute.contrib."
"extended_services_delete.Extended_services_delete")
def setUp(self):
super(ExtendedServicesDeleteJsonTest, self).setUp()
timeutils.set_time_override(test_services.fake_utcnow())
def tearDown(self):
super(ExtendedServicesDeleteJsonTest, self).tearDown()
timeutils.clear_time_override()
def test_service_detail(self, *mocks):
"""Return a list of all running services with the disable reason
information if that exists.
"""
response = self._do_get('os-services')
self.assertEqual(response.status_code, 200)
subs = {'id': 1,
'binary': 'nova-compute',
'host': 'host1',
'zone': 'nova',
'status': 'disabled',
'state': 'up'}
subs.update(self._get_regexes())
return self._verify_response('services-get-resp',
subs, response, 200)
def test_service_delete(self, *mocks):
response = self._do_delete('os-services/1')
self.assertEqual(response.status_code, 204)
self.assertEqual(response.content, "")
class SimpleTenantUsageSampleJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.simple_tenant_usage."
"Simple_tenant_usage")
def setUp(self):
"""setUp method for simple tenant usage."""
super(SimpleTenantUsageSampleJsonTest, self).setUp()
started = timeutils.utcnow()
now = started + datetime.timedelta(hours=1)
timeutils.set_time_override(started)
self._post_server()
timeutils.set_time_override(now)
self.query = {
'start': str(started),
'end': str(now)
}
def tearDown(self):
"""tearDown method for simple tenant usage."""
super(SimpleTenantUsageSampleJsonTest, self).tearDown()
timeutils.clear_time_override()
def test_get_tenants_usage(self):
# Get api sample to get all tenants usage request.
response = self._do_get('os-simple-tenant-usage?%s' % (
urllib.urlencode(self.query)))
subs = self._get_regexes()
self._verify_response('simple-tenant-usage-get', subs, response, 200)
def test_get_tenant_usage_details(self):
# Get api sample to get specific tenant usage request.
tenant_id = 'openstack'
response = self._do_get('os-simple-tenant-usage/%s?%s' % (tenant_id,
urllib.urlencode(self.query)))
subs = self._get_regexes()
self._verify_response('simple-tenant-usage-get-specific', subs,
response, 200)
class AvailabilityZoneJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.availability_zone."
"Availability_zone")
def test_create_availability_zone(self):
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
"availability_zone": "nova"
}
response = self._do_post('servers', 'availability-zone-post-req', subs)
subs.update(self._get_regexes())
self._verify_response('availability-zone-post-resp', subs,
response, 202)
class AdminActionsSamplesJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.admin_actions."
"Admin_actions")
def setUp(self):
"""setUp Method for AdminActions api samples extension
This method creates the server that will be used in each tests
"""
super(AdminActionsSamplesJsonTest, self).setUp()
self.uuid = self._post_server()
def test_post_pause(self):
# Get api samples to pause server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-pause', {})
self.assertEqual(response.status_code, 202)
def test_post_unpause(self):
# Get api samples to unpause server request.
self.test_post_pause()
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-unpause', {})
self.assertEqual(response.status_code, 202)
@mock.patch('nova.conductor.manager.ComputeTaskManager._cold_migrate')
def test_post_migrate(self, mock_cold_migrate):
# Get api samples to migrate server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-migrate', {})
self.assertEqual(response.status_code, 202)
def test_post_reset_network(self):
# Get api samples to reset server network request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-reset-network', {})
self.assertEqual(response.status_code, 202)
def test_post_inject_network_info(self):
# Get api samples to inject network info request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-inject-network-info', {})
self.assertEqual(response.status_code, 202)
def test_post_lock_server(self):
# Get api samples to lock server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-lock-server', {})
self.assertEqual(response.status_code, 202)
def test_post_unlock_server(self):
# Get api samples to unlock server request.
self.test_post_lock_server()
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-unlock-server', {})
self.assertEqual(response.status_code, 202)
def test_post_live_migrate_server(self):
# Get api samples to server live migrate request.
def fake_live_migrate(_self, context, instance, scheduler_hint,
block_migration, disk_over_commit):
self.assertEqual(self.uuid, instance["uuid"])
host = scheduler_hint["host"]
self.assertEqual(self.compute.host, host)
self.stubs.Set(conductor_manager.ComputeTaskManager,
'_live_migrate',
fake_live_migrate)
def fake_get_compute(context, host):
service = dict(host=host,
binary='nova-compute',
topic='compute',
report_count=1,
updated_at='foo',
hypervisor_type='bar',
hypervisor_version=
utils.convert_version_to_int('1.0'),
disabled=False)
return {'compute_node': [service]}
self.stubs.Set(db, "service_get_by_compute_host", fake_get_compute)
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-live-migrate',
{'hostname': self.compute.host})
self.assertEqual(response.status_code, 202)
def test_post_reset_state(self):
# get api samples to server reset state request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-reset-server-state', {})
self.assertEqual(response.status_code, 202)
class ConsoleAuthTokensSampleJsonTests(ServersSampleBase):
ADMIN_API = True
extends_name = ("nova.api.openstack.compute.contrib.consoles.Consoles")
extension_name = ("nova.api.openstack.compute.contrib.console_auth_tokens."
"Console_auth_tokens")
def _get_console_url(self, data):
return jsonutils.loads(data)["console"]["url"]
def _get_console_token(self, uuid):
response = self._do_post('servers/%s/action' % uuid,
'get-rdp-console-post-req',
{'action': 'os-getRDPConsole'})
url = self._get_console_url(response.content)
return re.match('.+?token=([^&]+)', url).groups()[0]
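    # For example, for a console URL like 'http://host:6083/?token=abc123&x=1'
    # the regex above captures 'abc123'.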
def test_get_console_connect_info(self):
self.flags(enabled=True, group='rdp')
uuid = self._post_server()
token = self._get_console_token(uuid)
response = self._do_get('os-console-auth-tokens/%s' % token)
subs = self._get_regexes()
subs["uuid"] = uuid
subs["host"] = r"[\w\.\-]+"
subs["port"] = "[0-9]+"
subs["internal_access_path"] = ".*"
self._verify_response('get-console-connect-info-get-resp', subs,
response, 200)
class DeferredDeleteSampleJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".deferred_delete.Deferred_delete")
def setUp(self):
super(DeferredDeleteSampleJsonTests, self).setUp()
self.flags(reclaim_instance_interval=1)
def test_restore(self):
uuid = self._post_server()
self._do_delete('servers/%s' % uuid)
response = self._do_post('servers/%s/action' % uuid,
'restore-post-req', {})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
def test_force_delete(self):
uuid = self._post_server()
self._do_delete('servers/%s' % uuid)
response = self._do_post('servers/%s/action' % uuid,
'force-delete-post-req', {})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
class QuotasSampleJsonTests(ApiSampleTestBaseV2):
ADMIN_API = True
extension_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
def test_show_quotas(self):
# Get api sample to show quotas.
response = self._do_get('os-quota-sets/fake_tenant')
self._verify_response('quotas-show-get-resp', {}, response, 200)
def test_show_quotas_defaults(self):
# Get api sample to show quotas defaults.
response = self._do_get('os-quota-sets/fake_tenant/defaults')
self._verify_response('quotas-show-defaults-get-resp',
{}, response, 200)
def test_update_quotas(self):
# Get api sample to update quotas.
response = self._do_put('os-quota-sets/fake_tenant',
'quotas-update-post-req',
{})
self._verify_response('quotas-update-post-resp', {}, response, 200)
class ExtendedQuotasSampleJsonTests(ApiSampleTestBaseV2):
ADMIN_API = True
extends_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
extension_name = ("nova.api.openstack.compute.contrib"
".extended_quotas.Extended_quotas")
def test_delete_quotas(self):
# Get api sample to delete quota.
response = self._do_delete('os-quota-sets/fake_tenant')
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
def test_update_quotas(self):
# Get api sample to update quotas.
response = self._do_put('os-quota-sets/fake_tenant',
'quotas-update-post-req',
{})
return self._verify_response('quotas-update-post-resp', {},
response, 200)
class UserQuotasSampleJsonTests(ApiSampleTestBaseV2):
ADMIN_API = True
extends_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
extension_name = ("nova.api.openstack.compute.contrib"
".user_quotas.User_quotas")
def fake_load(self, *args):
return True
def test_show_quotas_for_user(self):
# Get api sample to show quotas for user.
response = self._do_get('os-quota-sets/fake_tenant?user_id=1')
self._verify_response('user-quotas-show-get-resp', {}, response, 200)
def test_delete_quotas_for_user(self):
# Get api sample to delete quota for user.
self.stubs.Set(extensions.ExtensionManager, "is_loaded",
self.fake_load)
response = self._do_delete('os-quota-sets/fake_tenant?user_id=1')
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, '')
def test_update_quotas_for_user(self):
# Get api sample to update quotas for user.
response = self._do_put('os-quota-sets/fake_tenant?user_id=1',
'user-quotas-update-post-req',
{})
return self._verify_response('user-quotas-update-post-resp', {},
response, 200)
class ExtendedIpsSampleJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".extended_ips.Extended_ips")
def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['hypervisor_hostname'] = r'[\w\.\-]+'
self._verify_response('server-get-resp', subs, response, 200)
def test_detail(self):
uuid = self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['id'] = uuid
subs['hostid'] = '[a-f0-9]+'
self._verify_response('servers-detail-resp', subs, response, 200)
class ExtendedIpsMacSampleJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".extended_ips_mac.Extended_ips_mac")
def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
self.assertEqual(response.status_code, 200)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
subs['hypervisor_hostname'] = r'[\w\.\-]+'
subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
self._verify_response('server-get-resp', subs, response, 200)
def test_detail(self):
uuid = self._post_server()
response = self._do_get('servers/detail')
self.assertEqual(response.status_code, 200)
subs = self._get_regexes()
subs['id'] = uuid
subs['hostid'] = '[a-f0-9]+'
subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
self._verify_response('servers-detail-resp', subs, response, 200)
class ExtendedVolumesSampleJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".extended_volumes.Extended_volumes")
def test_show(self):
uuid = self._post_server()
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fakes.stub_bdm_get_all_by_instance)
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('server-get-resp', subs, response, 200)
def test_detail(self):
uuid = self._post_server()
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fakes.stub_bdm_get_all_by_instance)
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['id'] = uuid
subs['hostid'] = '[a-f0-9]+'
self._verify_response('servers-detail-resp', subs, response, 200)
class ExtendedVIFNetSampleJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".extended_virtual_interfaces_net.Extended_virtual_interfaces_net")
def _get_flags(self):
f = super(ExtendedVIFNetSampleJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# extended_virtual_interfaces_net_update also
# needs virtual_interfaces to be loaded
f['osapi_compute_extension'].append(
('nova.api.openstack.compute.contrib'
'.virtual_interfaces.Virtual_interfaces'))
return f
def test_vifs_list(self):
uuid = self._post_server()
response = self._do_get('servers/%s/os-virtual-interfaces' % uuid)
self.assertEqual(response.status_code, 200)
subs = self._get_regexes()
subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
self._verify_response('vifs-list-resp', subs, response, 200)
class ServerPasswordSampleJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.server_password."
"Server_password")
def test_get_password(self):
# Mock password since there is no api to set it
def fake_ext_password(*args, **kwargs):
return ("xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/"
"Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp"
"28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtV"
"VzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNX"
"JjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrj"
"QskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+Ac"
"X//PXk3uJ5kC7d67fPXaVz4WaQRYMg==")
self.stubs.Set(password, "extract_password", fake_ext_password)
uuid = self._post_server()
response = self._do_get('servers/%s/os-server-password' % uuid)
subs = self._get_regexes()
subs['encrypted_password'] = fake_ext_password().replace('+', '\\+')
self._verify_response('get-password-resp', subs, response, 200)
def test_reset_password(self):
uuid = self._post_server()
response = self._do_delete('servers/%s/os-server-password' % uuid)
self.assertEqual(response.status_code, 204)
class DiskConfigJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.disk_config."
"Disk_config")
def test_list_servers_detail(self):
uuid = self._post_server(use_common_server_api_samples=False)
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
subs['id'] = uuid
self._verify_response('list-servers-detail-get', subs, response, 200)
def test_get_server(self):
uuid = self._post_server(use_common_server_api_samples=False)
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('server-get-resp', subs, response, 200)
def test_update_server(self):
uuid = self._post_server(use_common_server_api_samples=False)
response = self._do_put('servers/%s' % uuid,
'server-update-put-req', {})
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('server-update-put-resp', subs, response, 200)
def test_resize_server(self):
self.flags(allow_resize_to_same_host=True)
uuid = self._post_server(use_common_server_api_samples=False)
response = self._do_post('servers/%s/action' % uuid,
'server-resize-post-req', {})
self.assertEqual(response.status_code, 202)
# NOTE(tmello): Resize does not return response body
# Bug #1085213.
self.assertEqual(response.content, "")
def test_rebuild_server(self):
uuid = self._post_server(use_common_server_api_samples=False)
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
}
response = self._do_post('servers/%s/action' % uuid,
'server-action-rebuild-req', subs)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('server-action-rebuild-resp',
subs, response, 202)
def test_get_image(self):
image_id = fake.get_valid_image_id()
response = self._do_get('images/%s' % image_id)
subs = self._get_regexes()
subs['image_id'] = image_id
self._verify_response('image-get-resp', subs, response, 200)
def test_list_images(self):
response = self._do_get('images/detail')
subs = self._get_regexes()
self._verify_response('image-list-resp', subs, response, 200)
class BlockDeviceMappingV2BootJsonTest(ServersSampleBase):
extension_name = ('nova.api.openstack.compute.contrib.'
'block_device_mapping_v2_boot.'
'Block_device_mapping_v2_boot')
def _get_flags(self):
f = super(BlockDeviceMappingV2BootJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# We need the volumes extension as well
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.volumes.Volumes')
return f
def test_servers_post_with_bdm_v2(self):
self.stubs.Set(cinder.API, 'get', fakes.stub_volume_get)
self.stubs.Set(cinder.API, 'check_attach',
fakes.stub_volume_check_attach)
return self._post_server()
class FpingSampleJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.fping.Fping")
def setUp(self):
super(FpingSampleJsonTests, self).setUp()
def fake_check_fping(self):
pass
self.stubs.Set(utils, "execute", test_fping.execute)
self.stubs.Set(fping.FpingController, "check_fping",
fake_check_fping)
def test_get_fping(self):
self._post_server()
response = self._do_get('os-fping')
subs = self._get_regexes()
self._verify_response('fping-get-resp', subs, response, 200)
def test_get_fping_details(self):
uuid = self._post_server()
response = self._do_get('os-fping/%s' % (uuid))
subs = self._get_regexes()
self._verify_response('fping-get-details-resp', subs, response, 200)
class ExtendedAvailabilityZoneJsonTests(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib"
".extended_availability_zone"
".Extended_availability_zone")
def test_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('server-get-resp', subs, response, 200)
def test_detail(self):
self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
self._verify_response('servers-detail-resp', subs, response, 200)
class ConfigDriveSampleJsonTest(ServersSampleBase):
extension_name = ("nova.api.openstack.compute.contrib.config_drive."
"Config_drive")
def setUp(self):
super(ConfigDriveSampleJsonTest, self).setUp()
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fake.stub_out_image_service(self.stubs)
def test_config_drive_show(self):
uuid = self._post_server()
response = self._do_get('servers/%s' % uuid)
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
# config drive can be a string for True or empty value for False
subs['cdrive'] = '.*'
self._verify_response('server-config-drive-get-resp', subs,
response, 200)
def test_config_drive_detail(self):
self._post_server()
response = self._do_get('servers/detail')
subs = self._get_regexes()
subs['hostid'] = '[a-f0-9]+'
# config drive can be a string for True or empty value for False
subs['cdrive'] = '.*'
self._verify_response('servers-config-drive-details-resp',
subs, response, 200)
@mock.patch.object(service_group_api.API, "service_is_up", lambda _: True)
class HypervisorsSampleJsonTests(ApiSampleTestBaseV2):
ADMIN_API = True
extension_name = ("nova.api.openstack.compute.contrib.hypervisors."
"Hypervisors")
def test_hypervisors_list(self):
response = self._do_get('os-hypervisors')
self._verify_response('hypervisors-list-resp', {}, response, 200)
def test_hypervisors_search(self):
response = self._do_get('os-hypervisors/fake/search')
self._verify_response('hypervisors-search-resp', {}, response, 200)
def test_hypervisors_servers(self):
response = self._do_get('os-hypervisors/fake/servers')
self._verify_response('hypervisors-servers-resp', {}, response, 200)
def test_hypervisors_show(self):
hypervisor_id = 1
subs = {
'hypervisor_id': hypervisor_id
}
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
subs.update(self._get_regexes())
self._verify_response('hypervisors-show-resp', subs, response, 200)
def test_hypervisors_statistics(self):
response = self._do_get('os-hypervisors/statistics')
self._verify_response('hypervisors-statistics-resp', {}, response, 200)
def test_hypervisors_uptime(self):
def fake_get_host_uptime(self, context, hyp):
return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
self.stubs.Set(compute_api.HostAPI,
'get_host_uptime', fake_get_host_uptime)
hypervisor_id = 1
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
subs = {
'hypervisor_id': hypervisor_id,
}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)
class ExtendedHypervisorsJsonTest(ApiSampleTestBaseV2):
ADMIN_API = True
extends_name = ("nova.api.openstack.compute.contrib."
"hypervisors.Hypervisors")
extension_name = ("nova.api.openstack.compute.contrib."
"extended_hypervisors.Extended_hypervisors")
def test_hypervisors_show_with_ip(self):
hypervisor_id = 1
subs = {
'hypervisor_id': hypervisor_id
}
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
subs.update(self._get_regexes())
self._verify_response('hypervisors-show-with-ip-resp',
subs, response, 200)
class HypervisorStatusJsonTest(ApiSampleTestBaseV2):
ADMIN_API = True
extends_name = ("nova.api.openstack.compute.contrib."
"hypervisors.Hypervisors")
extension_name = ("nova.api.openstack.compute.contrib."
"hypervisor_status.Hypervisor_status")
def test_hypervisors_show_with_status(self):
hypervisor_id = 1
subs = {
'hypervisor_id': hypervisor_id
}
response = self._do_get('os-hypervisors/%s' % hypervisor_id)
subs.update(self._get_regexes())
self._verify_response('hypervisors-show-with-status-resp',
subs, response, 200)
@mock.patch("nova.servicegroup.API.service_is_up", return_value=True)
class HypervisorsCellsSampleJsonTests(ApiSampleTestBaseV2):
ADMIN_API = True
extension_name = ("nova.api.openstack.compute.contrib.hypervisors."
"Hypervisors")
def setUp(self):
self.flags(enable=True, cell_type='api', group='cells')
super(HypervisorsCellsSampleJsonTests, self).setUp()
def test_hypervisor_uptime(self, mocks):
fake_hypervisor = objects.ComputeNode(id=1, host='fake-mini',
hypervisor_hostname='fake-mini')
def fake_get_host_uptime(self, context, hyp):
return (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
def fake_compute_node_get(self, context, hyp):
return fake_hypervisor
def fake_service_get_by_compute_host(self, context, host):
return cells_utils.ServiceProxy(
objects.Service(id=1, host='fake-mini', disabled=False,
disabled_reason=None),
'cell1')
self.stubs.Set(cells_api.HostAPI, 'compute_node_get',
fake_compute_node_get)
self.stubs.Set(cells_api.HostAPI, 'service_get_by_compute_host',
fake_service_get_by_compute_host)
self.stubs.Set(cells_api.HostAPI,
'get_host_uptime', fake_get_host_uptime)
hypervisor_id = fake_hypervisor['id']
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
subs = {'hypervisor_id': hypervisor_id}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)
class AssistedVolumeSnapshotsJsonTest(ApiSampleTestBaseV2):
"""Assisted volume snapshots."""
extension_name = ("nova.api.openstack.compute.contrib."
"assisted_volume_snapshots.Assisted_volume_snapshots")
def _create_assisted_snapshot(self, subs):
self.stubs.Set(compute_api.API, 'volume_snapshot_create',
fakes.stub_compute_volume_snapshot_create)
response = self._do_post("os-assisted-volume-snapshots",
"snapshot-create-assisted-req",
subs)
return response
def test_snapshots_create_assisted(self):
subs = {
'snapshot_name': 'snap-001',
'description': 'Daily backup',
'volume_id': '521752a6-acf6-4b2d-bc7a-119f9148cd8c',
'snapshot_id': '421752a6-acf6-4b2d-bc7a-119f9148cd8c',
'type': 'qcow2',
'new_file': 'new_file_name'
}
subs.update(self._get_regexes())
response = self._create_assisted_snapshot(subs)
self._verify_response("snapshot-create-assisted-resp",
subs, response, 200)
def test_snapshots_delete_assisted(self):
self.stubs.Set(compute_api.API, 'volume_snapshot_delete',
fakes.stub_compute_volume_snapshot_delete)
snapshot_id = '100'
response = self._do_delete(
'os-assisted-volume-snapshots/%s?delete_info='
'{"volume_id":"521752a6-acf6-4b2d-bc7a-119f9148cd8c"}'
% snapshot_id)
self.assertEqual(response.status_code, 204)
self.assertEqual(response.content, '')
class PreserveEphemeralOnRebuildJsonTest(ServersSampleBase):
extension_name = ('nova.api.openstack.compute.contrib.'
'preserve_ephemeral_rebuild.'
'Preserve_ephemeral_rebuild')
def _test_server_action(self, uuid, action,
subs=None, resp_tpl=None, code=202):
subs = subs or {}
subs.update({'action': action})
response = self._do_post('servers/%s/action' % uuid,
'server-action-%s' % action.lower(),
subs)
if resp_tpl:
subs.update(self._get_regexes())
self._verify_response(resp_tpl, subs, response, code)
else:
self.assertEqual(response.status_code, code)
self.assertEqual(response.content, "")
def test_rebuild_server_preserve_ephemeral_false(self):
uuid = self._post_server()
image = self.api.get_images()[0]['id']
subs = {'host': self._get_host(),
'uuid': image,
'name': 'foobar',
'pass': 'seekr3t',
'ip': '1.2.3.4',
'ip6': 'fe80::100',
'hostid': '[a-f0-9]+',
'preserve_ephemeral': 'false'}
self._test_server_action(uuid, 'rebuild', subs,
'server-action-rebuild-resp')
def test_rebuild_server_preserve_ephemeral_true(self):
image = self.api.get_images()[0]['id']
subs = {'host': self._get_host(),
'uuid': image,
'name': 'new-server-test',
'pass': 'seekr3t',
'ip': '1.2.3.4',
'ip6': 'fe80::100',
'hostid': '[a-f0-9]+',
'preserve_ephemeral': 'true'}
def fake_rebuild(self_, context, instance, image_href, admin_password,
**kwargs):
self.assertTrue(kwargs['preserve_ephemeral'])
self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
instance_uuid = self._post_server()
response = self._do_post('servers/%s/action' % instance_uuid,
'server-action-rebuild', subs)
self.assertEqual(response.status_code, 202)
class ServerGroupQuotas_LimitsSampleJsonTest(LimitsSampleJsonTest):
sample_dir = None
extension_name = ("nova.api.openstack.compute.contrib."
"server_group_quotas.Server_group_quotas")
class ServerGroupQuotas_UsedLimitsSamplesJsonTest(UsedLimitsSamplesJsonTest):
extension_name = ("nova.api.openstack.compute.contrib."
"server_group_quotas.Server_group_quotas")
extends_name = ("nova.api.openstack.compute.contrib.used_limits."
"Used_limits")
class ServerGroupQuotas_QuotasSampleJsonTests(QuotasSampleJsonTests):
extension_name = ("nova.api.openstack.compute.contrib."
"server_group_quotas.Server_group_quotas")
extends_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementations of various third-party authentication schemes.
All the classes in this file are class Mixins designed to be used with
web.py RequestHandler classes. The primary methods for each service are
authenticate_redirect(), authorize_redirect(), and get_authenticated_user().
The former should be called to redirect the user to, e.g., the OpenID
authentication page on the third party service, and the latter should
be called upon return to get the user data from the data returned by
the third party service.
They all take slightly different arguments due to the fact all these
services implement authentication and authorization slightly differently.
See the individual service classes below for complete documentation.
Example usage for Google OpenID::
class GoogleHandler(tornado.web.RequestHandler, tornado.auth.GoogleMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("openid.mode", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Google auth failed")
# Save the user with, e.g., set_secure_cookie()
"""
import base64
import binascii
import hashlib
import hmac
import logging
import time
import urllib
import urlparse
import uuid
from tornado import httpclient
from tornado import escape
from tornado.httputil import url_concat
from tornado.util import bytes_type, b
class OpenIdMixin(object):
"""Abstract implementation of OpenID and Attribute Exchange.
See GoogleMixin below for example implementations.
"""
def authenticate_redirect(self, callback_uri=None,
ax_attrs=["name","email","language","username"]):
"""Returns the authentication URL for this service.
After authentication, the service will redirect back to the given
callback URI.
We request the given attributes for the authenticated user by
default (name, email, language, and username). If you don't need
all those attributes for your app, you can request fewer with
the ax_attrs keyword argument.
"""
callback_uri = callback_uri or self.request.uri
args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
self.redirect(self._OPENID_ENDPOINT + "?" + urllib.urlencode(args))
def get_authenticated_user(self, callback, http_client=None):
"""Fetches the authenticated user data upon redirect.
This method should be called by the handler that receives the
redirect from the authenticate_redirect() or authorize_redirect()
methods.
"""
# Verify the OpenID response via direct request to the OP
args = dict((k, v[-1]) for k, v in self.request.arguments.iteritems())
args["openid.mode"] = u"check_authentication"
url = self._OPENID_ENDPOINT
if http_client is None: http_client = httpclient.AsyncHTTPClient()
http_client.fetch(url, self.async_callback(
self._on_authentication_verified, callback),
method="POST", body=urllib.urlencode(args))
def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):
url = urlparse.urljoin(self.request.full_url(), callback_uri)
args = {
"openid.ns": "http://specs.openid.net/auth/2.0",
"openid.claimed_id":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.identity":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.return_to": url,
"openid.realm": urlparse.urljoin(url, '/'),
"openid.mode": "checkid_setup",
}
if ax_attrs:
args.update({
"openid.ns.ax": "http://openid.net/srv/ax/1.0",
"openid.ax.mode": "fetch_request",
})
ax_attrs = set(ax_attrs)
required = []
if "name" in ax_attrs:
ax_attrs -= set(["name", "firstname", "fullname", "lastname"])
required += ["firstname", "fullname", "lastname"]
args.update({
"openid.ax.type.firstname":
"http://axschema.org/namePerson/first",
"openid.ax.type.fullname":
"http://axschema.org/namePerson",
"openid.ax.type.lastname":
"http://axschema.org/namePerson/last",
})
known_attrs = {
"email": "http://axschema.org/contact/email",
"language": "http://axschema.org/pref/language",
"username": "http://axschema.org/namePerson/friendly",
}
for name in ax_attrs:
args["openid.ax.type." + name] = known_attrs[name]
required.append(name)
args["openid.ax.required"] = ",".join(required)
if oauth_scope:
args.update({
"openid.ns.oauth":
"http://specs.openid.net/extensions/oauth/1.0",
"openid.oauth.consumer": self.request.host.split(":")[0],
"openid.oauth.scope": oauth_scope,
})
return args
def _on_authentication_verified(self, callback, response):
if response.error or b("is_valid:true") not in response.body:
logging.warning("Invalid OpenID response: %s", response.error or
response.body)
callback(None)
return
# Make sure we got back at least an email from attribute exchange
ax_ns = None
for name in self.request.arguments.iterkeys():
if name.startswith("openid.ns.") and \
self.get_argument(name) == u"http://openid.net/srv/ax/1.0":
ax_ns = name[10:]
break
def get_ax_arg(uri):
if not ax_ns: return u""
prefix = "openid." + ax_ns + ".type."
ax_name = None
for name in self.request.arguments.iterkeys():
if self.get_argument(name) == uri and name.startswith(prefix):
part = name[len(prefix):]
ax_name = "openid." + ax_ns + ".value." + part
break
if not ax_name: return u""
return self.get_argument(ax_name, u"")
email = get_ax_arg("http://axschema.org/contact/email")
name = get_ax_arg("http://axschema.org/namePerson")
first_name = get_ax_arg("http://axschema.org/namePerson/first")
last_name = get_ax_arg("http://axschema.org/namePerson/last")
username = get_ax_arg("http://axschema.org/namePerson/friendly")
locale = get_ax_arg("http://axschema.org/pref/language").lower()
user = dict()
name_parts = []
if first_name:
user["first_name"] = first_name
name_parts.append(first_name)
if last_name:
user["last_name"] = last_name
name_parts.append(last_name)
if name:
user["name"] = name
elif name_parts:
user["name"] = u" ".join(name_parts)
elif email:
user["name"] = email.split("@")[0]
if email: user["email"] = email
if locale: user["locale"] = locale
if username: user["username"] = username
callback(user)
class OAuthMixin(object):
"""Abstract implementation of OAuth.
See TwitterMixin and FriendFeedMixin below for example implementations.
"""
def authorize_redirect(self, callback_uri=None, extra_params=None,
http_client=None):
"""Redirects the user to obtain OAuth authorization for this service.
Twitter and FriendFeed both require that you register a Callback
URL with your application. You should call this method to log the
user in, and then call get_authenticated_user() in the handler
you registered as your Callback URL to complete the authorization
process.
This method sets a cookie called _oauth_request_token which is
subsequently used (and cleared) in get_authenticated_user for
security purposes.
"""
if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
raise Exception("This service does not support oauth_callback")
if http_client is None:
http_client = httpclient.AsyncHTTPClient()
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
http_client.fetch(
self._oauth_request_token_url(callback_uri=callback_uri,
extra_params=extra_params),
self.async_callback(
self._on_request_token,
self._OAUTH_AUTHORIZE_URL,
callback_uri))
else:
http_client.fetch(
self._oauth_request_token_url(),
self.async_callback(
self._on_request_token, self._OAUTH_AUTHORIZE_URL,
callback_uri))
def get_authenticated_user(self, callback, http_client=None):
"""Gets the OAuth authorized user and access token on callback.
This method should be called from the handler for your registered
OAuth Callback URL to complete the registration process. We call
callback with the authenticated user, which in addition to standard
attributes like 'name' includes the 'access_key' attribute, which
contains the OAuth access you can use to make authorized requests
to this service on behalf of the user.
"""
request_key = escape.utf8(self.get_argument("oauth_token"))
oauth_verifier = self.get_argument("oauth_verifier", None)
request_cookie = self.get_cookie("_oauth_request_token")
if not request_cookie:
logging.warning("Missing OAuth request token cookie")
callback(None)
return
self.clear_cookie("_oauth_request_token")
cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
if cookie_key != request_key:
logging.info((cookie_key, request_key, request_cookie))
logging.warning("Request token does not match cookie")
callback(None)
return
token = dict(key=cookie_key, secret=cookie_secret)
if oauth_verifier:
token["verifier"] = oauth_verifier
if http_client is None:
http_client = httpclient.AsyncHTTPClient()
http_client.fetch(self._oauth_access_token_url(token),
self.async_callback(self._on_access_token, callback))
def _oauth_request_token_url(self, callback_uri=None, extra_params=None):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_REQUEST_TOKEN_URL
args = dict(
oauth_consumer_key=consumer_token["key"],
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes),
oauth_version=getattr(self, "_OAUTH_VERSION", "1.0a"),
)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
if callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
if extra_params: args.update(extra_params)
signature = _oauth10a_signature(consumer_token, "GET", url, args)
else:
signature = _oauth_signature(consumer_token, "GET", url, args)
args["oauth_signature"] = signature
return url + "?" + urllib.urlencode(args)
def _on_request_token(self, authorize_url, callback_uri, response):
if response.error:
raise Exception("Could not get request token")
request_token = _oauth_parse_response(response.body)
data = (base64.b64encode(request_token["key"]) + b("|") +
base64.b64encode(request_token["secret"]))
self.set_cookie("_oauth_request_token", data)
args = dict(oauth_token=request_token["key"])
if callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
self.redirect(authorize_url + "?" + urllib.urlencode(args))
def _oauth_access_token_url(self, request_token):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
oauth_consumer_key=consumer_token["key"],
oauth_token=request_token["key"],
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes),
oauth_version=getattr(self, "_OAUTH_VERSION", "1.0a"),
)
if "verifier" in request_token:
args["oauth_verifier"]=request_token["verifier"]
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, "GET", url, args,
request_token)
else:
signature = _oauth_signature(consumer_token, "GET", url, args,
request_token)
args["oauth_signature"] = signature
return url + "?" + urllib.urlencode(args)
def _on_access_token(self, callback, response):
if response.error:
logging.warning("Could not fetch access token")
callback(None)
return
access_token = _oauth_parse_response(response.body)
self._oauth_get_user(access_token, self.async_callback(
self._on_oauth_get_user, access_token, callback))
def _oauth_get_user(self, access_token, callback):
raise NotImplementedError()
def _on_oauth_get_user(self, access_token, callback, user):
if not user:
callback(None)
return
user["access_token"] = access_token
callback(user)
def _oauth_request_parameters(self, url, access_token, parameters={},
method="GET"):
"""Returns the OAuth parameters as a dict for the given request.
parameters should include all POST arguments and query string arguments
that will be sent with the request.
"""
consumer_token = self._oauth_consumer_token()
base_args = dict(
oauth_consumer_key=consumer_token["key"],
oauth_token=access_token["key"],
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes),
oauth_version=getattr(self, "_OAUTH_VERSION", "1.0a"),
)
args = {}
args.update(base_args)
args.update(parameters)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, method, url, args,
access_token)
else:
signature = _oauth_signature(consumer_token, method, url, args,
access_token)
base_args["oauth_signature"] = signature
return base_args
class OAuth2Mixin(object):
"""Abstract implementation of OAuth v 2."""
def authorize_redirect(self, redirect_uri=None, client_id=None,
client_secret=None, extra_params=None):
"""Redirects the user to obtain OAuth authorization for this service.
Some providers require that you register a Callback
URL with your application. You should call this method to log the
user in, and then call get_authenticated_user() in the handler
you registered as your Callback URL to complete the authorization
process.
"""
args = {
"redirect_uri": redirect_uri,
"client_id": client_id
}
if extra_params: args.update(extra_params)
self.redirect(
url_concat(self._OAUTH_AUTHORIZE_URL, args))
def _oauth_request_token_url(self, redirect_uri=None, client_id=None,
client_secret=None, code=None,
extra_params=None):
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
redirect_uri=redirect_uri,
code=code,
client_id=client_id,
client_secret=client_secret,
)
if extra_params: args.update(extra_params)
return url_concat(url, args)
class TwitterMixin(OAuthMixin):
"""Twitter OAuth authentication.
To authenticate with Twitter, register your application with
Twitter at http://twitter.com/apps. Then copy your Consumer Key and
Consumer Secret to the application settings 'twitter_consumer_key' and
'twitter_consumer_secret'. Use this Mixin on the handler for the URL
you registered as your application's Callback URL.
When your application is set up, you can use this Mixin like this
to authenticate the user with Twitter and get access to their stream::
class TwitterHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("oauth_token", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authorize_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Twitter auth failed")
# Save the user using, e.g., set_secure_cookie()
The user object returned by get_authenticated_user() includes the
attributes 'username', 'name', and all of the custom Twitter user
attributes described at
http://apiwiki.twitter.com/Twitter-REST-API-Method%3A-users%C2%A0show
in addition to 'access_token'. You should save the access token with
the user; it is required to make requests on behalf of the user later
with twitter_request().
"""
_OAUTH_REQUEST_TOKEN_URL = "http://api.twitter.com/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "http://api.twitter.com/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "http://api.twitter.com/oauth/authorize"
_OAUTH_AUTHENTICATE_URL = "http://api.twitter.com/oauth/authenticate"
_OAUTH_NO_CALLBACKS = False
def authenticate_redirect(self):
"""Just like authorize_redirect(), but auto-redirects if authorized.
This is generally the right interface to use if you are using
Twitter for single-sign on.
"""
http = httpclient.AsyncHTTPClient()
http.fetch(self._oauth_request_token_url(), self.async_callback(
self._on_request_token, self._OAUTH_AUTHENTICATE_URL, None))
def twitter_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given API path, e.g., "/statuses/user_timeline/btaylor"
The path should not include the format (we automatically append
".json" and parse the JSON output).
If the request is a POST, post_args should be provided. Query
string arguments should be given as keyword arguments.
All the Twitter methods are documented at
http://apiwiki.twitter.com/Twitter-API-Documentation.
Many methods require an OAuth access token which you can obtain
through authorize_redirect() and get_authenticated_user(). The
user returned through that process includes an 'access_token'
attribute that can be used to make authenticated requests via
this method. Example usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.twitter_request(
"/statuses/update",
post_args={"status": "Testing Tornado Web Server"},
access_token=user["access_token"],
callback=self.async_callback(self._on_post))
def _on_post(self, new_entry):
if not new_entry:
# Call failed; perhaps missing permission?
self.authorize_redirect()
return
self.finish("Posted a message!")
"""
# Add the OAuth resource request signature if we have credentials
url = "http://api.twitter.com/1" + path + ".json"
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args: url += "?" + urllib.urlencode(args)
callback = self.async_callback(self._on_twitter_request, callback)
http = httpclient.AsyncHTTPClient()
if post_args is not None:
http.fetch(url, method="POST", body=urllib.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_twitter_request(self, callback, response):
if response.error:
logging.warning("Error response %s fetching %s", response.error,
response.request.url)
callback(None)
return
callback(escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("twitter_consumer_key", "Twitter OAuth")
self.require_setting("twitter_consumer_secret", "Twitter OAuth")
return dict(
key=self.settings["twitter_consumer_key"],
secret=self.settings["twitter_consumer_secret"])
def _oauth_get_user(self, access_token, callback):
callback = self.async_callback(self._parse_user_response, callback)
self.twitter_request(
"/users/show/" + access_token["screen_name"],
access_token=access_token, callback=callback)
def _parse_user_response(self, callback, user):
if user:
user["username"] = user["screen_name"]
callback(user)
class FriendFeedMixin(OAuthMixin):
"""FriendFeed OAuth authentication.
To authenticate with FriendFeed, register your application with
FriendFeed at http://friendfeed.com/api/applications. Then
copy your Consumer Key and Consumer Secret to the application settings
'friendfeed_consumer_key' and 'friendfeed_consumer_secret'. Use
this Mixin on the handler for the URL you registered as your
application's Callback URL.
When your application is set up, you can use this Mixin like this
to authenticate the user with FriendFeed and get access to their feed::
class FriendFeedHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("oauth_token", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authorize_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "FriendFeed auth failed")
# Save the user using, e.g., set_secure_cookie()
The user object returned by get_authenticated_user() includes the
attributes 'username', 'name', and 'description' in addition to
'access_token'. You should save the access token with the user;
it is required to make requests on behalf of the user later with
friendfeed_request().
"""
_OAUTH_VERSION = "1.0"
_OAUTH_REQUEST_TOKEN_URL = "https://friendfeed.com/account/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "https://friendfeed.com/account/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "https://friendfeed.com/account/oauth/authorize"
_OAUTH_NO_CALLBACKS = True
_OAUTH_VERSION = "1.0"
def friendfeed_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given relative API path, e.g., "/bret/friends"
If the request is a POST, post_args should be provided. Query
string arguments should be given as keyword arguments.
All the FriendFeed methods are documented at
http://friendfeed.com/api/documentation.
Many methods require an OAuth access token which you can obtain
through authorize_redirect() and get_authenticated_user(). The
user returned through that process includes an 'access_token'
attribute that can be used to make authenticated requests via
this method. Example usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.friendfeed_request(
"/entry",
post_args={"body": "Testing Tornado Web Server"},
access_token=self.current_user["access_token"],
callback=self.async_callback(self._on_post))
def _on_post(self, new_entry):
if not new_entry:
# Call failed; perhaps missing permission?
self.authorize_redirect()
return
self.finish("Posted a message!")
"""
# Add the OAuth resource request signature if we have credentials
url = "http://friendfeed-api.com/v2" + path
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args: url += "?" + urllib.urlencode(args)
callback = self.async_callback(self._on_friendfeed_request, callback)
http = httpclient.AsyncHTTPClient()
if post_args is not None:
http.fetch(url, method="POST", body=urllib.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_friendfeed_request(self, callback, response):
if response.error:
logging.warning("Error response %s fetching %s", response.error,
response.request.url)
callback(None)
return
callback(escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("friendfeed_consumer_key", "FriendFeed OAuth")
self.require_setting("friendfeed_consumer_secret", "FriendFeed OAuth")
return dict(
key=self.settings["friendfeed_consumer_key"],
secret=self.settings["friendfeed_consumer_secret"])
def _oauth_get_user(self, access_token, callback):
callback = self.async_callback(self._parse_user_response, callback)
self.friendfeed_request(
"/feedinfo/" + access_token["username"],
include="id,name,description", access_token=access_token,
callback=callback)
def _parse_user_response(self, callback, user):
if user:
user["username"] = user["id"]
callback(user)
class GoogleMixin(OpenIdMixin, OAuthMixin):
"""Google Open ID / OAuth authentication.
No application registration is necessary to use Google for authentication
or to access Google resources on behalf of a user. To authenticate with
Google, redirect with authenticate_redirect(). On return, parse the
response with get_authenticated_user(). We send a dict containing the
values for the user, including 'email', 'name', and 'locale'.
Example usage::
class GoogleHandler(tornado.web.RequestHandler, tornado.auth.GoogleMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("openid.mode", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Google auth failed")
# Save the user with, e.g., set_secure_cookie()
"""
_OPENID_ENDPOINT = "https://www.google.com/accounts/o8/ud"
_OAUTH_ACCESS_TOKEN_URL = "https://www.google.com/accounts/OAuthGetAccessToken"
def authorize_redirect(self, oauth_scope, callback_uri=None,
ax_attrs=["name","email","language","username"]):
"""Authenticates and authorizes for the given Google resource.
Some of the available resources are:
* Gmail Contacts - http://www.google.com/m8/feeds/
* Calendar - http://www.google.com/calendar/feeds/
* Finance - http://finance.google.com/finance/feeds/
You can authorize multiple resources by separating the resource
URLs with a space.
"""
callback_uri = callback_uri or self.request.uri
args = self._openid_args(callback_uri, ax_attrs=ax_attrs,
oauth_scope=oauth_scope)
self.redirect(self._OPENID_ENDPOINT + "?" + urllib.urlencode(args))
def get_authenticated_user(self, callback):
"""Fetches the authenticated user data upon redirect."""
# Look to see if we are doing combined OpenID/OAuth
oauth_ns = ""
for name, values in self.request.arguments.iteritems():
if name.startswith("openid.ns.") and \
values[-1] == u"http://specs.openid.net/extensions/oauth/1.0":
oauth_ns = name[10:]
break
token = self.get_argument("openid." + oauth_ns + ".request_token", "")
if token:
http = httpclient.AsyncHTTPClient()
token = dict(key=token, secret="")
http.fetch(self._oauth_access_token_url(token),
self.async_callback(self._on_access_token, callback))
else:
OpenIdMixin.get_authenticated_user(self, callback)
def _oauth_consumer_token(self):
self.require_setting("google_consumer_key", "Google OAuth")
self.require_setting("google_consumer_secret", "Google OAuth")
return dict(
key=self.settings["google_consumer_key"],
secret=self.settings["google_consumer_secret"])
def _oauth_get_user(self, access_token, callback):
OpenIdMixin.get_authenticated_user(self, callback)
class FacebookMixin(object):
"""Facebook Connect authentication.
New applications should consider using `FacebookGraphMixin` below instead
of this class.
To authenticate with Facebook, register your application with
Facebook at http://www.facebook.com/developers/apps.php. Then
copy your API Key and Application Secret to the application settings
'facebook_api_key' and 'facebook_secret'.
When your application is set up, you can use this Mixin like this
to authenticate the user with Facebook::
class FacebookHandler(tornado.web.RequestHandler,
tornado.auth.FacebookMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("session", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Facebook auth failed")
# Save the user using, e.g., set_secure_cookie()
The user object returned by get_authenticated_user() includes the
attributes 'facebook_uid' and 'name' in addition to session attributes
like 'session_key'. You should save the session key with the user; it is
required to make requests on behalf of the user later with
facebook_request().
"""
def authenticate_redirect(self, callback_uri=None, cancel_uri=None,
extended_permissions=None):
"""Authenticates/installs this app for the current user."""
self.require_setting("facebook_api_key", "Facebook Connect")
callback_uri = callback_uri or self.request.uri
args = {
"api_key": self.settings["facebook_api_key"],
"v": "1.0",
"fbconnect": "true",
"display": "page",
"next": urlparse.urljoin(self.request.full_url(), callback_uri),
"return_session": "true",
}
if cancel_uri:
args["cancel_url"] = urlparse.urljoin(
self.request.full_url(), cancel_uri)
if extended_permissions:
if isinstance(extended_permissions, (unicode, bytes_type)):
extended_permissions = [extended_permissions]
args["req_perms"] = ",".join(extended_permissions)
self.redirect("http://www.facebook.com/login.php?" +
urllib.urlencode(args))
def authorize_redirect(self, extended_permissions, callback_uri=None,
cancel_uri=None):
"""Redirects to an authorization request for the given FB resource.
The available resource names are listed at
http://wiki.developers.facebook.com/index.php/Extended_permission.
The most common resource types include:
* publish_stream
* read_stream
* email
* sms
extended_permissions can be a single permission name or a list of
names. To get the session secret and session key, call
get_authenticated_user() just as you would with
authenticate_redirect().
"""
self.authenticate_redirect(callback_uri, cancel_uri,
extended_permissions)
def get_authenticated_user(self, callback):
"""Fetches the authenticated Facebook user.
The authenticated user includes the special Facebook attributes
'session_key' and 'facebook_uid' in addition to the standard
user attributes like 'name'.
"""
self.require_setting("facebook_api_key", "Facebook Connect")
session = escape.json_decode(self.get_argument("session"))
self.facebook_request(
method="facebook.users.getInfo",
callback=self.async_callback(
self._on_get_user_info, callback, session),
session_key=session["session_key"],
uids=session["uid"],
fields="uid,first_name,last_name,name,locale,pic_square," \
"profile_url,username")
def facebook_request(self, method, callback, **args):
"""Makes a Facebook API REST request.
We automatically include the Facebook API key and signature, but
it is the caller's responsibility to include 'session_key' and any
other required arguments to the method.
The available Facebook methods are documented here:
http://wiki.developers.facebook.com/index.php/API
Here is an example for the stream.get() method::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.facebook_request(
method="stream.get",
callback=self.async_callback(self._on_stream),
session_key=self.current_user["session_key"])
def _on_stream(self, stream):
if stream is None:
# Not authorized to read the stream yet?
self.redirect(self.authorize_redirect("read_stream"))
return
self.render("stream.html", stream=stream)
"""
self.require_setting("facebook_api_key", "Facebook Connect")
self.require_setting("facebook_secret", "Facebook Connect")
if not method.startswith("facebook."):
method = "facebook." + method
args["api_key"] = self.settings["facebook_api_key"]
args["v"] = "1.0"
args["method"] = method
args["call_id"] = str(long(time.time() * 1e6))
args["format"] = "json"
args["sig"] = self._signature(args)
url = "http://api.facebook.com/restserver.php?" + \
urllib.urlencode(args)
http = httpclient.AsyncHTTPClient()
http.fetch(url, callback=self.async_callback(
self._parse_response, callback))
def _on_get_user_info(self, callback, session, users):
if users is None:
callback(None)
return
callback({
"name": users[0]["name"],
"first_name": users[0]["first_name"],
"last_name": users[0]["last_name"],
"uid": users[0]["uid"],
"locale": users[0]["locale"],
"pic_square": users[0]["pic_square"],
"profile_url": users[0]["profile_url"],
"username": users[0].get("username"),
"session_key": session["session_key"],
"session_expires": session.get("expires"),
})
def _parse_response(self, callback, response):
if response.error:
logging.warning("HTTP error from Facebook: %s", response.error)
callback(None)
return
try:
json = escape.json_decode(response.body)
except Exception:
logging.warning("Invalid JSON from Facebook: %r", response.body)
callback(None)
return
if isinstance(json, dict) and json.get("error_code"):
logging.warning("Facebook error: %d: %r", json["error_code"],
json.get("error_msg"))
callback(None)
return
callback(json)
def _signature(self, args):
parts = ["%s=%s" % (n, args[n]) for n in sorted(args.keys())]
body = "".join(parts) + self.settings["facebook_secret"]
if isinstance(body, unicode): body = body.encode("utf-8")
return hashlib.md5(body).hexdigest()
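# Illustrative sketch (added for clarity; not part of the original module):
# the legacy REST-API signature built by FacebookMixin._signature above is
# simply md5 over the sorted "name=value" pairs concatenated with the
# application secret. All values below, including the secret, are made up.
def _example_facebook_rest_signature():
    args = {"api_key": "APP_KEY", "v": "1.0", "format": "json",
            "method": "facebook.users.getInfo", "call_id": "1"}
    secret = "APP_SECRET"  # hypothetical application secret
    parts = ["%s=%s" % (name, args[name]) for name in sorted(args.keys())]
    return hashlib.md5("".join(parts) + secret).hexdigest()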
class FacebookGraphMixin(OAuth2Mixin):
"""Facebook authentication using the new Graph API and OAuth2."""
_OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?"
_OAUTH_AUTHORIZE_URL = "https://graph.facebook.com/oauth/authorize?"
_OAUTH_NO_CALLBACKS = False
def get_authenticated_user(self, redirect_uri, client_id, client_secret,
code, callback, extra_fields=None):
"""Handles the login for the Facebook user, returning a user object.
Example usage::
class FacebookGraphLoginHandler(LoginHandler, tornado.auth.FacebookGraphMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("code", False):
self.get_authenticated_user(
redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
client_secret=self.settings["facebook_secret"],
code=self.get_argument("code"),
callback=self.async_callback(
self._on_login))
return
self.authorize_redirect(redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
extra_params={"scope": "read_stream,offline_access"})
def _on_login(self, user):
logging.error(user)
self.finish()
"""
http = httpclient.AsyncHTTPClient()
args = {
"redirect_uri": redirect_uri,
"code": code,
"client_id": client_id,
"client_secret": client_secret,
}
fields = set(['id', 'name', 'first_name', 'last_name',
'locale', 'picture', 'link'])
if extra_fields: fields.update(extra_fields)
http.fetch(self._oauth_request_token_url(**args),
self.async_callback(self._on_access_token, redirect_uri, client_id,
client_secret, callback, fields))
def _on_access_token(self, redirect_uri, client_id, client_secret,
callback, fields, response):
if response.error:
logging.warning('Facebook auth error: %s' % str(response))
callback(None)
return
args = escape.parse_qs_bytes(escape.native_str(response.body))
session = {
"access_token": args["access_token"][-1],
"expires": args.get("expires")
}
self.facebook_request(
path="/me",
callback=self.async_callback(
self._on_get_user_info, callback, session, fields),
access_token=session["access_token"],
fields=",".join(fields)
)
def _on_get_user_info(self, callback, session, fields, user):
if user is None:
callback(None)
return
fieldmap = {}
for field in fields:
fieldmap[field] = user.get(field)
fieldmap.update({"access_token": session["access_token"], "session_expires": session.get("expires")})
callback(fieldmap)
def facebook_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given relative API path, e.g., "/btaylor/picture"
If the request is a POST, post_args should be provided. Query
string arguments should be given as keyword arguments.
An introduction to the Facebook Graph API can be found at
http://developers.facebook.com/docs/api
Many methods require an OAuth access token which you can obtain
through authorize_redirect() and get_authenticated_user(). The
user returned through that process includes an 'access_token'
attribute that can be used to make authenticated requests via
this method. Example usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.facebook_request(
"/me/feed",
post_args={"message": "I am posting from my Tornado application!"},
access_token=self.current_user["access_token"],
callback=self.async_callback(self._on_post))
def _on_post(self, new_entry):
if not new_entry:
# Call failed; perhaps missing permission?
self.authorize_redirect()
return
self.finish("Posted a message!")
"""
url = "https://graph.facebook.com" + path
all_args = {}
if access_token:
all_args["access_token"] = access_token
all_args.update(args)
all_args.update(post_args or {})
if all_args: url += "?" + urllib.urlencode(all_args)
callback = self.async_callback(self._on_facebook_request, callback)
http = httpclient.AsyncHTTPClient()
if post_args is not None:
http.fetch(url, method="POST", body=urllib.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_facebook_request(self, callback, response):
if response.error:
logging.warning("Error response %s fetching %s", response.error,
response.request.url)
callback(None)
return
callback(escape.json_decode(response.body))
def _oauth_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth signature for the given request.
See http://oauth.net/core/1.0/#signing_process
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(consumer_token["secret"])]
key_elems.append(escape.utf8(token["secret"] if token else ""))
key = b("&").join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
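# Illustrative sketch (added for clarity; not part of the original module):
# a minimal, self-contained call to _oauth_signature() above. The consumer
# and token credentials, URL and nonce are made up. The signing key is
# "consumer_secret&token_secret" and the base string is
# "METHOD&escaped_url&escaped_sorted_params"; _oauth10a_signature() below
# differs only in percent-encoding the two secrets before joining them.
def _example_oauth_signature():
    consumer = dict(key="my_consumer_key", secret="my_consumer_secret")
    token = dict(key="request_key", secret="request_secret")
    params = dict(oauth_consumer_key=consumer["key"],
                  oauth_token=token["key"],
                  oauth_signature_method="HMAC-SHA1",
                  oauth_timestamp="1234567890",
                  oauth_nonce="abc123",
                  oauth_version="1.0")
    return _oauth_signature(consumer, "GET",
                            "http://api.example.com/resource", params, token)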
def _oauth10a_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request.
See http://oauth.net/core/1.0a/#signing_process
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(urllib.quote(consumer_token["secret"], safe='~'))]
key_elems.append(escape.utf8(urllib.quote(token["secret"], safe='~') if token else ""))
key = b("&").join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
def _oauth_escape(val):
if isinstance(val, unicode):
val = val.encode("utf-8")
return urllib.quote(val, safe="~")
def _oauth_parse_response(body):
p = escape.parse_qs(body, keep_blank_values=False)
token = dict(key=p[b("oauth_token")][0], secret=p[b("oauth_token_secret")][0])
# Add the extra parameters the Provider included to the token
special = (b("oauth_token"), b("oauth_token_secret"))
token.update((k, p[k][0]) for k in p if k not in special)
return token
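# Illustrative sketch (added for clarity; not part of the original module):
# _oauth_parse_response() above turns a provider response body like the
# made-up one below into a dict carrying the token key/secret plus any extra
# provider-specific parameters.
def _example_parse_oauth_response():
    body = b("oauth_token=abc&oauth_token_secret=def&screen_name=example")
    token = _oauth_parse_response(body)
    # token == {"key": "abc", "secret": "def", "screen_name": "example"}
    return token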
from config import Configuration
from corpus import Corpus, Sentence, Token, getTokens
from param import FeatParams
class Extractor:
@staticmethod
def extract(sent):
transition = sent.initialTransition
labels, features = [], []
while transition.next:
    # the loop condition guarantees transition.next is not None
    if transition.next.type:
        labels.append(transition.next.type.value)
        features.append(Extractor.getFeatures(transition, sent))
    transition = transition.next
sent.featuresInfo = [labels, features]
return labels, features
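    # Summary added for readability (assumption based on the code below, not
    # part of the original project): getFeatures() fills transDic with string
    # keys such as 'StackLengthIs', per-position linguistic features prefixed
    # by 'S0'/'S1'/'B0'/'B1', bi-gram keys ('S1S0', 'S0B0', 'S1B0', 'S0B1',
    # 'S1B1', 'S0B2'), the tri-gram 'S1S0B0', distance features
    # ('S0B0Distance', 'S0S1Distance') and lexicon flags like 'S0B0InLexic'.
    # Typical use: labels, features = Extractor.extract(sent)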
@staticmethod
def getFeatures(transition, sent):
transDic = {}
configuration = transition.configuration
if FeatParams.smartMWTDetection:
if (configuration.stack and isinstance(configuration.stack[-1], Token)
        and configuration.stack[-1].getLemma() in Corpus.mwtDictionary):
    transDic['isMWT_' + Corpus.mwtDictionary[configuration.stack[-1].getLemma()].lower()] = True
# return transDic
# TODO return transDic directly in this case
if FeatParams.useStackLength and len(configuration.stack) > 1:
transDic['StackLengthIs'] = len(configuration.stack)
if len(configuration.stack) >= 2:
stackElements = [configuration.stack[-2], configuration.stack[-1]]
else:
stackElements = configuration.stack
# General linguistic information
if stackElements:
elemIdx = len(stackElements) - 1
for elem in stackElements:
Extractor.generateLinguisticFeatures(elem, 'S' + str(elemIdx), transDic)
elemIdx -= 1
if len(configuration.buffer) > 0:
if FeatParams.useFirstBufferElement:
Extractor.generateLinguisticFeatures(configuration.buffer[0], 'B0', transDic)
if FeatParams.useSecondBufferElement and len(configuration.buffer) > 1:
Extractor.generateLinguisticFeatures(configuration.buffer[1], 'B1', transDic)
# Bi-Gram Generation
if FeatParams.useBiGram:
if len(stackElements) > 1:
# Generate a Bi-gram S1S0 S0B0 S1B0 S0B1
Extractor.generateBiGram(stackElements[-2], stackElements[-1], 'S1S0', transDic)
if FeatParams.generateS1B1 and len(configuration.buffer) > 1:
Extractor.generateBiGram(stackElements[-2], configuration.buffer[1], 'S1B1', transDic)
if len(stackElements) > 0 and len(configuration.buffer) > 0:
Extractor.generateBiGram(stackElements[-1], configuration.buffer[0], 'S0B0', transDic)
if len(stackElements) > 1:
Extractor.generateBiGram(stackElements[-2], configuration.buffer[0], 'S1B0', transDic)
if len(configuration.buffer) > 1:
Extractor.generateBiGram(stackElements[-1], configuration.buffer[1], 'S0B1', transDic)
if FeatParams.generateS0B2Bigram and len(configuration.buffer) > 2:
Extractor.generateBiGram(stackElements[-1], configuration.buffer[2], 'S0B2', transDic)
# Tri-Gram Generation
if FeatParams.useTriGram and len(stackElements) > 1 and len(configuration.buffer) > 0:
Extractor.generateTriGram(stackElements[-2], stackElements[-1], configuration.buffer[0], 'S1S0B0', transDic)
        # Syntactic information
if len(stackElements) > 0 and FeatParams.useSyntax:
Extractor.generateSyntaxicFeatures(configuration.stack, configuration.buffer, transDic)
# Distance information
if FeatParams.useS0B0Distance and len(configuration.stack) > 0 and len(configuration.buffer) > 0:
stackTokens = getTokens(configuration.stack[-1])
transDic['S0B0Distance'] = str(
sent.tokens.index(configuration.buffer[0]) - sent.tokens.index(stackTokens[-1]))
if FeatParams.useS0S1Distance and len(configuration.stack) > 1 and isinstance(configuration.stack[-1], Token) \
and isinstance(configuration.stack[-2], Token):
transDic['S0S1Distance'] = str(
sent.tokens.index(configuration.stack[-1]) - sent.tokens.index(configuration.stack[-2]))
Extractor.addTransitionHistory(transition, transDic)
if FeatParams.useLexic and len(configuration.buffer) > 0 and len(configuration.stack) >= 1:
Extractor.generateDisconinousFeatures(configuration, sent, transDic)
Extractor.enhanceMerge(transition, transDic)
return transDic
@staticmethod
def enhanceMerge(transition, transDic):
if not FeatParams.enhanceMerge:
return
config = transition.configuration
if transition.type.value != 0 and len(config.buffer) > 0 and len(
config.stack) > 0 and isinstance(config.stack[-1], Token):
if isinstance(config.stack[-1], Token) and Extractor.areInLexic([config.stack[-1], config.buffer[0]]):
transDic['S0B0InLexic'] = True
if len(config.buffer) > 1 and Extractor.areInLexic([config.stack[-1], config.buffer[0], config.buffer[1]]):
transDic['S0B0B1InLexic'] = True
if len(config.buffer) > 2 and Extractor.areInLexic(
[config.stack[-1], config.buffer[0], config.buffer[1], config.buffer[2]]):
transDic['S0B0B1B2InLexic'] = True
if len(config.buffer) > 1 and len(config.stack) > 1 and Extractor.areInLexic(
[config.stack[-2], config.stack[-1], config.buffer[1]]):
transDic['S1S0B1InLexic'] = True
if len(config.buffer) > 0 and len(config.stack) > 1 and Extractor.areInLexic(
[config.stack[-2], config.buffer[0]]) and not Extractor.areInLexic(
[config.stack[-1], config.buffer[0]]):
transDic['S1B0InLexic'] = True
transDic['S0B0tInLexic'] = False
if len(config.buffer) > 1 and Extractor.areInLexic(
[config.stack[-2], config.buffer[1]]) and not Extractor.areInLexic(
[config.stack[-1], config.buffer[1]]):
transDic['S1B1InLexic'] = True
transDic['S0B1InLexic'] = False
@staticmethod
def generateDisconinousFeatures(configuration, sent, transDic):
tokens = Sentence.getTokens([configuration.stack[-1]])
tokenTxt = Sentence.getTokenLemmas(tokens)
for key in Corpus.mweDictionary.keys():
if tokenTxt in key and tokenTxt != key:
bufidx = 0
for bufElem in configuration.buffer[:5]:
if bufElem.lemma != '' and (
(tokenTxt + ' ' + bufElem.lemma) in key or (bufElem.lemma + ' ' + tokenTxt) in key):
transDic['S0B' + str(bufidx) + 'ArePartsOfMWE'] = True
transDic['S0B' + str(bufidx) + 'ArePartsOfMWEDistance'] = sent.tokens.index(
bufElem) - sent.tokens.index(tokens[-1])
bufidx += 1
break
@staticmethod
def generateLinguisticFeatures(token, label, transDic):
if isinstance(token, list):
token = Extractor.concatenateTokens([token])[0]
transDic[label + 'Token'] = token.text
if FeatParams.usePOS and token.posTag is not None and token.posTag.strip() != '':
transDic[label + 'POS'] = token.posTag
if FeatParams.useLemma and token.lemma is not None and token.lemma.strip() != '':
transDic[label + 'Lemma'] = token.lemma
if not FeatParams.useLemma and not FeatParams.usePOS:
transDic[label + '_LastThreeLetters'] = token.text[-3:]
transDic[label + '_LastTwoLetters'] = token.text[-2:]
if FeatParams.useDictionary and ((
token.lemma != '' and token.lemma in Corpus.mweTokenDic.keys()) or token.text in Corpus.mweTokenDic.keys()):
transDic[label + 'IsInLexic'] = 'true'
@staticmethod
def generateSyntaxicFeatures(stack, buffer, dic):
if stack is not None and len(stack) > 0:
stack0 = stack[-1]
if not isinstance(stack0, Token):
return
            if int(stack0.dependencyParent) == -1 or int(stack0.dependencyParent) == 0 \
                    or stack0.dependencyLabel.strip() == '' \
                    or buffer is None or len(buffer) == 0:
return
for bElem in buffer:
if bElem.dependencyParent == stack0.position:
dic['hasRighDep_' + bElem.dependencyLabel] = 'true'
dic[stack0.getLemma() + '_hasRighDep_' + bElem.dependencyLabel] = 'true'
dic[stack0.getLemma() + '_' + bElem.getLemma() + '_hasRighDep_' + bElem.dependencyLabel] = 'true'
if stack0.dependencyParent > stack0.position:
for bElem in buffer:
if bElem.position == stack0.dependencyParent:
dic[stack0.lemma + '_isGouvernedBy_' + bElem.getLemma()] = 'true'
dic[stack0.lemma + '_isGouvernedBy_' + bElem.getLemma() + '_' + stack0.dependencyLabel] = 'true'
break
if len(stack) > 1:
stack1 = stack[-2]
if not isinstance(stack1, Token):
return
if stack0.dependencyParent == stack1.position:
dic['SyntaxicRelation'] = '+' + stack0.dependencyLabel
elif stack0.position == stack1.dependencyParent:
dic['SyntaxicRelation'] = '-' + stack1.dependencyLabel
@staticmethod
def generateTriGram(token0, token1, token2, label, transDic):
tokens = Extractor.concatenateTokens([token0, token1, token2])
Extractor.getFeatureInfo(transDic, label + 'Token', tokens, 'ttt')
Extractor.getFeatureInfo(transDic, label + 'Lemma', tokens, 'lll')
Extractor.getFeatureInfo(transDic, label + 'POS', tokens, 'ppp')
Extractor.getFeatureInfo(transDic, label + 'LemmaPOSPOS', tokens, 'lpp')
Extractor.getFeatureInfo(transDic, label + 'POSLemmaPOS', tokens, 'plp')
Extractor.getFeatureInfo(transDic, label + 'POSPOSLemma', tokens, 'ppl')
Extractor.getFeatureInfo(transDic, label + 'LemmaLemmaPOS', tokens, 'llp')
Extractor.getFeatureInfo(transDic, label + 'LemmaPOSLemma', tokens, 'lpl')
Extractor.getFeatureInfo(transDic, label + 'POSLemmaLemma', tokens, 'pll')
@staticmethod
def generateBiGram(token0, token1, label, transDic):
tokens = Extractor.concatenateTokens([token0, token1])
Extractor.getFeatureInfo(transDic, label + 'Token', tokens, 'tt')
Extractor.getFeatureInfo(transDic, label + 'Lemma', tokens, 'll')
Extractor.getFeatureInfo(transDic, label + 'POS', tokens, 'pp')
Extractor.getFeatureInfo(transDic, label + 'LemmaPOS', tokens, 'lp')
Extractor.getFeatureInfo(transDic, label + 'POSLemma', tokens, 'pl')
@staticmethod
def concatenateTokens(tokens):
idx = 0
tokenDic = {}
result = []
for token in tokens:
if isinstance(token, Token):
result.append(Token(-1, token.text, token.lemma, token.posTag))
elif isinstance(token, list):
tokenDic[idx] = Token(-1, '', '', '')
for subToken in Sentence.getTokens(token):
tokenDic[idx].text += subToken.text + '_'
tokenDic[idx].lemma += subToken.lemma + '_'
tokenDic[idx].posTag += subToken.posTag + '_'
tokenDic[idx].text = tokenDic[idx].text[:-1]
tokenDic[idx].lemma = tokenDic[idx].lemma[:-1]
tokenDic[idx].posTag = tokenDic[idx].posTag[:-1]
result.append(tokenDic[idx])
idx += 1
return result
@staticmethod
def getFeatureInfo(dic, label, tokens, features):
feature = ''
idx = 0
for token in tokens:
if features[idx].lower() == 'l':
if FeatParams.useLemma:
if token.lemma.strip() != '':
feature += token.lemma.strip() + '_'
else:
feature += '*' + '_'
elif features[idx].lower() == 'p':
if FeatParams.usePOS:
if token.posTag.strip() != '':
feature += token.posTag.strip() + '_'
else:
feature += '*' + '_'
elif features[idx].lower() == 't':
if token.text.strip() != '':
feature += token.text.strip() + '_'
idx += 1
if len(feature) > 0:
feature = feature[:-1]
dic[label] = feature
return ''
@staticmethod
def areInLexic(tokensList):
if Sentence.getTokenLemmas(tokensList) in Corpus.mweDictionary.keys():
return True
return False
@staticmethod
def addTransitionHistory(transition, transDic):
if FeatParams.historyLength1:
Extractor.getTransitionHistory(transition, 1, 'TransHistory1', transDic)
if FeatParams.historyLength2:
Extractor.getTransitionHistory(transition, 2, 'TransHistory2', transDic)
if FeatParams.historyLength3:
Extractor.getTransitionHistory(transition, 3, 'TransHistory3', transDic)
@staticmethod
def getTransitionHistory(transition, length, label, transDic):
idx = 0
history = ''
transRef = transition
transition = transition.previous
while transition is not None and idx < length:
if transition.type is not None:
history += str(transition.type.value)
transition = transition.previous
idx += 1
if len(history) == length:
transDic[label] = history
transition = transRef
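# Illustrative sketch (not part of the original code): a rough picture of the kind of
# feature dictionary getFeatures builds for a hypothetical configuration with
# S0 = "pomme" (NOUN) and B0 = "de" (ADP). Which keys actually appear depends on the
# FeatParams flags; the values below are invented for demonstration only.
#
#     {'S0Token': 'pomme', 'S0POS': 'NOUN', 'S0Lemma': 'pomme',
#      'B0Token': 'de', 'B0POS': 'ADP', 'B0Lemma': 'de',
#      'S0B0Lemma': 'pomme_de', 'S0B0POS': 'NOUN_ADP',
#      'S0B0Distance': '1', 'TransHistory1': '0'}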
|
|
"""The tests for the MQTT Template light platform.
Configuration example with all features:
light:
platform: mqtt_template
name: mqtt_template_light_1
state_topic: 'home/rgb1'
command_topic: 'home/rgb1/set'
command_on_template: >
on,{{ brightness|d }},{{ red|d }}-{{ green|d }}-{{ blue|d }}
command_off_template: 'off'
state_template: '{{ value.split(",")[0] }}'
brightness_template: '{{ value.split(",")[1] }}'
color_temp_template: '{{ value.split(",")[2] }}'
white_value_template: '{{ value.split(",")[3] }}'
red_template: '{{ value.split(",")[4].split("-")[0] }}'
green_template: '{{ value.split(",")[4].split("-")[1] }}'
blue_template: '{{ value.split(",")[4].split("-")[2] }}'
If your light doesn't support the brightness feature, omit `brightness_template`.
If your light doesn't support the color temp feature, omit `color_temp_template`.
If your light doesn't support the white value feature, omit `white_value_template`.
If your light doesn't support the RGB feature, omit `(red|green|blue)_template`.
"""
import json
from unittest.mock import ANY, patch
from homeassistant.components import light, mqtt
from homeassistant.components.mqtt.discovery import async_start
from homeassistant.const import (
ATTR_ASSUMED_STATE,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_setup_component,
async_fire_mqtt_message,
async_mock_mqtt_component,
mock_coro,
mock_registry,
)
async def test_setup_fails(hass, mqtt_mock):
"""Test that setup fails with missing required configuration items."""
with assert_setup_component(0, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{light.DOMAIN: {"platform": "mqtt", "schema": "template", "name": "test"}},
)
assert hass.states.get("light.test") is None
with assert_setup_component(0, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_topic",
}
},
)
assert hass.states.get("light.test") is None
with assert_setup_component(0, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_topic",
"command_on_template": "on",
}
},
)
assert hass.states.get("light.test") is None
with assert_setup_component(0, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_topic",
"command_off_template": "off",
}
},
)
assert hass.states.get("light.test") is None
async def test_state_change_via_topic(hass, mqtt_mock):
"""Test state change via topic."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,"
"{{ brightness|d }},"
"{{ color_temp|d }},"
"{{ white_value|d }},"
"{{ red|d }}-"
"{{ green|d }}-"
"{{ blue|d }}",
"command_off_template": "off",
"state_template": '{{ value.split(",")[0] }}',
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("white_value") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async_fire_mqtt_message(hass, "test_light_rgb", "on")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("white_value") is None
async def test_state_brightness_color_effect_temp_white_change_via_topic(
hass, mqtt_mock
):
"""Test state, bri, color, effect, color temp, white val change."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"effect_list": ["rainbow", "colorloop"],
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,"
"{{ brightness|d }},"
"{{ color_temp|d }},"
"{{ white_value|d }},"
"{{ red|d }}-"
"{{ green|d }}-"
"{{ blue|d }},"
"{{ effect|d }}",
"command_off_template": "off",
"state_template": '{{ value.split(",")[0] }}',
"brightness_template": '{{ value.split(",")[1] }}',
"color_temp_template": '{{ value.split(",")[2] }}',
"white_value_template": '{{ value.split(",")[3] }}',
"red_template": '{{ value.split(",")[4].' 'split("-")[0] }}',
"green_template": '{{ value.split(",")[4].' 'split("-")[1] }}',
"blue_template": '{{ value.split(",")[4].' 'split("-")[2] }}',
"effect_template": '{{ value.split(",")[5] }}',
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("white_value") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# turn on the light, full white
async_fire_mqtt_message(hass, "test_light_rgb", "on,255,145,123,255-128-64,")
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("rgb_color") == (255, 128, 63)
assert state.attributes.get("brightness") == 255
assert state.attributes.get("color_temp") == 145
assert state.attributes.get("white_value") == 123
assert state.attributes.get("effect") is None
# turn the light off
async_fire_mqtt_message(hass, "test_light_rgb", "off")
state = hass.states.get("light.test")
assert state.state == STATE_OFF
# lower the brightness
async_fire_mqtt_message(hass, "test_light_rgb", "on,100")
light_state = hass.states.get("light.test")
assert light_state.attributes["brightness"] == 100
# change the color temp
async_fire_mqtt_message(hass, "test_light_rgb", "on,,195")
light_state = hass.states.get("light.test")
assert light_state.attributes["color_temp"] == 195
# change the color
async_fire_mqtt_message(hass, "test_light_rgb", "on,,,,41-42-43")
light_state = hass.states.get("light.test")
assert light_state.attributes.get("rgb_color") == (243, 249, 255)
# change the white value
async_fire_mqtt_message(hass, "test_light_rgb", "on,,,134")
light_state = hass.states.get("light.test")
assert light_state.attributes["white_value"] == 134
# change the effect
async_fire_mqtt_message(hass, "test_light_rgb", "on,,,,41-42-43,rainbow")
light_state = hass.states.get("light.test")
assert light_state.attributes.get("effect") == "rainbow"
async def test_optimistic(hass, mqtt_mock):
"""Test optimistic mode."""
fake_state = ha.State(
"light.test",
"on",
{
"brightness": 95,
"hs_color": [100, 100],
"effect": "random",
"color_temp": 100,
"white_value": 50,
},
)
with patch(
"homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state",
return_value=mock_coro(fake_state),
):
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,"
"{{ brightness|d }},"
"{{ color_temp|d }},"
"{{ white_value|d }},"
"{{ red|d }}-"
"{{ green|d }}-"
"{{ blue|d }}",
"command_off_template": "off",
"effect_list": ["colorloop", "random"],
"qos": 2,
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 95
assert state.attributes.get("hs_color") == (100, 100)
assert state.attributes.get("effect") == "random"
assert state.attributes.get("color_temp") == 100
assert state.attributes.get("white_value") == 50
assert state.attributes.get(ATTR_ASSUMED_STATE)
async def test_flash(hass, mqtt_mock):
"""Test flash."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,{{ flash }}",
"command_off_template": "off",
"qos": 0,
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async def test_transition(hass, mqtt_mock):
"""Test for transition time being sent when included."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
async def test_invalid_values(hass, mqtt_mock):
"""Test that invalid values are ignored."""
with assert_setup_component(1, light.DOMAIN):
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"effect_list": ["rainbow", "colorloop"],
"state_topic": "test_light_rgb",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,"
"{{ brightness|d }},"
"{{ color_temp|d }},"
"{{ red|d }}-"
"{{ green|d }}-"
"{{ blue|d }},"
"{{ effect|d }}",
"command_off_template": "off",
"state_template": '{{ value.split(",")[0] }}',
"brightness_template": '{{ value.split(",")[1] }}',
"color_temp_template": '{{ value.split(",")[2] }}',
"white_value_template": '{{ value.split(",")[3] }}',
"red_template": '{{ value.split(",")[4].' 'split("-")[0] }}',
"green_template": '{{ value.split(",")[4].' 'split("-")[1] }}',
"blue_template": '{{ value.split(",")[4].' 'split("-")[2] }}',
"effect_template": '{{ value.split(",")[5] }}',
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_OFF
assert state.attributes.get("rgb_color") is None
assert state.attributes.get("brightness") is None
assert state.attributes.get("color_temp") is None
assert state.attributes.get("effect") is None
assert state.attributes.get("white_value") is None
assert not state.attributes.get(ATTR_ASSUMED_STATE)
# turn on the light, full white
async_fire_mqtt_message(
hass, "test_light_rgb", "on,255,215,222,255-255-255,rainbow"
)
state = hass.states.get("light.test")
assert state.state == STATE_ON
assert state.attributes.get("brightness") == 255
assert state.attributes.get("color_temp") == 215
assert state.attributes.get("rgb_color") == (255, 255, 255)
assert state.attributes.get("white_value") == 222
assert state.attributes.get("effect") == "rainbow"
# bad state value
async_fire_mqtt_message(hass, "test_light_rgb", "offf")
# state should not have changed
state = hass.states.get("light.test")
assert state.state == STATE_ON
# bad brightness values
async_fire_mqtt_message(hass, "test_light_rgb", "on,off,255-255-255")
# brightness should not have changed
state = hass.states.get("light.test")
assert state.attributes.get("brightness") == 255
# bad color temp values
async_fire_mqtt_message(hass, "test_light_rgb", "on,,off,255-255-255")
# color temp should not have changed
state = hass.states.get("light.test")
assert state.attributes.get("color_temp") == 215
# bad color values
async_fire_mqtt_message(hass, "test_light_rgb", "on,255,a-b-c")
# color should not have changed
state = hass.states.get("light.test")
assert state.attributes.get("rgb_color") == (255, 255, 255)
# bad white value values
async_fire_mqtt_message(hass, "test_light_rgb", "on,,,off,255-255-255")
# white value should not have changed
state = hass.states.get("light.test")
assert state.attributes.get("white_value") == 222
# bad effect value
async_fire_mqtt_message(hass, "test_light_rgb", "on,255,a-b-c,white")
# effect should not have changed
state = hass.states.get("light.test")
assert state.attributes.get("effect") == "rainbow"
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
"availability_topic": "availability-topic",
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "online")
state = hass.states.get("light.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "offline")
state = hass.states.get("light.test")
assert state.state == STATE_UNAVAILABLE
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test_light_rgb/set",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
"availability_topic": "availability-topic",
"payload_available": "good",
"payload_not_available": "nogood",
}
},
)
state = hass.states.get("light.test")
assert state.state == STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "good")
state = hass.states.get("light.test")
assert state.state != STATE_UNAVAILABLE
async_fire_mqtt_message(hass, "availability-topic", "nogood")
state = hass.states.get("light.test")
assert state.state == STATE_UNAVAILABLE
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test-topic",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", '{ "val": "100" }')
state = hass.states.get("light.test")
assert state.attributes.get("val") == "100"
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test-topic",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", '[ "list", "of", "things"]')
state = hass.states.get("light.test")
assert state.attributes.get("val") is None
assert "JSON result was not a dictionary" in caplog.text
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: {
"platform": "mqtt",
"schema": "template",
"name": "test",
"command_topic": "test-topic",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
"json_attributes_topic": "attr-topic",
}
},
)
async_fire_mqtt_message(hass, "attr-topic", "This is not JSON")
state = hass.states.get("light.test")
assert state.attributes.get("val") is None
assert "Erroneous JSON: This is not JSON" in caplog.text
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = (
'{ "name": "Beer",'
' "schema": "template",'
' "command_topic": "test_topic",'
' "command_on_template": "on",'
' "command_off_template": "off",'
' "json_attributes_topic": "attr-topic1" }'
)
data2 = (
'{ "name": "Beer",'
' "schema": "template",'
' "command_topic": "test_topic",'
' "command_on_template": "on",'
' "command_off_template": "off",'
' "json_attributes_topic": "attr-topic2" }'
)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data1)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "100" }')
state = hass.states.get("light.beer")
assert state.attributes.get("val") == "100"
# Change json_attributes_topic
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data2)
await hass.async_block_till_done()
# Verify we are no longer subscribing to the old topic
async_fire_mqtt_message(hass, "attr-topic1", '{ "val": "50" }')
state = hass.states.get("light.beer")
assert state.attributes.get("val") == "100"
# Verify we are subscribing to the new topic
async_fire_mqtt_message(hass, "attr-topic2", '{ "val": "75" }')
state = hass.states.get("light.beer")
assert state.attributes.get("val") == "75"
async def test_unique_id(hass):
"""Test unique id option only creates one light per unique_id."""
await async_mock_mqtt_component(hass)
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"schema": "template",
"state_topic": "test-topic",
"command_topic": "test_topic",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"schema": "template",
"state_topic": "test-topic",
"command_topic": "test_topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
},
)
async_fire_mqtt_message(hass, "test-topic", "payload")
assert len(hass.states.async_entity_ids(light.DOMAIN)) == 1
async def test_discovery_removal(hass, mqtt_mock, caplog):
"""Test removal of discovered mqtt_json lights."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {"mqtt": {}}, entry)
data = (
'{ "name": "Beer",'
' "schema": "template",'
' "command_topic": "test_topic",'
' "command_on_template": "on",'
' "command_off_template": "off"}'
)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", "")
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is None
async def test_discovery_deprecated(hass, mqtt_mock, caplog):
"""Test discovery of mqtt template light with deprecated option."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {"mqtt": {}}, entry)
data = (
'{ "name": "Beer",'
' "platform": "mqtt_template",'
' "command_topic": "test_topic",'
' "command_on_template": "on",'
' "command_off_template": "off"}'
)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data)
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is not None
assert state.name == "Beer"
async def test_discovery_update_light(hass, mqtt_mock, caplog):
"""Test update of discovered light."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = (
'{ "name": "Beer",'
' "schema": "template",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic",'
' "command_on_template": "on",'
' "command_off_template": "off"}'
)
data2 = (
'{ "name": "Milk",'
' "schema": "template",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic",'
' "command_on_template": "on",'
' "command_off_template": "off"}'
)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data2)
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is not None
assert state.name == "Milk"
state = hass.states.get("light.milk")
assert state is None
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(hass, "homeassistant", {}, entry)
data1 = '{ "name": "Beer" }'
data2 = (
'{ "name": "Milk",'
' "schema": "template",'
' "state_topic": "test_topic",'
' "command_topic": "test_topic",'
' "command_on_template": "on",'
' "command_off_template": "off"}'
)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data1)
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is None
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data2)
await hass.async_block_till_done()
state = hass.states.get("light.milk")
assert state is not None
assert state.name == "Milk"
state = hass.states.get("light.beer")
assert state is None
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT light device registry integration."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
await async_start(hass, "homeassistant", {}, entry)
registry = await hass.helpers.device_registry.async_get_registry()
data = json.dumps(
{
"platform": "mqtt",
"name": "Test 1",
"schema": "template",
"state_topic": "test-topic",
"command_topic": "test-topic",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
"device": {
"identifiers": ["helloworld"],
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
"unique_id": "veryunique",
}
)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.identifiers == {("mqtt", "helloworld")}
assert device.connections == {("mac", "02:5b:26:a8:dc:12")}
assert device.manufacturer == "Whatever"
assert device.name == "Beer"
assert device.model == "Glass"
assert device.sw_version == "0.1-beta"
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_hass(hass)
await async_start(hass, "homeassistant", {}, entry)
registry = await hass.helpers.device_registry.async_get_registry()
config = {
"platform": "mqtt",
"name": "Test 1",
"schema": "template",
"state_topic": "test-topic",
"command_topic": "test-command-topic",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
"device": {
"identifiers": ["helloworld"],
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
"unique_id": "veryunique",
}
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Beer"
config["device"]["name"] = "Milk"
data = json.dumps(config)
async_fire_mqtt_message(hass, "homeassistant/light/bla/config", data)
await hass.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Milk"
async def test_entity_id_update(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
registry = mock_registry(hass, {})
mock_mqtt = await async_mock_mqtt_component(hass)
assert await async_setup_component(
hass,
light.DOMAIN,
{
light.DOMAIN: [
{
"platform": "mqtt",
"name": "beer",
"schema": "template",
"state_topic": "test-topic",
"command_topic": "command-topic",
"command_on_template": "on,{{ transition }}",
"command_off_template": "off,{{ transition|d }}",
"availability_topic": "avty-topic",
"unique_id": "TOTALLY_UNIQUE",
}
]
},
)
state = hass.states.get("light.beer")
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 2
mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, "utf-8")
mock_mqtt.async_subscribe.assert_any_call("avty-topic", ANY, 0, "utf-8")
mock_mqtt.async_subscribe.reset_mock()
registry.async_update_entity("light.beer", new_entity_id="light.milk")
await hass.async_block_till_done()
state = hass.states.get("light.beer")
assert state is None
state = hass.states.get("light.milk")
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 2
mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, "utf-8")
mock_mqtt.async_subscribe.assert_any_call("avty-topic", ANY, 0, "utf-8")
|
|
#!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simian network backoff detection module."""
import logging
import platform
import re
import socket
import urlparse
import requests
from simian.mac.client import flight_common
LINUX = 'Linux'
DARWIN = 'Darwin'
PLATFORM = platform.system()
ROUTE = {LINUX: ['/sbin/ip', 'route'], DARWIN: ['/usr/sbin/netstat', '-nr']}
ARP = {LINUX: '/usr/sbin/arp', DARWIN: '/usr/sbin/arp'}
HOST = '/usr/bin/host'
IFCONFIG = '/sbin/ifconfig'
IOS_WAP_DEFAULT_GATEWAY_IP = '172.20.10.1'
IOS_WAP_NETWORK_GATEWAY_SUBNET = '172.20.10/28'
INTERFACE_ANDROID_WAP = 'android_wap'
INTERFACE_WWAN = 'wwan'
INTERFACE_VPN = 'vpn'
BACKOFF_WLANS = frozenset([
'Fly-Fi',
'gogoinflight',
'Telekom_FlyNet',
'United_WiFi',
'United_Wi-Fi',
])
def _GetPlatform():
"""Returns a str like constants LINUX or DARWIN."""
platform_str = platform.system()
assert platform_str in [LINUX, DARWIN]
return platform_str
def GetAllInterfaceNames():
"""Get network interfaces info for this host.
Note that this list may include all types of interfaces
that are not normally interesting to this script, e.g. fw0.
Returns:
list, e.g. ['en0', 'en1', 'fw0', 'eth0']
"""
this_platform = _GetPlatform()
# Note slight difference in regex.
# BSD ifconfig writes "interface_name:\s+"
# while Linux writes "interface_name\s+"
if this_platform == LINUX:
intf_header = re.compile(r'^([a-z]+(?:[0-9]+)?)\s+')
elif this_platform == DARWIN:
intf_header = re.compile(r'^([a-z]+(?:[0-9]+)?):\s+')
return_code, stdout, stderr = flight_common.Exec('/sbin/ifconfig')
if return_code != 0 or stderr:
return []
interfaces = []
if stdout:
for l in stdout.splitlines(): # pylint: disable=maybe-no-member
m = intf_header.search(str(l))
if m:
interfaces.append(m.group(1))
return interfaces
def GetInterfaceNames(interface_type):
"""Get the network interface names for an interface type.
Args:
interface_type: str, like INTERFACE_* constant
Returns:
list of str, like ['ppp0'] or ['en0', 'en1']
Raises:
ValueError: if interface_type is unknown
PlatformError: if platform is not implemented
"""
this_platform = _GetPlatform()
all_interfaces = GetAllInterfaceNames()
if interface_type == INTERFACE_WWAN:
return [x for x in all_interfaces if x.startswith('ppp')
or x.startswith('bnep')]
elif interface_type == INTERFACE_ANDROID_WAP:
if this_platform == DARWIN:
return [x for x in all_interfaces if x.startswith('en')]
elif this_platform == LINUX:
return [x for x in all_interfaces if x.startswith('wlan')]
elif interface_type == INTERFACE_VPN:
if this_platform in [DARWIN, LINUX]:
return [x for x in all_interfaces if x.endswith('tun0')]
else:
raise ValueError('Unknown Platform: %s' % this_platform)
else:
raise ValueError(interface_type)
def GetNetworkGateway(network):
"""Get the gateway for a network.
Uses "netstat -nr" on Darwin and "ip route" on Linux to read the routing
table.
It searches for a route with destination exactly matching the network
parameter!
Args:
network: str, likely in CIDR format or default gateway,
e.g. "1.2.3/24" or "0.0.0.0"
Returns:
a string like "1.2.3.4" or "link#1" or "01:02:03:04:05:06" or
"dev wlan0", depending on the type of route and platform.
"""
route = ROUTE.get(_GetPlatform(), None)
logging.debug('Route: %s', str(route))
if not route:
return
try:
return_code, stdout, stderr = flight_common.Exec(route)
except OSError:
return_code = None
if return_code != 0 or stderr or not stdout:
return
gateway_pattern = (
r'^%s\s+(via[\s\t])?'
r'([\d\.]+|[0-9a-f:]+|link#\d+|dev [a-z\d]+)[\s\t]+' % network)
gateway = re.search(gateway_pattern, str(stdout), re.MULTILINE)
if gateway:
return gateway.group(2)
return
def GetDefaultGateway():
"""Gets the default gateway.
Returns:
a string like "192.168.0.1" or None if default gateway is unknown.
"""
  if _GetPlatform() in [DARWIN, LINUX]:
    default = 'default'
  else:
    logging.error('Unknown platform %s', _GetPlatform())
    return None
  return GetNetworkGateway(default)
def GetHttpResource(host, path='/', port=80, redir=False):
"""Gets HTTP resource.
Args:
host: str, like "example.com", but not "http://example.com".
path: optional, str, like "/path", default "/".
port: optional, int, default 80.
redir: optional, bool, whether to follow redirects.
Returns:
(int response code, str response body)
(int -1, str error from http exception)
"""
if port != 80:
port_str = ':%d' % port
else:
port_str = ''
url = 'http://%s%s' % (host, port_str)
url = urlparse.urljoin(url, path)
try:
response = requests.get(url, allow_redirects=redir)
code = response.status_code
body = response.text
return code, body
except requests.RequestException as e:
return -1, str(e)
def IsOnWwan():
""""Checks WWAN device connection status.
Note: this may produce false-positives, and may not catch all WWAN
devices. Several Sprint and Verizon devices were tested, all of which
create ppp0 upon connection. However, L2TP VPN also creates ppp0
(Google no longer uses this as of Q2-2010 in favor of SSLVPN). A
stronger check is probably needed at some point.
As of 2011-12-6 OpenVPN interface is tun0 on Linux and Darwin.
Returns:
Boolean. True if WWAN device is active, False otherwise.
"""
wwan_ifaces = GetInterfaceNames(INTERFACE_WWAN)
for wwan_iface in wwan_ifaces:
try:
return_code, unused_out, unused_err = flight_common.Exec(
[IFCONFIG, wwan_iface])
except OSError:
return_code = None
# ifconfig exits with 1 if interface doesn't exist.
if return_code == 0:
return True
return False
def GetNetworkName():
"""Return network name (SSID for WLANs) a device is connected to.
Returns:
name of the matching network name if possible, None otherwise.
"""
this_platform = _GetPlatform()
if this_platform == LINUX:
cmdline = '/usr/bin/nmcli -t -f NAME,DEVICES conn status'
# Ignore "Auto " prefix on automatically connecting networks.
ssid_re = re.compile(r'^(Auto )?([^:]*):.*$')
try:
return_code, out, _ = flight_common.Exec(cmdline)
except OSError:
logging.exception('Error executing nmcli')
return
if out and not return_code:
for l in out.splitlines():
res = ssid_re.match(l)
if res:
return res.groups()[1]
elif this_platform == DARWIN:
cmdline = (
'/System/Library/PrivateFrameworks/Apple80211.framework/Versions/'
'Current/Resources/airport -I | '
'awk \'/ SSID/ {print substr($0, index($0, $2))}\'')
try:
return_code, out, _ = flight_common.Exec(cmdline)
except OSError:
logging.exception('Error executing airport')
return
if out and not return_code:
return out.strip() or None
def IsOnBackoffWLAN():
"""Returns True if on a Backoff WLAN, such as gogoinflight WiFi."""
return GetNetworkName() in BACKOFF_WLANS
def IsOnAndroidWap():
"""Checks if Android WiFi or Bluetooth tethering is connected.
Returns:
Boolean. True if Android tethering is connected, False otherwise.
"""
# ifconfig output looks a little bit different on Darwin vs Linux.
#
# Darwin:
# inet 169.254.135.20 netmask 0xffff0000 broadcast 169.254.255.255
# Linux:
# inet addr:172.26.113.45 Bcast:172.26.115.255 Mask:255.255.252.0
android_wap_match_regex = re.compile(
r'inet[\w\s]*[\s:]+192\.168\.(42|43|44)\.\d{1,3}\s+'
r'.*(?:netmask\s+0xffffff00\s+|Mask:255\.255\.255\.0)')
ifaces = GetInterfaceNames(INTERFACE_ANDROID_WAP)
for wifi_iface in ifaces:
# Android tethering uses very specific subnets*, as well as dnsmasq which
# reveals itself via the TXT VERSION.BIND record.
# * 192.168.42.0/24 for wired, 192.168.43.0/24 for WiFi, and
# 192.168.44.0/24 for Bluetooth.
try:
return_code, stdout, stderr = flight_common.Exec([IFCONFIG, wifi_iface])
except OSError:
return_code = None
if return_code != 0 or stderr: # interface was likely not found.
continue
android_wap_match = android_wap_match_regex.search(stdout)
# Look for an interface on 192.168.4[2-4].0/24.
if android_wap_match is not None:
# If the default gateway is not through a likely Android WAN interface,
# tethering may be active but is not likely to be used.
default_gateway = GetDefaultGateway()
logging.debug('Default gateway: %s', str(default_gateway))
default_gateway_prefix = '192.168.%s.' % android_wap_match.group(1)
      if not default_gateway or not default_gateway.startswith(default_gateway_prefix):
return False
# IP, netmask, gateway look like Android WAP, so check dnsmasq.
# Request needs to be explicitly top level, as Linux uses
# ndots:2 which would turn VERSION.BIND (without trailing dot) into
# VERSION.BIND.foo.example.com in some cases.
cmd = [HOST, '-W', '5', '-c', 'CHAOS', '-t', 'txt', 'VERSION.BIND.',
default_gateway]
try:
return_code, stdout, unused_err = flight_common.Exec(cmd)
except OSError:
return_code = None
if return_code != 0:
continue
dnsmasq_match = re.search(
r'VERSION\.BIND descriptive text "dnsmasq-.*"', stdout)
if dnsmasq_match is not None:
# IP, netmask and dnsmasq all match Android WAP tethering.
return True
return False
def IsOnIosWap():
"""Checks if the wireless connection is to an iOS WAP tether.
Returns:
Boolean. True if iOS WAP is connected, False otherwise.
"""
# iOS WAP looks like a 172.20.10/28 network. Gateway is
# 172.20.10.1 with TCP port 62078 open.
gateway = GetNetworkGateway(IOS_WAP_NETWORK_GATEWAY_SUBNET)
if not gateway:
return False
ip = GetDefaultGateway()
if not ip:
return False
if ip != IOS_WAP_DEFAULT_GATEWAY_IP:
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((ip, 62078))
if result == 0:
return True
return False
def IsOnMifi():
"""Checks if the wireless connection is to a MiFi-like device.
These devices are available from Verizon, Sprint, others, and usually
offer some kind of web access portal that says MiFi or Jetpack as a text
string.
Returns:
Bool, True if the connection is a likely MiFi-like device, False if not.
"""
ip = GetDefaultGateway()
if not ip:
return False
if ip.startswith('192.168.1.'): # Verizon and Sprint devices
http_status, body = GetHttpResource(ip, redir=True)
# MiFi-like devices usually run a http interface. It returns a long http
# response with various easily found "MiFi" or "Jetpack" strings in it
# when loaded. No http auth challenge is issued.
if http_status == 200 and body and ('MiFi' in body or 'Jetpack' in body):
return True
elif ip == '192.168.8.1': # common Huawei gateway
http_status, _ = GetHttpResource(ip, redir=False)
return http_status == 307
return False
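# Illustrative sketch (not part of the original module): one way a caller might combine
# the detectors above into a single backoff decision. The helper name ShouldBackOff and
# the set/order of checks are assumptions, not the project's actual API.
def ShouldBackOff():
  """Returns True if the current network looks tethered, metered or restricted."""
  return (IsOnBackoffWLAN() or IsOnWwan() or IsOnAndroidWap() or IsOnIosWap() or
          IsOnMifi())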
|
|
import argparse
import json
import requests
import urlparse
def step(instructions, func=None, wait=True):
val = None
for line in instructions.split('\n'):
print line.strip()
if func:
val = func()
if wait:
raw_input('Press enter when you are done.')
return val
def setup_twitter():
step(
'''
Step 1
------
Create a Twitter app at https://dev.twitter.com/apps/new.
'''
)
step(
'''
Step 2
------
Go to the Settings tab and change access under Application Type
to Read and Write.
'''
)
step(
'''
Step 3
------
Go back to the Details tab and click on the Create my Access Token
button.
'''
)
step(
'''
Done!
-----
Your application's details tab now has all the required keys:
- consumer key
- consumer secret
- access token
- access token secret
''',
wait=False
)
def setup_facebook():
def get_app_info():
return (
raw_input('Enter your app id: '),
raw_input('Enter your app secret: ')
)
def get_code():
url = raw_input(
'Enter the URL you were redirected to after granting the '
'permissions: '
)
url = urlparse.urlparse(url)
query = urlparse.parse_qs(url.query)
return query['code'][0]
def exchange_code_for_token():
response = requests.get(
'https://graph.facebook.com/oauth/access_token'
'?client_id=%s'
'&redirect_uri=http://localhost/'
'&client_secret=%s'
'&code=%s' % (
app_id,
app_secret,
code
)
)
if response.status_code != 200:
print response.content
raise Exception(response.content)
data = urlparse.parse_qs(response.content)
return data['access_token'][0]
def obtain_long_lived_token():
response = requests.get(
'https://graph.facebook.com/oauth/access_token'
'?client_id=%s'
'&client_secret=%s'
'&grant_type=fb_exchange_token'
'&fb_exchange_token=%s' % (
app_id,
app_secret,
access_token
)
)
if response.status_code != 200:
print response.content
raise Exception(response.content)
data = urlparse.parse_qs(response.content)
return data['access_token'][0]
def fetch_page_access_token():
response = requests.get(
'https://graph.facebook.com/me/accounts?access_token=%s'
% access_token
)
if response.status_code != 200:
print response.content
raise Exception(response.content)
data = json.loads(response.content)
for page in data['data']:
if page['id'] == page_id:
return page['access_token']
step(
'''
Step 1
------
Create a Facebook app at https://developers.facebook.com/apps.
Make the app domain http://localhost/.
Select Website with Facebook Login.
Enter http://localhost/ as the site URL.
'''
)
step(
'''
Step 2
------
Create a Facebook Page at https://www.facebook.com/pages/create.php.
'''
)
app_id, app_secret = step(
'''
Step 3
------
''',
func=get_app_info,
wait=False
)
url = (
'https://www.facebook.com/dialog/oauth'
'?client_id=%s'
'&redirect_uri=http://localhost/'
'&scope=manage_pages,publish_stream'
'&state=ieo4wft' % app_id
)
step(
'''
Step 4
------
Go to %s
''' % url
)
code = step(
'''
Step 5
------
''',
func=get_code,
wait=False
)
access_token = step(
'''
Step 6
------
Exchanging code for access token...
''',
func=exchange_code_for_token,
wait=False
)
access_token = step(
'''
Step 7
------
Obtaining long-lived access token...
''',
func=obtain_long_lived_token,
wait=False
)
page_id = step(
'''
Step 8
------
''',
func=lambda: raw_input('Enter page id: '),
wait=False
)
page_access_token = step(
'''
Step 9
------
Fetching page access token...
''',
func=fetch_page_access_token,
wait=False
)
step(
'''
Done!
-----
Here is the information you need:
- page id: %s
- page access token: %s
'''
% (
page_id,
page_access_token
),
wait=False
)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Get required keys for python-sharer'
)
parser.add_argument(
'sharer',
choices=[
'twitter',
'facebook',
],
help='The service you want to get keys for.'
)
args = parser.parse_args()
funcs = {
'twitter': setup_twitter,
'facebook': setup_facebook,
}
funcs[args.sharer]()
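# Example invocation (the script name below is an assumption; use whatever file name
# this script is saved under):
#   python get_keys.py twitter     # walks through Twitter app/key setup
#   python get_keys.py facebook    # walks through Facebook page token setup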
|
|
"""
Astra-Viso star map module.
"""
from __future__ import division
import pickle
import numpy as np
import pkg_resources as pkg
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class StarMap:
"""
Star map class.
"""
def __init__(self, preset=None):
"""
Initialize new star catalog object. Option to load one of several
pre-defined catalogs. Options are:
"singlecenter" -- A single bright star aligned with the z-axis.
"sixfaces" -- Six bright stars oriented along each positive and
negative axis.
"random" -- A randomly generated catalog with a user-defined
number of stars.
"hipparcos" -- The Hipparcos star catalog. 117,955 total stars [1].
"tycho" -- The Tycho-2 star catalog. 1,055,115 total stars [2].
Parameters
----------
preset : str, optional
Name of preset star catalog to load.
Returns
-------
starmap : StarMap
Initialized star catalog object.
"""
# Stars
self.catalog = None
self.magnitude = None
self.size = 0
# Load catalog
if preset:
self.load_preset(preset)
else:
self.load_preset("singlecenter")
def load_preset(self, preset, *arg):
"""
Load a preset star catalog. Current available options are:
"singlecenter" -- A single bright star aligned with the z-axis.
"sixfaces" -- Six bright stars oriented along each positive and
negative axis.
"random" -- A randomly generated catalog with a user-defined
number of stars.
"hipparcos" -- The Hipparcos star catalog. 117,955 total stars [1].
"tycho" -- The Tycho-2 star catalog. 1,055,115 total stars [2].
Parameters
----------
preset : str
Desired preset option.
star_count : int, optional
Number of stars desired from the "random" preset. Required for the
"random" preset.
Returns
-------
None
Notes
-----
[1] Perryman, Michael AC, et al. "The HIPPARCOS catalogue." Astronomy
and Astrophysics 323 (1997).
[2] Hog, Erik, et al. "The Tycho-2 catalogue of the 2.5 million
brightest stars." Astronomy and Astrophysics 355 (2000): L27-L30.
Examples
--------
>>> catalog = StarMap("tycho")
>>> catalog.size
1055115
>>> catalog.load_preset("hipparcos")
>>> catalog.size
117955
"""
# Single star on boresight
if preset.lower() == "singlecenter":
self.catalog = np.array([[0, 0, 1]])
self.magnitude = np.array([-1])
# Six stars, one on each axis
elif preset.lower() == "sixfaces":
self.catalog = np.array([[0, 0, 1], [0, 0, -1], [0, 1, 0], [0, -1, 0], [1, 0, 0], \
[-1, 0, 0]])
self.magnitude = np.array([12, 8, 4, 0, -4, -8])
# Generate a random catalog
elif preset[0:6].lower() == "random":
# Pre-allocate catalog
self.catalog = np.zeros((arg[0], 3))
self.magnitude = 8 + 2*np.random.randn(arg[0])
# Generate random unit vectors
for i in range(len(self.catalog)):
theta = np.arccos(1 - 2 * np.random.rand())
phi = 2 * np.pi * np.random.rand()
self.catalog[i] = [np.sin(theta) * np.cos(phi),
np.sin(theta) * np.sin(phi),
np.cos(theta)]
# Handle any other option
else:
# Open pickle file, if it exists
try:
# Load file
filename = pkg.resource_filename("astraviso", "catalogs/" + preset.lower() + ".dat")
infile = open(filename, 'rb')
catalog_file = pickle.load(infile)
infile.close()
# Set catalog
self.catalog = catalog_file["catalog"]
self.magnitude = catalog_file["magnitude"]
            except Exception:
print("Failed to open catalog: %s" % preset)
# Set size variable
self.size = len(self.catalog)
def get_all(self):
"""
Export all catalog elements to a dict.
Parameters
----------
None
Returns
-------
map : dict
Dictionary containing the star catalog in the form of an Nx3 array
of unit vectors (key:"catalog") and an array of corresponding
visible magnitudes (key:"magnitude").
Examples
--------
>>> catalog = StarMap("singlecenter")
>>> map = catalog.get_all()
>>> map
{'catalog': array([[0, 0, 1]]), 'magnitude': array([-1])}
"""
return {"catalog" : self.catalog,
"magnitude" : self.magnitude}
def get_region(self, vector, angle):
"""
Extract catalog elements falling within a given angle of a specified
unit vector.
Parameters
----------
vector : ndarray
Three-element array containing a desired unit vector direction.
angle : float
Angle about the designated unit vector to accept stars. Measured in
degrees.
Returns
-------
map : dict
Dictionary containing the star catalog region in the form of an Nx3
array of unit vectors (key:"catalog") and an array of corresponding
visible magnitudes (key:"magnitude").
Examples
--------
>>> catalog = StarMap("hipparcos")
>>> map = catalog.get_region(np.array([0, 0, 1]), 0.001)
>>> map
{'catalog': array([[ -1.68386803e-06, 4.35351891e-06, 1.00000000e+00],
[ -1.83452395e-06, -3.16303724e-06, 1.00000000e+00],
[ 1.51683717e-05, 4.10971724e-06, 1.00000000e+00]]),
'magnitude': array([ 9.03, 9.02, 8.69])}
"""
# Enforce normalization of input vector
if np.linalg.norm(vector) == 0:
raise ValueError("Central vector must be non-zero.")
vector = vector / np.linalg.norm(vector)
# Extract region
infield = [i for i in range(self.size) if \
np.arccos(np.dot(vector, self.catalog[i, :])) <= np.deg2rad(angle)]
# Return result
return {"catalog" : self.catalog[infield],
"magnitude" : self.magnitude[infield]}
def downselect(self, func, mode):
"""
Downselect current catalog according to a boolean-valued input function.
Culls the internal catalog.
Parameters
----------
func : function
Boolean-valued selection function. Must accept two inputs. See notes
for more information on the required input format.
mode : str
Target values for downselect operation. Options are "magnitude" or
"catalog".
Returns
-------
None
Notes
-----
For the "magnitude" mode option, the input function must be of the form:
bool = f(magnitude, index)
where the magnitude value is a scalar float and the index is a scalar
int. The index value corresponds to the index of the current element.
For the "catalog" mode option, the input function must be of the form:
bool = f(vector, index)
where the vector value is a 3-element array and the index is a scalar
int. The index value corresponds to the index of the current element.
Examples
--------
>>> catalog = StarMap("hipparcos")
>>> catalog.size
117955
        >>> select_fcn = lambda mag, idx: mag < 6 and idx < 100000
>>> catalog.downselect(select_fcn, "magnitude")
>>> catalog.size
1413
"""
# Check function input arguments
if func.__code__.co_argcount == 1:
fcn = lambda val, idx: func(val)
elif func.__code__.co_argcount == 2:
fcn = func
else:
print("Improper number of input arguments!")
return
# Downselect based on star magnitudes
if mode.lower() == "magnitude":
selected = [idx for idx in range(self.size) if fcn(self.magnitude[idx], idx)]
# Downselect based on star unit vectors
elif mode.lower() == "catalog":
selected = [idx for idx in range(self.size) if fcn(self.catalog[idx], idx)]
# Unsupported option
else:
print("Unsupported option: %s" % mode)
# Finalize downselect
self.catalog = self.catalog[selected]
self.magnitude = self.magnitude[selected]
self.size = len(self.catalog)
def downsample(self, factor, mode="random"):
"""
Downsample current catalog.
Parameters
----------
factor : float
Factor to downsample by. Resulting catalog length will be
approximately 1/factor.
mode : str, optional
Downsampling mode. Options are "random" or "interval". Default is
"random".
Returns
-------
None
Examples
--------
>>> catalog = StarMap("hipparcos")
>>> catalog.size
117955
>>> catalog.downsample(10, mode="interval")
>>> catalog.size
11796
"""
# Check input
if factor <= 0:
return
# Downsample randomly
if mode.lower() == "random":
self.downselect(lambda x, idx: np.random.rand() <= 1/factor, "magnitude")
# Sample at interval
elif mode.lower() == "interval":
self.downselect(lambda x, idx: np.isclose(idx % factor, 0), "magnitude")
# Handle invalid mode
else:
raise ValueError("Invalid mode type. Options are: 'random' and 'interval'.")
def select_brighter(self, limit):
"""
Select only stars brighter than a given magnitude. Culls the internal
catalog only.
Parameters
----------
limit : float
Visible magnitude limit. Stars with magnitude values less than this
limit will be selected.
Returns
-------
None
Examples
--------
>>> catalog = StarMap("hipparcos")
>>> catalog.size
117955
>>> catalog.select_brighter(8)
>>> catalog.size
41057
"""
self.downselect(lambda x: x < limit, "magnitude")
def select_dimmer(self, limit):
"""
Select only stars dimmer than a given magnitude. Culls the internal
catalog only.
Parameters
----------
limit : float
Visible magnitude limit. Stars with magnitude values greater than
this limit will be selected.
Returns
-------
None
Examples
--------
>>> catalog = StarMap("hipparcos")
>>> catalog.size
117955
>>> catalog.select_dimmer(8)
>>> catalog.size
76561
"""
self.downselect(lambda x: x > limit, "magnitude")
def select_range(self, brightest, dimmest):
"""
Select only stars within a range of magnitudes. Culls the internal
catalog only.
Parameters
----------
        brightest : float
            Magnitude of the brightest stars to include (lower magnitude
            bound). Stars with magnitude values greater than or equal to this
            limit will be selected.
        dimmest : float
            Magnitude of the dimmest stars to include (upper magnitude bound).
            Stars with magnitude values less than or equal to this limit will
            be selected.
Returns
-------
None
Examples
--------
>>> catalog = StarMap("hipparcos")
>>> catalog.size
117955
        >>> catalog.select_range(-2, 8)
>>> catalog.size
41393
"""
self.downselect(lambda x: x >= brightest and x <= dimmest, "magnitude")
def viewfield(self):
"""
Create 3D plot of the entire catalog.
Parameters
----------
None
Returns
-------
None
"""
# Plot data
fig = plt.figure()
axis = Axes3D(fig)
axis.scatter(self.catalog[:, 0], self.catalog[:, 1], self.catalog[:, 2], marker=".", \
color="black", s=3)
# Show plot
axis.set_xlim([-1, 1])
axis.set_ylim([-1, 1])
axis.set_zlim([-1, 1])
plt.show()
def viewregion(self, vector, angle):
"""
Create 3D plot of a region of the catalog.
Parameters
----------
vector : ndarray
Three-element array containing a desired unit vector direction.
angle : float
Angle about the designated unit vector to accept stars. Measured in
degrees.
Returns
-------
None
"""
# Select region
region = self.getregion(vector, angle)
# Plot data
fig = plt.figure()
axis = Axes3D(fig)
axis.scatter(region["catalog"][:, 0], region["catalog"][:, 1], region["catalog"][:, 2], \
marker=".", color="black", s=2)
# Plot input vector
axis.quiver(0, 0, 0, vector[0], vector[1], vector[2], color="red", linewidth=1.5)
# Show plot
axis.set_xlim([-1, 1])
axis.set_ylim([-1, 1])
axis.set_zlim([-1, 1])
plt.show()
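# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal example of a "catalog"-mode downselect, assuming the StarMap class
# above is importable and the "hipparcos" catalog is available, as in the
# docstring examples.
if __name__ == "__main__":
    starmap = StarMap("hipparcos")
    # "catalog" mode passes each star's 3-element unit vector and its index to
    # the selection function; keep only stars in the +Z hemisphere.
    starmap.downselect(lambda vec, idx: vec[2] > 0, "catalog")
    # Thin the remaining stars to roughly 1 in 10, chosen at random.
    starmap.downsample(10, mode="random")
    # Keep only stars brighter than visible magnitude 6.
    starmap.select_brighter(6)
    print(starmap.size)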
|
|
"""The unit test for runtime.ast"""
import unittest
from runtime import ast, env, lib
NULL_LITERAL = ast.Literal(env.Value(env.NULL))
INT_LITERAL = ast.Literal(env.Value(lib.INTEGER, 0))
TRUE_LITERAL = ast.Literal(env.Value(lib.BOOLEAN, True))
FALSE_LITERAL = ast.Literal(env.Value(lib.BOOLEAN, False))
STRING_LITERAL = ast.Literal(
env.Value(lib.STRING, "Hallo Welt!", "identifier"))
class SumNode(ast.Node):
"""A sum node."""
name = "sum"
def __init__(self):
super().__init__()
def eval(self, context):
"""Sums up the value of its children."""
value = 0
for child in self.children:
value += child.eval(context).data
return env.Value(lib.INTEGER, value)
class AccessNode(ast.Node):
"""A access node."""
def __init__(self):
super().__init__()
@classmethod
def eval(cls, context):
"""Stores a string in the current namespace."""
context.store(STRING_LITERAL.value)
return STRING_LITERAL.eval(context)
class TestAst(unittest.TestCase):
"""The abstract syntax tree test cases."""
def test_sequence_node(self):
"""Test the sequence node."""
context = env.empty_context()
return_node = ast.Return()
return_node.children = [TRUE_LITERAL]
# empty sequence
empty_seq = ast.Sequence()
self.assertEqual(empty_seq.eval(context), NULL_LITERAL.value)
# non-empty sequence
non_seq = ast.Sequence()
non_seq.children = [NULL_LITERAL, TRUE_LITERAL]
self.assertEqual(non_seq.eval(context), TRUE_LITERAL.value)
non_seq.children = [TRUE_LITERAL, NULL_LITERAL]
self.assertEqual(non_seq.eval(context), NULL_LITERAL.value)
# sequence with return
ret_seq = ast.Sequence()
ret_seq.children = [return_node, NULL_LITERAL]
self.assertEqual(ret_seq.eval(context), TRUE_LITERAL.value)
# self.assertEqual(ret_seq.__str__(), "<Node (sequence)>")
def test_conditional_node(self):
"""Test the conditional node."""
context = env.empty_context()
# Test bad conditional error
bad_conditional = ast.Conditional()
bad_conditional.add(NULL_LITERAL) # if None:
bad_conditional.add(NULL_LITERAL) # then None
self.assertRaises(Exception, bad_conditional.eval, context)
# Test correct result
good_conditional = ast.Conditional()
good_conditional.add(TRUE_LITERAL)
good_conditional.add(NULL_LITERAL)
self.assertEqual(good_conditional.eval(context), NULL_LITERAL.value)
# self.assertEqual(good_conditional.__str__(), "<Node (conditional)>")
def test_branch_node(self):
"""Test the branch node."""
context = env.empty_context()
# always evaluates
true_cond = ast.Conditional()
true_cond.children = [TRUE_LITERAL, STRING_LITERAL]
false_cond = ast.Conditional()
false_cond.children = [FALSE_LITERAL, NULL_LITERAL]
# Test if branch
if_branch = ast.Branch()
if_branch.children = [true_cond, NULL_LITERAL]
self.assertEqual(if_branch.eval(context), STRING_LITERAL.value)
# Test if-else branch
ifelse_branch = ast.Branch()
ifelse_branch.children = [false_cond, STRING_LITERAL]
self.assertEqual(ifelse_branch.eval(context), STRING_LITERAL.value)
# Test if-elif-else branch
ifelifelse_branch = ast.Branch()
ifelifelse_branch.children = [false_cond, true_cond, NULL_LITERAL]
self.assertEqual(ifelifelse_branch.eval(context), STRING_LITERAL.value)
# self.assertEqual(if_branch.__str__(), "<Node (branch)>")
def test_loop_node(self):
"""Test the loop node."""
break_node = ast.Break()
return_node = ast.Return()
return_node.children = [TRUE_LITERAL]
context = env.empty_context()
# check for exception
bad_loop = ast.Loop()
bad_loop.children = [NULL_LITERAL, TRUE_LITERAL]
self.assertRaises(Exception, bad_loop.eval, context)
# check for break
break_loop = ast.Loop()
break_loop.children = [TRUE_LITERAL, break_node]
self.assertEqual(break_loop.eval(context), NULL_LITERAL.value)
self.assertEqual(context.behaviour, ast.DEFAULT_BEHAVIOUR)
# check for return
return_loop = ast.Loop()
return_loop.children = [TRUE_LITERAL, return_node]
self.assertEqual(return_loop.eval(context), TRUE_LITERAL.value)
self.assertEqual(context.behaviour, ast.RETURN_BEHAVIOUR)
# self.assertEqual(return_loop.__str__(), "<Node (loop)>")
def test_return_node(self):
"""Test the return node."""
# test empty return node
context = env.empty_context()
empty_return = ast.Return()
self.assertEqual(empty_return.eval(context), NULL_LITERAL.value)
self.assertEqual(context.behaviour, ast.RETURN_BEHAVIOUR)
# test return with value
context = env.empty_context()
value_return = ast.Return()
value_return.add(TRUE_LITERAL)
self.assertEqual(value_return.eval(context), TRUE_LITERAL.value)
self.assertEqual(context.behaviour, ast.RETURN_BEHAVIOUR)
#self.assertEqual(value_return.__str__(), "<Node (return)>")
def test_break_node(self):
"""Test the break node."""
context = env.empty_context()
break_node = ast.Break()
self.assertEqual(break_node.eval(context), NULL_LITERAL.value)
self.assertEqual(context.behaviour, ast.BREAK_BEHAVIOUR)
#self.assertEqual(break_node.__str__(), "<Node (break)>")
def test_continue_node(self):
"""Test the continue node."""
context = env.empty_context()
continue_node = ast.Continue()
self.assertEqual(continue_node.eval(context), NULL_LITERAL.value)
self.assertEqual(context.behaviour, ast.CONTINUE_BEHAVIOUR)
#self.assertEqual(continue_node.__str__(), "<Node (continue)>")
def test_call_node(self):
"""Test the function node."""
# Create sample namespace
sum_function = SumNode()
sgn1 = env.Value(lib.INTEGER, None, "a")
sgn2 = env.Value(lib.INTEGER, None, "b")
sum_function.children = [
ast.Identifier("a"),
ast.Identifier("b"),
]
context = env.empty_context()
func = env.Function([
env.Signature([sgn1, sgn2], sum_function),
], "my_func")
context.store(func)
arg1 = SumNode()
arg1.children = [
ast.Literal(env.Value(lib.INTEGER, 1)),
ast.Literal(env.Value(lib.INTEGER, 2)),
]
arg2 = SumNode()
arg2.children = [
ast.Literal(env.Value(lib.INTEGER, 3)),
ast.Literal(env.Value(lib.INTEGER, 4)),
]
call_node = ast.Call("my_func")
call_node.children = [arg1, arg2]
self.assertEqual(call_node.eval(context), env.Value(lib.INTEGER, 10))
bad_node = ast.Call("missing")
self.assertRaises(Exception, bad_node.eval, context)
def test_operation_node(self):
"""Test the operation node."""
# Works completely like the call node
# Create sample namespace
sum_function = SumNode()
sgn1 = env.Value(lib.INTEGER, None, "a")
sgn2 = env.Value(lib.INTEGER, None, "b")
sum_function.children = [
ast.Identifier("a"),
ast.Identifier("b"),
]
context = env.empty_context()
func = env.Function([
env.Signature([sgn1, sgn2], sum_function),
])
operator = env.Operator(func, "+")
context.store(operator)
arg1 = SumNode()
arg1.children = [
ast.Literal(env.Value(lib.INTEGER, 1)),
ast.Literal(env.Value(lib.INTEGER, 2)),
]
arg2 = SumNode()
arg2.children = [
ast.Literal(env.Value(lib.INTEGER, 3)),
ast.Literal(env.Value(lib.INTEGER, 4)),
]
call_node = ast.Operation("+")
call_node.children = [arg1, arg2]
self.assertEqual(call_node.eval(context), env.Value(lib.INTEGER, 10))
bad_node = ast.Operation("?")
self.assertRaises(Exception, bad_node.eval, context)
def test_cast_node(self):
"""Test the cast node."""
context = env.empty_context()
context.store(lib.INTEGER)
cast_node = ast.Cast(lib.INTEGER.name)
cast_node.children = [NULL_LITERAL]
self.assertEqual(cast_node.eval(context), INT_LITERAL.value)
bad_node = ast.Cast("missing")
self.assertRaises(Exception, bad_node.eval, context)
def test_identifier_node(self):
"""Test the identifier node."""
context = env.empty_context()
# Search in local ns
context.store(STRING_LITERAL.value)
ident_node = ast.Identifier(STRING_LITERAL.value.name)
self.assertEqual(ident_node.eval(context), STRING_LITERAL.value)
# Search in parent ns
context.substitute()
self.assertEqual(ident_node.eval(context), STRING_LITERAL.value)
# Identifier does not exist
bad_node = ast.Identifier("missing")
self.assertRaises(Exception, bad_node.eval, context)
def test_literal_node(self):
"""Test the literal node."""
context = env.empty_context()
self.assertEqual(NULL_LITERAL.eval(context), NULL_LITERAL.value)
self.assertEqual(STRING_LITERAL.eval(context), STRING_LITERAL.value)
self.assertEqual(TRUE_LITERAL.eval(context), TRUE_LITERAL.value)
self.assertEqual(FALSE_LITERAL.eval(context), FALSE_LITERAL.value)
def test_declaration_node(self):
"""Test the declaration node."""
context = env.empty_context()
context.store(env.NULL)
decl_node = ast.Declaration("val", "null")
self.assertEqual(decl_node.eval(context), NULL_LITERAL.value)
self.assertEqual(context.find("id", "val"), NULL_LITERAL.value)
self.assertRaises(env.RuntimeException, decl_node.eval, context)
def test_assignment_node(self):
"""Test the assignment node."""
context = env.empty_context()
context.store(env.Value(lib.INTEGER, 1, "value"))
missing_asgn = ast.Assignment("missing")
self.assertRaises(env.NamespaceException, missing_asgn.eval, context)
bad_asgn = ast.Assignment("value")
bad_asgn.add(STRING_LITERAL)
self.assertRaises(env.AssignmentException, bad_asgn.eval, context)
asgn_node = ast.Assignment("value")
asgn_node.add(INT_LITERAL)
self.assertEqual(asgn_node.eval(context), INT_LITERAL.value)
self.assertEqual(context.find("id", "value"), INT_LITERAL.value)
def test_syntax_tree(self):
"""Test the syntax_tree method."""
syntax_tree = ast.syntax_tree()
self.assertTrue(syntax_tree is not None)
self.assertEqual(syntax_tree.name, ast.Sequence.name)
def test_run_in_substitution(self):
"""Test the run_in_substitution method."""
context = env.empty_context()
access_node = AccessNode()
result = ast.run_in_substitution(access_node, context)
self.assertEqual(result, STRING_LITERAL.value)
self.assertRaises(Exception, context.find, "id",
STRING_LITERAL.value.name)
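# --- Illustrative sketch (not part of the original test module) ---
# Demonstrates how the SumNode defined above evaluates its children and runs
# the test suite when this file is executed directly; the behaviour relied on
# here mirrors what the tests above already assert.
if __name__ == "__main__":
    demo_context = env.empty_context()
    demo_sum = SumNode()
    demo_sum.children = [
        ast.Literal(env.Value(lib.INTEGER, 40)),
        ast.Literal(env.Value(lib.INTEGER, 2)),
    ]
    assert demo_sum.eval(demo_context) == env.Value(lib.INTEGER, 42)
    unittest.main()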
|
|
# -*- coding: utf-8 -*-
"""
Django settings for video_village project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import os
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (video_village/config/settings/common.py - 3 = video_village/)
APPS_DIR = ROOT_DIR.path('video_village')
env = environ.Env()
if os.environ.get('AWS_PATH'):
environ.Env.read_env(ROOT_DIR('.amazonenv'))
else:
environ.Env.read_env(ROOT_DIR('.env'))
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'rest_framework',
'localflavor',
'taggit',
'django_tables2',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'video_village.users', # custom users app
# Your stuff: custom apps go here
'videos',
'schedules',
'pis',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'video_village.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Brian Painter""", '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db('DATABASE_URL', default='sqlite:///videos.db'),
# 'default': env.db(''),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'none'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', False)
ACCOUNT_ADAPTER = 'video_village.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'video_village.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
########## CELERY
INSTALLED_APPS += ('video_village.taskapp.celery.CeleryConfig',)
# If you are not using the Django database as the Celery broker (e.g. you use RabbitMQ, Redis or Memcached instead), you can remove the next line.
INSTALLED_APPS += ('kombu.transport.django',)
BROKER_URL = env('CELERY_BROKER_URL', default='django://')
########## END CELERY
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
TAGGIT_CASE_INSENSITIVE = True
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
NGROK_AUTH_USER = env('NGROK_AUTH_USER', default='')
NGROK_AUTH_TOKEN = env('NGROK_AUTH_TOKEN', default='')
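# --- Example .env (illustrative only; keys are taken from the env() calls
# above, values are placeholders) ---
# DJANGO_DEBUG=True
# DATABASE_URL=postgres://user:password@localhost:5432/video_village
# DJANGO_EMAIL_BACKEND=django.core.mail.backends.console.EmailBackend
# DJANGO_ACCOUNT_ALLOW_REGISTRATION=True
# CELERY_BROKER_URL=django://
# NGROK_AUTH_USER=example-user
# NGROK_AUTH_TOKEN=example-token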
|
|
# -*- coding: utf-8 -*-
import logging
import secrets
import webapp2
from webapp2_extras import auth, sessions, jinja2
from jinja2.runtime import TemplateNotFound
from simpleauth import SimpleAuthHandler
import tweepy
import random
from tweepy.error import TweepError
# Consumer keys and access tokens, used for OAuth
consumer_key = secrets.consumer_key
consumer_secret = secrets.consumer_secret
class BaseRequestHandler(webapp2.RequestHandler):
def dispatch(self):
# Get a session store for this request.
self.session_store = sessions.get_store(request=self.request)
try:
# Dispatch the request.
webapp2.RequestHandler.dispatch(self)
finally:
# Save all sessions.
self.session_store.save_sessions(self.response)
@webapp2.cached_property
def jinja2(self):
"""Returns a Jinja2 renderer cached in the app registry"""
return jinja2.get_jinja2(app=self.app)
@webapp2.cached_property
def session(self):
"""Returns a session using the default cookie key"""
return self.session_store.get_session()
@webapp2.cached_property
def auth(self):
return auth.get_auth()
@webapp2.cached_property
def current_user(self):
"""Returns currently logged in user"""
user_dict = self.auth.get_user_by_session()
return self.auth.store.user_model.get_by_id(user_dict['user_id'])
@webapp2.cached_property
def logged_in(self):
"""Returns true if a user is currently logged in, false otherwise"""
return self.auth.get_user_by_session() is not None
def render(self, template_name, template_vars={}):
# Preset values for the template
values = {
'url_for': self.uri_for,
'logged_in': self.logged_in,
'flashes': self.session.get_flashes()
}
# Add manually supplied template values
values.update(template_vars)
# read the template or 404.html
try:
self.response.write(self.jinja2.render_template(template_name, **values))
except TemplateNotFound:
self.abort(404)
def head(self, *args):
"""Head is used by Twitter. If not there the tweet button shows 0"""
pass
class RootHandler(BaseRequestHandler):
def get(self):
"""Handles default landing page"""
if self.logged_in:
self.render('home.html', {
'user': self.current_user,
'session': self.auth.get_user_by_session()
})
else:
self.render('home.html')
class ProfileHandler(BaseRequestHandler):
def get(self):
"""Handles GET /profile"""
if self.logged_in:
self.render('profile.html', {
'user': self.current_user,
'session': self.auth.get_user_by_session()
})
else:
self.redirect('/')
class FollowersHandler(BaseRequestHandler):
def get(self):
"""Handles GET /followers"""
if self.logged_in:
user = self.current_user
# OAuth process, using the keys and tokens
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(user.access_token, user.access_token_secret)
# Creation of the actual interface, using authentication
api = tweepy.API(auth)
followers = list()
incomplete_list = False
try:
# the maximum per request is 200 according to
# https://dev.twitter.com/rest/reference/get/followers/list
                for follower in tweepy.Cursor(api.followers, count=200).items():
followers.append(follower)
except TweepError as e:
logging.error(e)
limits = api.rate_limit_status('statuses')
logging.info(limits)
incomplete_list = True
winner = False
if len(followers) > 0:
random_number = random.randint(0, len(followers)-1)
winner = followers[random_number]
self.render('followers.html', {
'user': user,
'session': self.auth.get_user_by_session(),
'followers': followers,
'winner': winner,
'incomplete_list': incomplete_list,
})
else:
self.redirect('/')
class RetweetsHandler(BaseRequestHandler):
def get(self):
"""Handles GET /retweets"""
if self.logged_in:
user = self.current_user
tweet_id = self.request.get("id")
# OAuth process, using the keys and tokens
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(user.access_token, user.access_token_secret)
# Creation of the actual interface, using authentication
api = tweepy.API(auth)
if not tweet_id:
retweets_list = list()
incomplete_list = False
try:
for tweet in api.retweets_of_me(count=100):
retweets_list.append(tweet)
except TweepError as e:
logging.error(e)
limits = api.rate_limit_status('statuses')
logging.info(limits)
incomplete_list = True
self.render('retweets.html', {
'user': user,
'session': self.auth.get_user_by_session(),
'retweets': retweets_list,
'incomplete_list': incomplete_list,
})
else:
retweet = False
winner = False
incomplete_list = False
try:
retweet = api.get_status(tweet_id)
retweeters = api.retweeters(tweet_id)
if len(retweeters) > 0:
random_number = random.randint(0, len(retweeters)-1)
winner = api.get_user(retweeters[random_number])
except TweepError as e:
logging.error(e)
limits = api.rate_limit_status('statuses')
logging.info(limits)
incomplete_list = True
self.render('retweets.html', {
'user': user,
'session': self.auth.get_user_by_session(),
'winner': winner,
'retweet': retweet,
'incomplete_list': incomplete_list,
})
else:
self.redirect('/')
class SearchesHandler(BaseRequestHandler):
def get(self):
"""Handles GET /searches"""
if self.logged_in:
user = self.current_user
search_id = self.request.get("id")
# OAuth process, using the keys and tokens
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(user.access_token, user.access_token_secret)
# Creation of the actual interface, using authentication
api = tweepy.API(auth)
if not search_id:
searches_list = list()
incomplete_list = False
try:
for search in api.saved_searches():
searches_list.append(search)
except TweepError as e:
logging.error(e)
limits = api.rate_limit_status('statuses')
logging.info(limits)
incomplete_list = True
self.render('searches.html', {
'user': user,
'session': self.auth.get_user_by_session(),
'searches': searches_list,
'incomplete_list': incomplete_list,
})
else:
winner = False
tweet = False
incomplete_list = False
try:
search = api.get_saved_search(search_id)
statuses = list()
tweeters = list()
# max 100 https://dev.twitter.com/rest/reference/get/search/tweets
for status in api.search(q=search.query, count=100):
statuses.append(status)
tweeters.append(status.user)
if len(tweeters) > 0:
random_number = random.randint(0, len(tweeters)-1)
winner = tweeters[random_number]
tweet = statuses[random_number]
except TweepError as e:
logging.error(e)
limits = api.rate_limit_status('statuses')
logging.info(limits)
incomplete_list = True
self.render('searches.html', {
'user': user,
'session': self.auth.get_user_by_session(),
'winner': winner,
'tweet': tweet,
'incomplete_list': incomplete_list,
})
else:
self.redirect('/')
class AuthHandler(BaseRequestHandler, SimpleAuthHandler):
"""Authentication handler for OAuth 2.0, 1.0(a) and OpenID."""
# Enable optional OAuth 2.0 CSRF guard
OAUTH2_CSRF_STATE = True
USER_ATTRS = {
'facebook' : {
'id' : lambda id: ('avatar_url',
'http://graph.facebook.com/{0}/picture?type=large'.format(id)),
'name' : 'name',
'link' : 'link'
},
'google' : {
'picture': 'avatar_url',
'name' : 'name',
'profile': 'link'
},
'windows_live': {
'avatar_url': 'avatar_url',
'name' : 'name',
'link' : 'link'
},
'twitter' : {
'profile_image_url': 'avatar_url',
'screen_name' : 'name',
'link' : 'link'
},
'linkedin' : {
'picture-url' : 'avatar_url',
'first-name' : 'name',
'public-profile-url': 'link'
},
'linkedin2' : {
'picture-url' : 'avatar_url',
'first-name' : 'name',
'public-profile-url': 'link'
},
'foursquare' : {
'photo' : lambda photo: ('avatar_url', photo.get('prefix') + '100x100' + photo.get('suffix')),
'firstName': 'firstName',
'lastName' : 'lastName',
'contact' : lambda contact: ('email',contact.get('email')),
'id' : lambda id: ('link', 'http://foursquare.com/user/{0}'.format(id))
},
'openid' : {
'id' : lambda id: ('avatar_url', '/img/missing-avatar.png'),
'nickname': 'name',
'email' : 'link'
}
}
def _on_signin(self, data, auth_info, provider):
"""Callback whenever a new or existing user is logging in.
data is a user info dictionary.
auth_info contains access token or oauth token and secret.
"""
auth_id = '%s:%s' % (provider, data['id'])
logging.info('Looking for a user with id %s', auth_id)
user = self.auth.store.user_model.get_by_auth_id(auth_id)
_attrs = self._to_user_model_attrs(data, self.USER_ATTRS[provider])
if user:
logging.info('Found existing user to log in')
# Existing users might've changed their profile data so we update our
# local model anyway. This might result in quite inefficient usage
# of the Datastore, but we do this anyway for demo purposes.
#
# In a real app you could compare _attrs with user's properties fetched
# from the datastore and update local user in case something's changed.
user.populate(**_attrs)
user.access_token = auth_info['oauth_token']
user.access_token_secret = auth_info['oauth_token_secret']
user.put()
self.auth.set_session(
self.auth.store.user_to_dict(user))
else:
# check whether there's a user currently logged in
# then, create a new user if nobody's signed in,
# otherwise add this auth_id to currently logged in user.
if self.logged_in:
logging.info('Updating currently logged in user')
u = self.current_user
u.populate(**_attrs)
                u.access_token = auth_info['oauth_token']
                u.access_token_secret = auth_info['oauth_token_secret']
# The following will also do u.put(). Though, in a real app
# you might want to check the result, which is
# (boolean, info) tuple where boolean == True indicates success
# See webapp2_extras.appengine.auth.models.User for details.
u.add_auth_id(auth_id)
else:
logging.info('Creating a brand new user')
ok, user = self.auth.store.user_model.create_user(auth_id, **_attrs)
if ok:
user.access_token = auth_info['oauth_token']
user.access_token_secret = auth_info['oauth_token_secret']
user.put()
self.auth.set_session(self.auth.store.user_to_dict(user))
# Remember auth data during redirect, just for this demo. You wouldn't
# normally do this.
#self.session.add_flash(data, 'data - from _on_signin(...)')
#self.session.add_flash(auth_info, 'auth_info - from _on_signin(...)')
self.redirect('/')
def logout(self):
self.auth.unset_session()
self.redirect('/')
def handle_exception(self, exception, debug):
logging.error(exception)
self.render('error.html', {'exception': exception})
def _callback_uri_for(self, provider):
return self.uri_for('auth_callback', provider=provider, _full=True)
def _get_consumer_info_for(self, provider):
"""Returns a tuple (key, secret) for auth init requests."""
return secrets.AUTH_CONFIG[provider]
def _to_user_model_attrs(self, data, attrs_map):
"""Get the needed information from the provider dataset."""
user_attrs = {}
for k, v in attrs_map.iteritems():
attr = (v, data.get(k)) if isinstance(v, str) else v(data.get(k))
user_attrs.setdefault(*attr)
return user_attrs
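# --- Illustrative routing sketch (not part of the original module) ---
# Shows one way these handlers could be wired into a webapp2 application. The
# session secret is a placeholder, and the OAuth begin/callback routes required
# by SimpleAuthHandler are omitted because their exact form is
# deployment-specific.
app = webapp2.WSGIApplication([
    webapp2.Route('/', handler=RootHandler, name='home'),
    webapp2.Route('/profile', handler=ProfileHandler, name='profile'),
    webapp2.Route('/followers', handler=FollowersHandler, name='followers'),
    webapp2.Route('/retweets', handler=RetweetsHandler, name='retweets'),
    webapp2.Route('/searches', handler=SearchesHandler, name='searches'),
    webapp2.Route('/logout', handler=AuthHandler, handler_method='logout',
                  name='logout'),
], config={'webapp2_extras.sessions': {'secret_key': 'replace-with-a-real-secret'}},
   debug=False)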
|
|
"""
This collection of functions scrapes most of the important data about a movie's
observable characteristics from the film's summary page on Box Office Mojo.
Last Run: December, 2016
"""
import requests
from bs4 import BeautifulSoup
import re
import dateutil.parser
from string import ascii_uppercase
import pandas as pd
# import pickle
import time
import urllib.request
import csv
sess = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=10)
sess.mount('http://', adapter)
## functions
def get_movie_value(soup, field_name):
'''Grab a value from boxofficemojo HTML
Takes a string attribute of a movie on the page and
returns the string in the next sibling object
(the value for that attribute)
or None if nothing is found.
'''
obj = soup.find(text=re.compile(field_name))
if not obj:
return None
# this works for most of the values
next_sibling = obj.findNextSibling()
if next_sibling:
return next_sibling.text #.encode('ascii','ignore')
else:
return None
def get_movie_title(soup):
'''
Get the movie's title from the header table
'''
obj = soup.find('title')
if not obj:
return None
# this works for most of the values
try:
name = "(".join(obj.text.split('(')[:-1]).strip()
if name == "":
name = "".join(obj.text.split('-')[:-1]).strip()
return name #.encode('ascii','ignore')
except:
return None
def get_theaters(soup):
'''
    Grabs the largest number of theaters that a film was shown in over its release cycle
'''
nonBreakSpace = u'\xa0'
obj = soup.find(text=re.compile('Widest'+nonBreakSpace+'Release:'))
if not obj:
return None
next_obj = obj.findNext('td')
if next_obj.contents[0]:
return next_obj.contents[0].strip().split()[0] #.encode('ascii','ignore')
else:
return None
def get_close(soup):
'''
Grabs the last date that the movie was shown in cinemas
'''
nonBreakSpace = u'\xa0'
obj = soup.find(text=re.compile('Close'+nonBreakSpace+'Date:'))
if not obj:
return None
next_obj = obj.findNext('td')
if next_obj.contents[0]:
return next_obj.contents[0].strip().split()[0] #.encode('ascii','ignore')
else:
return None
def get_inrelease(soup):
'''
Grabs the number of days a film was in release
'''
#nonBreakSpace = u'\xa0'
obj = soup.find(text=re.compile('In Release:'))
if not obj:
return None
next_obj = obj.findNext('td')
if next_obj.contents[0]:
return next_obj.contents[0].strip().split()[0] #.encode('ascii','ignore')
else:
return None
def get_foreigntotal(soup):
'''
Grabs the foreign earnings of the film aggregated across markets outside the USA
'''
nonBreakSpace = u'\xa0'
obj = soup.find(text=re.compile('Foreign:'))
if not obj:
return None
next_obj = obj.findNext('td')
if next_obj.contents[0]:
return next_obj.contents[0].strip().split()[0] #.encode('ascii','ignore')
else:
return None
def get_openingweekend(soup):
'''
Grabs the opening weekend box office for a film that was released straight
to "wide release"
'''
try:
nonBreakSpace = u'\xa0'
obj = soup.find(text=re.compile('Opening'+nonBreakSpace+'Weekend:'))
if not obj:
return None
next_obj = obj.findNext('td')
if next_obj.contents[0]:
return next_obj.contents[0].strip().split()[0] #.encode('ascii','ignore')
else:
return None
except:
return None
def get_openingweekend_limited(soup):
'''
For a film that was first released in limited theatres, gets the opening
weekend box office of the limited release phase
'''
try:
nonBreakSpace = u'\xa0'
obj = soup.find(text=re.compile('Limited'+nonBreakSpace+'Opening'+nonBreakSpace+'Weekend:'))
if not obj:
return None
next_obj = obj.findNext('b')
if next_obj.contents[0]:
return next_obj.contents[0].strip().split()[0] #.encode('ascii','ignore')
else:
return None
except:
return None
def get_openingweekend_wide(soup):
'''
For a film that was first released in limited theatres, gets the opening
weekend box office of the wide release phase
'''
try:
nonBreakSpace = u'\xa0'
obj = soup.find(text=re.compile('Wide'+nonBreakSpace+'Opening'+nonBreakSpace+'Weekend:'))
if not obj:
return None
next_obj = obj.findNext('td')
if next_obj.contents[0]:
return next_obj.contents[0].strip().split()[0] #.encode('ascii','ignore')
else:
return None
except:
return None
def get_all_players(soup, field_name_list):
'''
Will return a string containing a list of people who were in a certain role
within the movie production.
Currently works for: director, producer, actor, writer, cinematographer
composer
'''
for item in set(field_name_list):
my_text = soup.find(text=item)
if my_text:
my_td = my_text.findNext('td').getText(separator=u',') #.encode('ascii','ignore')
return my_td
return None
def to_date(datestring):
'''
    A helper function that transforms a date string into a "proper date format"
'''
try:
date = dateutil.parser.parse(datestring)
return date
except:
return datestring
def money_to_int(moneystring):
'''
A helper function to strip out dollar signs ($) and commas leaving any
    dollar value as an integer
'''
try:
moneystring = moneystring.replace('$', '').replace(',', '')
return int(moneystring)
except:
return moneystring
def runtime_to_minutes(runtimestring):
'''
A helper function that converts the run time of movies posted as hours and
minutes into minutes
'''
try:
runtime = runtimestring.split()
try:
minutes = int(runtime[0])*60 + int(runtime[2])
return minutes
except:
return None
except:
return runtimestring
def process_movie(url):
'''
Takes a URL to a movie website on Box Office Mojo and collects all the
relevant observable characteristics from the summary page.
'''
headers = ['movie_id','movie_title',
'domestic_total_gross', 'foreign_total_gross', 'opening_weekend',
'opening_weekend_limited', 'opening_weekend_wide',
'release_date', 'close_date' , 'in_release_days' ,
'runtime_mins',
'rating', 'genre', 'distributor', 'director', 'producer',
'production_budget', 'widest_release_theaters',
'actors', 'writers', 'cinematographers', 'composers'
]
response = sess.get(url)
if response.status_code != 200:
return None
page = response.text
soup = BeautifulSoup(page,"lxml")
## --- Create a movie ID from the URL and get the title
movie_id = url.rsplit('=', 1)[-1].rsplit('.', 1)[0]
movie_title = get_movie_title(soup)
## --- Date Specific
raw_release_date = get_movie_value(soup,'Release Date')
release_date = to_date(raw_release_date)
raw_close_date = get_close(soup)
close_date = to_date(raw_close_date)
in_release = get_inrelease(soup)
## --- Box Office
raw_domestic_total_gross = get_movie_value(soup,'Domestic Total')
domestic_total_gross = money_to_int(raw_domestic_total_gross)
raw_foreign_total_gross = get_foreigntotal(soup)
foreign_total_gross = money_to_int(raw_foreign_total_gross)
raw_domestic_opening_weekend = get_openingweekend(soup)
domestic_opening_weekend = money_to_int(raw_domestic_opening_weekend)
raw_domestic_opening_weekend_limited = get_openingweekend_limited(soup)
domestic_opening_weekend_limited = money_to_int(raw_domestic_opening_weekend_limited)
raw_domestic_opening_weekend_wide = get_openingweekend_wide(soup)
domestic_opening_weekend_wide = money_to_int(raw_domestic_opening_weekend_wide)
## -- remaining characteristics
raw_runtime = get_movie_value(soup,'Runtime')
runtime = runtime_to_minutes(raw_runtime)
rating = get_movie_value(soup,'MPAA Rating')
genre = get_movie_value(soup,'Genre: ')
distributor = get_movie_value(soup,'Distributor: ')
production_budget = get_movie_value(soup, 'Production Budget: ')
widest_release_theaters = get_theaters(soup)
## --- People involved in the movie
director = get_all_players(soup,['Director:','Director'])
producer = get_all_players(soup,['Producer:','Producers:',
'Producer','Producers'])
actors = get_all_players(soup,['Actor:','Actors:','Actor','Actors'])
writers = get_all_players(soup,['Writer:','Writers:',
'Screenwriter:','Screenwriters:',
'Writer','Writers',
'Screenwriter','Screenwriters'])
cinematographers = get_all_players(soup, ['Cinematographer:','Cinematographer',
'Cinematographers:','Cinematographers'])
composers = get_all_players(soup, ['Composer:','Composers:',
'Composer','Composers'])
## --- Put the data collected into a pandas dataframe
df_movie = pd.DataFrame([[movie_id, movie_title,
domestic_total_gross, foreign_total_gross,
domestic_opening_weekend, domestic_opening_weekend_limited,
domestic_opening_weekend_wide,
release_date, close_date, in_release, runtime,
rating, genre, distributor, director, producer,
production_budget, widest_release_theaters,
actors, writers, cinematographers, composers
]],
columns=headers)
# return a line of data to the object assigned
return df_movie
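# --- Usage sketch (illustrative; the example URLs are assumptions) ---
# Collects one row of observable characteristics per film via process_movie()
# and concatenates the rows into a single DataFrame.
if __name__ == "__main__":
    example_urls = [
        'http://www.boxofficemojo.com/movies/?id=avatar.htm',
        'http://www.boxofficemojo.com/movies/?id=titanic.htm',
    ]
    frames = []
    for movie_url in example_urls:
        row = process_movie(movie_url)
        if row is not None:
            frames.append(row)
        time.sleep(1)  # pause between requests to avoid hammering the site
    if frames:
        movies_df = pd.concat(frames, ignore_index=True)
        movies_df.to_csv('movies.csv', index=False)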
|
|
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.python.failure import Failure
from twext.enterprise.locking import NamedLock
from twext.python.log import Logger
from txweb2 import responsecode
from txweb2.http import HTTPError, Response
from txweb2.http_headers import MimeType
from txdav.xml import element as davxml
from txweb2.dav.http import messageForFailure, statusForFailure, \
ErrorResponse
from twistedcaldav import caldavxml
from twistedcaldav.customxml import calendarserver_namespace
from twistedcaldav.accounting import accountingEnabled, emitAccounting
from twistedcaldav.config import config
from twistedcaldav.ical import Component
from txdav.caldav.datastore.scheduling import addressmapping
from txdav.caldav.datastore.scheduling.caldav.delivery import ScheduleViaCalDAV
from txdav.caldav.datastore.scheduling.cuaddress import EmailCalendarUser
from txdav.caldav.datastore.scheduling.cuaddress import InvalidCalendarUser, \
OtherServerCalendarUser, calendarUserFromCalendarUserAddress
from txdav.caldav.datastore.scheduling.cuaddress import LocalCalendarUser
from txdav.caldav.datastore.scheduling.cuaddress import RemoteCalendarUser
from txdav.caldav.datastore.scheduling.imip.delivery import ScheduleViaIMip
from txdav.caldav.datastore.scheduling.ischedule.delivery import ScheduleViaISchedule
from txdav.caldav.datastore.scheduling.itip import iTIPRequestStatus
from pycalendar.period import Period
import hashlib
from collections import namedtuple
"""
CalDAV/Server-to-Server scheduling behavior.
This module handles the delivery of scheduling messages to organizer and attendees. The basic idea is to first
confirm the integrity of the incoming scheduling message and check authorization. Appropriate L{DeliveryService}s
are then used to deliver the message to attendees or organizer. Delivery responses are processed and returned.
This takes into account podding of users by detecting the appropriate host for a calendar user and then
dispatching the delivery accordingly.
The L{Scheduler} class defines the basic behavior for processing deliveries. Sub-classes are defined for the
different ways a delivery can be triggered.
L{CalDAVScheduler} - handles deliveries for scheduling messages originating from inside the CalDAV server
i.e. user PUTs or POSTs.
L{IScheduleScheduler} - handles deliveries for scheduling messages being POSTed to the iSchedule inbox.
L{IMIPScheduler} - handles deliveries for POSTs on the iMIP inbox (coming from the mail gateway).
L{DirectScheduler} - used when doing some internal processing (e.g., inbox item processing during an
upgrade).
Here is a typical flow of activity for an iTIP between users on the server:
iTIP PUT request
\
\_L{ImplicitScheduler} - does CalDAV-schedule logic and sends iTIP message
\
\_L{CalDAVScheduler} - receives iTIP message
\
\_L{ScheduleViaCalDAV} - handles delivery of iTIP message
\
\_L{ImplicitProcessor} - dispatches iTIP message (also auto-accept)
\
\_L{iTipProcessing} - processes iTIP message
Here is a typical flow of activity for an iTIP between an organizer on the server and an iMIP attendee:
iTIP PUT request
\
\_L{ImplicitScheduler}
\
\_L{CalDAVScheduler}
\
\_L{ScheduleViaIMip}
Here is a typical flow of activity for an iTIP between an organizer not on the server and an attendee on the server:
iTIP POST on /ischedule
\
\_L{IScheduleScheduler}
\
\_L{ScheduleViaCalDAV}
\
\_L{ImplicitProcessor}
\
\_L{iTipProcessing}
"""
__all__ = [
"Scheduler",
"RemoteScheduler",
"DirectScheduler",
]
log = Logger()
class Scheduler(object):
scheduleResponse = None
errorResponse = None # The class used for generating an HTTP XML error response
errorElements = {
"originator-missing": (),
"originator-invalid": (),
"originator-denied": (),
"recipient-missing": (),
"recipient-invalid": (),
"organizer-denied": (),
"attendee-denied": (),
"invalid-calendar-data-type": (),
"invalid-calendar-data": (),
"invalid-scheduling-message": (),
"max-recipients": (),
}
def __init__(self, txn, originator_uid, logItems=None, noAttendeeRefresh=False):
self.txn = txn
self.originator_uid = originator_uid
self.logItems = logItems
self.noAttendeeRefresh = noAttendeeRefresh
self.originator = None
self.recipients = None
self.recipientsNormalizationMap = {}
self.calendar = None
self.organizer = None
self.attendee = None
self.isiTIPRequest = None
self.timeRange = None
self.excludeUID = None
self.fakeTheResult = False
self.method = "Unknown"
self.internal_request = False
@inlineCallbacks
def doSchedulingViaPOST(self, originator, recipients, calendar):
"""
The Scheduling POST operation on an Outbox.
"""
self.calendar = calendar
yield self.preProcessCalendarData()
if self.logItems is not None:
self.logItems["recipients"] = len(recipients)
self.logItems["cl"] = str(len(str(calendar)))
# We might trigger an implicit scheduling operation here that will require consistency
# of data for all events with the same UID. So detect this and use a lock
if calendar.resourceType() != "VFREEBUSY":
uid = calendar.resourceUID()
yield NamedLock.acquire(self.txn, "ImplicitUIDLock:{}".format(hashlib.md5(uid).hexdigest(),))
result = (yield self.doSchedulingDirectly("POST", originator, recipients, calendar))
if self.logItems is not None:
if self.checkForFreeBusy():
self.logItems["freebusy"] = "true"
else:
self.logItems["itip-method"] = self.calendar.propertyValue("METHOD").lower()
returnValue(result)
def doSchedulingViaPUT(self, originator, recipients, calendar, internal_request=False, suppress_refresh=False):
"""
The implicit scheduling PUT operation.
"""
return self.doSchedulingDirectly("PUT", originator, recipients, calendar, internal_request, suppress_refresh)
def doSchedulingDirectly(self, descriptor, originator, recipients, calendar, internal_request=False, suppress_refresh=False):
"""
The implicit scheduling operation.
"""
self.method = descriptor
# Load various useful bits doing some basic checks on those
self.originator = originator
self.recipients = recipients
self.calendar = calendar
self.internal_request = internal_request
self.suppress_refresh = suppress_refresh
# Do some extra authorization checks
self.checkAuthorization()
return self.doScheduling()
@inlineCallbacks
def doScheduling(self):
# Check validity of Originator header.
yield self.checkOriginator()
# Get recipient details.
yield self.checkRecipients()
# Check calendar data.
self.checkCalendarData()
# Check validity of ORGANIZER
yield self.checkOrganizer()
# Do security checks (e.g. spoofing)
yield self.securityChecks()
# Generate accounting information
self.doAccounting()
# Do some final checks after we have gathered all our information
self.finalChecks()
# Do scheduling tasks
result = (yield self.generateSchedulingResponse())
returnValue(result)
def preProcessCalendarData(self):
"""
After loading calendar data from the request, do some optional processing of it. This method will be
overridden by those schedulers that need to do special things to the data.
"""
pass
def checkAuthorization(self):
raise NotImplementedError
def checkOriginator(self):
raise NotImplementedError
def checkRecipients(self):
raise NotImplementedError
def checkOrganizer(self):
raise NotImplementedError
def checkOrganizerAsOriginator(self):
raise NotImplementedError
def checkAttendeeAsOriginator(self):
raise NotImplementedError
def checkCalendarData(self):
# Skip all the valid data checks for an internal request as we are going to assume all the internal
# request data has been generated properly.
if not self.internal_request:
# Must be a valid calendar
try:
self.calendar.validCalendarData()
            except ValueError as e:
log.error(
"{method} request calendar component is not valid:{exc} {cal}",
method=self.method,
exc=e,
cal=self.calendar,
)
raise HTTPError(self.errorResponse(
responsecode.FORBIDDEN,
self.errorElements["invalid-calendar-data"],
description="Calendar component is not valid"
))
# Must have a METHOD
if not self.calendar.isValidMethod():
log.error(
"{method} request must have valid METHOD property in calendar component: {cal}",
method=self.method,
cal=self.calendar,
)
raise HTTPError(self.errorResponse(
responsecode.FORBIDDEN,
self.errorElements["invalid-scheduling-message"],
description="Must have valid METHOD property"
))
# Verify iTIP behavior
if not self.calendar.isValidITIP():
log.error(
"{method} request must have a calendar component that satisfies iTIP requirements: {cal}",
method=self.method,
cal=self.calendar,
)
raise HTTPError(self.errorResponse(
responsecode.FORBIDDEN,
self.errorElements["invalid-scheduling-message"],
description="Must have a calendar component that satisfies iTIP requirements"
))
# X-CALENDARSERVER-ACCESS is not allowed in Outbox POSTs
if self.calendar.hasProperty(Component.ACCESS_PROPERTY):
log.error(
"X-CALENDARSERVER-ACCESS not allowed in a calendar component {method} request: {cal}",
method=self.method,
cal=self.calendar,
)
raise HTTPError(self.errorResponse(
responsecode.FORBIDDEN,
(calendarserver_namespace, "no-access-restrictions"),
"Private events cannot be scheduled",
))
# Determine iTIP method mode
if self.calendar.propertyValue("METHOD") in ("PUBLISH", "REQUEST", "ADD", "CANCEL", "DECLINECOUNTER"):
self.isiTIPRequest = True
elif self.calendar.propertyValue("METHOD") in ("REPLY", "COUNTER", "REFRESH"):
self.isiTIPRequest = False
# Verify that there is a single ATTENDEE property
attendees = self.calendar.getAttendees()
# Must have only one
if len(attendees) != 1:
log.error(
"Wrong number of ATTENDEEs in calendar data: {cal}",
cal=str(self.calendar),
)
raise HTTPError(self.errorResponse(
responsecode.FORBIDDEN,
self.errorElements["invalid-scheduling-message"],
"Wrong number of attendees",
))
self.attendee = attendees[0]
else:
msg = "Unknown iTIP METHOD: {}".format(self.calendar.propertyValue("METHOD"),)
log.error(msg)
raise HTTPError(self.errorResponse(
responsecode.FORBIDDEN,
self.errorElements["invalid-scheduling-message"],
description=msg
))
def checkForFreeBusy(self):
if not hasattr(self, "isfreebusy"):
if (self.calendar.propertyValue("METHOD") == "REQUEST") and (self.calendar.mainType() == "VFREEBUSY"):
# Extract time range from VFREEBUSY object
vfreebusies = [v for v in self.calendar.subcomponents() if v.name() == "VFREEBUSY"]
if len(vfreebusies) != 1:
log.error(
"iTIP data is not valid for a VFREEBUSY request: {cal}",
cal=str(self.calendar),
)
raise HTTPError(self.errorResponse(
responsecode.FORBIDDEN,
self.errorElements["invalid-scheduling-message"],
"iTIP data is not valid for a VFREEBUSY request",
))
dtstart = vfreebusies[0].getStartDateUTC()
dtend = vfreebusies[0].getEndDateUTC()
if dtstart is None or dtend is None:
log.error(
"VFREEBUSY start/end not valid: {cal}",
cal=str(self.calendar),
)
raise HTTPError(self.errorResponse(
responsecode.FORBIDDEN,
self.errorElements["invalid-scheduling-message"],
"VFREEBUSY start/end not valid",
))
# Some clients send floating instead of UTC - coerce to UTC
if not dtstart.utc() or not dtend.utc():
log.error(
"VFREEBUSY start or end not UTC: {cal}",
cal=self.calendar,
)
raise HTTPError(self.errorResponse(
responsecode.FORBIDDEN,
self.errorElements["invalid-scheduling-message"],
"VFREEBUSY start or end not UTC",
))
self.timeRange = Period(dtstart, dtend)
# Look for masked UID
self.excludeUID = self.calendar.getMaskUID()
# Do free busy operation
self.isfreebusy = True
else:
# Do regular invite (fan-out)
self.isfreebusy = False
return self.isfreebusy
def securityChecks(self):
raise NotImplementedError
def doAccounting(self):
#
# Accounting
#
# Note that we associate logging with the organizer, not the
# originator, which is good for looking for why something
# shows up in a given principal's calendars, rather than
# tracking the activities of a specific user.
#
if isinstance(self.organizer, LocalCalendarUser):
accountingType = "iTIP-VFREEBUSY" if self.calendar.mainType() == "VFREEBUSY" else "iTIP"
if accountingEnabled(accountingType, self.organizer.record):
emitAccounting(
accountingType,
self.organizer.record,
"Originator: {o}\nRecipients:\n{r}Method:{method}\n\n{cal}".format(
o=str(self.originator),
r=str("".join([" {}\n".format(recipient,) for recipient in self.recipients])),
method=str(self.method),
cal=str(self.calendar),
)
)
def finalChecks(self):
"""
Final checks before doing the actual scheduling.
"""
pass
@inlineCallbacks
def generateSchedulingResponse(self):
log.info(
"METHOD: {method}, Component: {comp}",
method=self.calendar.propertyValue("METHOD"),
comp=self.calendar.mainType(),
)
# For free-busy do immediate determination of iTIP result rather than fan-out
freebusy = self.checkForFreeBusy()
# Prepare for multiple responses
responses = self.scheduleResponse(self.method, responsecode.OK, self.mapRecipientAddress)
# Loop over each recipient and aggregate into lists by service types.
caldav_recipients = []
otherserver_recipients = []
remote_recipients = []
imip_recipients = []
for ctr, recipient in enumerate(self.recipients):
# Check for freebusy limit
if freebusy and config.Scheduling.Options.LimitFreeBusyAttendees and ctr >= config.Scheduling.Options.LimitFreeBusyAttendees:
err = HTTPError(self.errorResponse(
responsecode.NOT_FOUND,
self.errorElements["max-recipients"],
"Too many attendees",
))
responses.add(recipient.cuaddr, Failure(exc_value=err), reqstatus=iTIPRequestStatus.SERVICE_UNAVAILABLE)
continue
if self.fakeTheResult:
responses.add(recipient.cuaddr, responsecode.OK, reqstatus=iTIPRequestStatus.SUCCESS if freebusy else iTIPRequestStatus.MESSAGE_DELIVERED)
elif isinstance(recipient, LocalCalendarUser):
caldav_recipients.append(recipient)
elif isinstance(recipient, OtherServerCalendarUser):
otherserver_recipients.append(recipient)
elif isinstance(recipient, RemoteCalendarUser):
remote_recipients.append(recipient)
elif isinstance(recipient, EmailCalendarUser):
imip_recipients.append(recipient)
else:
err = HTTPError(self.errorResponse(
responsecode.NOT_FOUND,
self.errorElements["recipient-invalid"],
"Unknown recipient",
))
responses.add(recipient.cuaddr, Failure(exc_value=err), reqstatus=iTIPRequestStatus.INVALID_CALENDAR_USER)
# Now process local recipients
if caldav_recipients:
yield self.generateLocalSchedulingResponses(caldav_recipients, responses, freebusy)
# Now process other server recipients
if otherserver_recipients:
yield self.generateRemoteSchedulingResponses(otherserver_recipients, responses, freebusy, getattr(self.txn, 'doing_attendee_refresh', False))
# To reduce chatter, we suppress certain messages
if not self.suppress_refresh or self.calendar.mainType() == "VPOLL":
# Now process remote recipients
if remote_recipients:
yield self.generateRemoteSchedulingResponses(remote_recipients, responses, freebusy)
# Now process iMIP recipients
if imip_recipients:
yield self.generateIMIPSchedulingResponses(imip_recipients, responses, freebusy)
# Return with final response if we are done
returnValue(responses)
def generateLocalSchedulingResponses(self, recipients, responses, freebusy):
"""
Generate scheduling responses for CalDAV recipients.
"""
# Create the scheduler and run it.
requestor = ScheduleViaCalDAV(self, recipients, responses, freebusy)
return requestor.generateSchedulingResponses()
def generateRemoteSchedulingResponses(self, recipients, responses, freebusy, refreshOnly=False):
"""
Generate scheduling responses for remote recipients.
"""
# Create the scheduler and run it.
requestor = ScheduleViaISchedule(self, recipients, responses, freebusy)
return requestor.generateSchedulingResponses(refreshOnly)
def generateIMIPSchedulingResponses(self, recipients, responses, freebusy):
"""
Generate scheduling responses for iMIP recipients.
"""
# Create the scheduler and run it.
requestor = ScheduleViaIMip(self, recipients, responses, freebusy)
return requestor.generateSchedulingResponses()
def mapRecipientAddress(self, cuaddr):
return self.recipientsNormalizationMap.get(cuaddr, cuaddr)
class RemoteScheduler(Scheduler):
def checkOrganizer(self):
"""
Delay ORGANIZER check until we know what their role is.
"""
pass
@inlineCallbacks
def checkRecipients(self):
"""
Check the validity of the Recipient header values. These must all be local as there
is no concept of server-to-server relaying.
"""
results = []
for recipient in self.recipients:
# Get the calendar user object for this recipient
recipientAddress = yield calendarUserFromCalendarUserAddress(recipient, self.txn)
# If no calendar user we may have a remote recipient but we should check whether
# the address is one that ought to be on our server and treat that as a missing
# user. Also if server-to-server is not enabled then remote addresses are not allowed.
if not recipientAddress.hosted():
localUser = (yield addressmapping.mapper.isCalendarUserInMyDomain(recipient))
if localUser:
log.error(
"No record for calendar user address: {r}",
r=recipient,
)
else:
log.error(
"Unknown calendar user address: {r}",
r=recipient,
)
results.append(InvalidCalendarUser(recipient))
else:
# Map recipient to their inbox and cache on calendar user object
inbox = None
if recipientAddress.validRecipient():
if isinstance(recipientAddress, LocalCalendarUser):
recipient_home = yield self.txn.calendarHomeWithUID(recipientAddress.record.uid, create=True)
if recipient_home:
inbox = (yield recipient_home.calendarWithName("inbox"))
else:
inbox = "dummy"
recipientAddress.inbox = inbox
if inbox:
results.append(recipientAddress)
else:
log.error(
"No scheduling for calendar user: {r}",
r=recipient,
)
results.append(InvalidCalendarUser(recipient))
self.recipients = results
class DirectScheduler(Scheduler):
""" An implicit scheduler meant for use by local processes which don't
need to go through all these checks. """
errorResponse = ErrorResponse
def checkAuthorization(self):
pass
def checkOrganizer(self):
pass
def checkOrganizerAsOriginator(self):
pass
def checkAttendeeAsOriginator(self):
pass
def securityChecks(self):
pass
def checkOriginator(self):
pass
def checkRecipients(self):
pass
class ScheduleResponseResponse (Response):
"""
ScheduleResponse L{Response} object.
Renders itself as a CalDAV:schedule-response XML document.
"""
def __init__(self, schedule_response_element, xml_responses, location=None):
"""
@param xml_responses: an iterable of davxml.Response objects.
@param location: the value of the location header to return in the response,
or None.
"""
Response.__init__(self, code=responsecode.OK,
stream=schedule_response_element(*xml_responses).toxml())
self.headers.setHeader("content-type", MimeType("text", "xml"))
if location is not None:
self.headers.setHeader("location", location)
class ScheduleResponseQueue (object):
"""
Stores a list of (typically error) responses for use in a
L{ScheduleResponse}.
"""
log = Logger()
schedule_response_element = caldavxml.ScheduleResponse
response_element = caldavxml.Response
recipient_element = caldavxml.Recipient
recipient_uses_href = True
request_status_element = caldavxml.RequestStatus
error_element = davxml.Error
response_description_element = davxml.ResponseDescription
calendar_data_element = caldavxml.CalendarData
ScheduleResponseDetails = namedtuple(
"ScheduleResponseDetails",
["recipient", "reqstatus", "calendar", "error", "message", ]
)
def __init__(self, method, success_response, recipient_mapper=None):
"""
@param method: the name of the method generating the queue.
@param success_response: the response to return in lieu of a
L{ScheduleResponse} if no responses are added to this queue.
"""
self.responses = []
self.method = method
self.success_response = success_response
self.recipient_mapper = recipient_mapper
self.location = None
def setLocation(self, location):
"""
@param location: the value of the location header to return in the response,
or None.
"""
self.location = location
def add(self, recipient, what, reqstatus=None, calendar=None, suppressErrorLog=False):
"""
Add a response.
@param recipient: the recipient for this response.
@param what: a status code or a L{Failure} for the given recipient.
@param reqstatus: the iTIP request-status for the given recipient.
@param calendar: the calendar data for the given recipient response.
@param suppressErrorLog: whether to suppress a log message for errors; primarily
this is used when trying to process a VFREEBUSY over iMIP, which isn't
supported.
"""
if type(what) is int:
code = what
error = None
message = responsecode.RESPONSES[code]
elif isinstance(what, Failure):
code = statusForFailure(what)
error = self.errorForFailure(what)
message = messageForFailure(what)
else:
raise AssertionError("Unknown data type: {}".format(what,))
if self.recipient_mapper is not None:
recipient = self.recipient_mapper(recipient)
if not suppressErrorLog and code > 400: # Error codes only
self.log.error(
"Error during {method} for {r}: {msg}",
method=self.method,
r=recipient,
msg=message,
)
details = ScheduleResponseQueue.ScheduleResponseDetails(
self.recipient_element(davxml.HRef.fromString(recipient)) if self.recipient_uses_href else self.recipient_element.fromString(recipient),
self.request_status_element(reqstatus),
calendar,
error,
self.response_description_element(message) if message is not None else None,
)
self.responses.append(details)
def errorForFailure(self, failure):
if failure.check(HTTPError) and isinstance(failure.value.response, ErrorResponse):
return self.error_element(failure.value.response.error)
else:
return None
def clone(self, recipient, request_status, calendar_data, error, desc):
"""
Add a response cloned from existing data.
@param clone: the response to clone.
"""
details = ScheduleResponseQueue.ScheduleResponseDetails(
self.recipient_element(davxml.HRef.fromString(recipient)) if self.recipient_uses_href else self.recipient_element.fromString(recipient),
self.request_status_element.fromString(request_status),
calendar_data,
self.error_element(*error) if error is not None else None,
self.response_description_element.fromString(desc) if desc is not None else None,
)
self.responses.append(details)
def response(self, format=None):
"""
Generate a L{ScheduleResponseResponse} with the responses contained in the
queue or, if no such responses, return the C{success_response} provided
to L{__init__}.
@return: the response.
"""
if self.responses:
# Convert our queue to all XML elements
xml_responses = []
for response in self.responses:
children = []
children.append(response.recipient)
children.append(response.reqstatus)
if response.calendar is not None:
children.append(self.calendar_data_element.fromCalendar(response.calendar, format))
if response.error is not None:
children.append(response.error)
if response.message is not None:
children.append(response.message)
xml_responses.append(self.response_element(*children))
return ScheduleResponseResponse(self.schedule_response_element, xml_responses, self.location)
else:
return self.success_response
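# Editor's note: the sketch below is illustrative and not part of the original
# module. It shows the typical driving pattern for ScheduleResponseQueue: one
# add() call per recipient (either an HTTP status code or a Failure), followed
# by response() to obtain either a ScheduleResponseResponse or the configured
# success response. The recipient addresses are hypothetical.
#
#   queue = ScheduleResponseQueue("POST", responsecode.OK)
#   queue.add("mailto:user01@example.com", responsecode.OK,
#             reqstatus=iTIPRequestStatus.SUCCESS)
#   queue.add("mailto:unknown@example.com", responsecode.NOT_FOUND,
#             reqstatus=iTIPRequestStatus.INVALID_CALENDAR_USER)
#   http_response = queue.response()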
|
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""The WebDriver implementation."""
import base64
import warnings
from contextlib import contextmanager
from .command import Command
from .webelement import WebElement
from .remote_connection import RemoteConnection
from .errorhandler import ErrorHandler
from .switch_to import SwitchTo
from .mobile import Mobile
from .file_detector import FileDetector, LocalFileDetector
from selenium.common.exceptions import (InvalidArgumentException,
WebDriverException)
from selenium.webdriver.common.by import By
from selenium.webdriver.common.html5.application_cache import ApplicationCache
try:
str = basestring
except NameError:
pass
class WebDriver(object):
"""
Controls a browser by sending commands to a remote server.
This server is expected to be running the WebDriver wire protocol
as defined at
https://github.com/SeleniumHQ/selenium/wiki/JsonWireProtocol
:Attributes:
- session_id - String ID of the browser session started and controlled by this WebDriver.
- capabilities - Dictionary of effective capabilities of this browser session as returned
by the remote server. See https://github.com/SeleniumHQ/selenium/wiki/DesiredCapabilities
- command_executor - remote_connection.RemoteConnection object used to execute commands.
- error_handler - errorhandler.ErrorHandler object used to handle errors.
"""
_web_element_cls = WebElement
def __init__(self, command_executor='http://127.0.0.1:4444/wd/hub',
desired_capabilities=None, browser_profile=None, proxy=None,
keep_alive=False, file_detector=None):
"""
Create a new driver that will issue commands using the wire protocol.
:Args:
- command_executor - Either a string representing URL of the remote server or a custom
remote_connection.RemoteConnection object. Defaults to 'http://127.0.0.1:4444/wd/hub'.
- desired_capabilities - A dictionary of capabilities to request when
starting the browser session. Required parameter.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object.
Only used if Firefox is requested. Optional.
- proxy - A selenium.webdriver.common.proxy.Proxy object. The browser session will
be started with given proxy settings, if possible. Optional.
- keep_alive - Whether to configure remote_connection.RemoteConnection to use
HTTP keep-alive. Defaults to False.
- file_detector - Pass custom file detector object during instantiation. If None,
then default LocalFileDetector() will be used.
"""
if desired_capabilities is None:
raise WebDriverException("Desired Capabilities can't be None")
if not isinstance(desired_capabilities, dict):
raise WebDriverException("Desired Capabilities must be a dictionary")
if proxy is not None:
warnings.warn("Please use FirefoxOptions to set proxy",
DeprecationWarning)
proxy.add_to_capabilities(desired_capabilities)
self.command_executor = command_executor
if type(self.command_executor) is bytes or isinstance(self.command_executor, str):
self.command_executor = RemoteConnection(command_executor, keep_alive=keep_alive)
self._is_remote = True
self.session_id = None
self.capabilities = {}
self.error_handler = ErrorHandler()
self.start_client()
if browser_profile is not None:
warnings.warn("Please use FirefoxOptions to set browser profile",
DeprecationWarning)
self.start_session(desired_capabilities, browser_profile)
self._switch_to = SwitchTo(self)
self._mobile = Mobile(self)
self.file_detector = file_detector or LocalFileDetector()
def __repr__(self):
return '<{0.__module__}.{0.__name__} (session="{1}")>'.format(
type(self), self.session_id)
@contextmanager
def file_detector_context(self, file_detector_class, *args, **kwargs):
"""
Overrides the current file detector (if necessary) in limited context.
Ensures the original file detector is set afterwards.
Example:
with webdriver.file_detector_context(UselessFileDetector):
someinput.send_keys('/etc/hosts')
:Args:
- file_detector_class - Class of the desired file detector. If the class is different
from the current file_detector, then the class is instantiated with args and kwargs
and used as a file detector during the duration of the context manager.
- args - Optional arguments that get passed to the file detector class during
instantiation.
- kwargs - Keyword arguments, passed the same way as args.
"""
last_detector = None
if not isinstance(self.file_detector, file_detector_class):
last_detector = self.file_detector
self.file_detector = file_detector_class(*args, **kwargs)
try:
yield
finally:
if last_detector is not None:
self.file_detector = last_detector
@property
def mobile(self):
return self._mobile
@property
def name(self):
"""Returns the name of the underlying browser for this instance.
:Usage:
- driver.name
"""
if 'browserName' in self.capabilities:
return self.capabilities['browserName']
else:
raise KeyError('browserName not specified in session capabilities')
def start_client(self):
"""
Called before starting a new session. This method may be overridden
to define custom startup behavior.
"""
pass
def stop_client(self):
"""
Called after executing a quit command. This method may be overridden
to define custom shutdown behavior.
"""
pass
def start_session(self, capabilities, browser_profile=None):
"""
Creates a new session with the desired capabilities.
:Args:
- capabilities - A dictionary of capabilities to request when starting the browser session.
- browser_profile - A selenium.webdriver.firefox.firefox_profile.FirefoxProfile object. Only used if Firefox is requested.
"""
if not isinstance(capabilities, dict):
raise InvalidArgumentException("Capabilities must be a dictionary")
w3c_caps = {"firstMatch": [], "alwaysMatch": {}}
if browser_profile:
if "moz:firefoxOptions" in capabilities:
capabilities["moz:firefoxOptions"]["profile"] = browser_profile.encoded
else:
capabilities.update({'firefox_profile': browser_profile.encoded})
w3c_caps["alwaysMatch"].update(capabilities)
parameters = {"capabilities": w3c_caps,
"desiredCapabilities": capabilities}
response = self.execute(Command.NEW_SESSION, parameters)
if 'sessionId' not in response:
response = response['value']
self.session_id = response['sessionId']
self.capabilities = response.get('value')
# if capabilities is none we are probably speaking to
# a W3C endpoint
if self.capabilities is None:
self.capabilities = response.get('capabilities')
# Double check to see if we have a W3C Compliant browser
self.w3c = response.get('status') is None
def _wrap_value(self, value):
if isinstance(value, dict):
converted = {}
for key, val in value.items():
converted[key] = self._wrap_value(val)
return converted
elif isinstance(value, self._web_element_cls):
return {'ELEMENT': value.id, 'element-6066-11e4-a52e-4f735466cecf': value.id}
elif isinstance(value, list):
return list(self._wrap_value(item) for item in value)
else:
return value
def create_web_element(self, element_id):
"""Creates a web element with the specified `element_id`."""
return self._web_element_cls(self, element_id, w3c=self.w3c)
def _unwrap_value(self, value):
if isinstance(value, dict):
if 'ELEMENT' in value or 'element-6066-11e4-a52e-4f735466cecf' in value:
wrapped_id = value.get('ELEMENT', None)
if wrapped_id:
return self.create_web_element(value['ELEMENT'])
else:
return self.create_web_element(value['element-6066-11e4-a52e-4f735466cecf'])
else:
for key, val in value.items():
value[key] = self._unwrap_value(val)
return value
elif isinstance(value, list):
return list(self._unwrap_value(item) for item in value)
else:
return value
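# Editor's note (illustrative, not part of the upstream source): _wrap_value and
# _unwrap_value translate between WebElement objects and their JSON wire form.
# A WebElement with id "abc123" is wrapped as both the legacy and the W3C key:
#
#   {'ELEMENT': 'abc123', 'element-6066-11e4-a52e-4f735466cecf': 'abc123'}
#
# and any dictionary containing either key is unwrapped back into a WebElement
# via create_web_element().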
def execute(self, driver_command, params=None):
"""
Sends a command to be executed by a command.CommandExecutor.
:Args:
- driver_command: The name of the command to execute as a string.
- params: A dictionary of named parameters to send with the command.
:Returns:
The command's JSON response loaded into a dictionary object.
"""
if self.session_id is not None:
if not params:
params = {'sessionId': self.session_id}
elif 'sessionId' not in params:
params['sessionId'] = self.session_id
params = self._wrap_value(params)
response = self.command_executor.execute(driver_command, params)
if response:
self.error_handler.check_response(response)
response['value'] = self._unwrap_value(
response.get('value', None))
return response
# If the server doesn't send a response, assume the command was
# a success
return {'success': 0, 'value': None, 'sessionId': self.session_id}
def get(self, url):
"""
Loads a web page in the current browser session.
"""
self.execute(Command.GET, {'url': url})
@property
def title(self):
"""Returns the title of the current page.
:Usage:
driver.title
"""
resp = self.execute(Command.GET_TITLE)
return resp['value'] if resp['value'] is not None else ""
def find_element_by_id(self, id_):
"""Finds an element by id.
:Args:
- id\_ - The id of the element to be found.
:Usage:
driver.find_element_by_id('foo')
"""
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
"""
Finds multiple elements by id.
:Args:
- id\_ - The id of the elements to be found.
:Usage:
driver.find_elements_by_id('foo')
"""
return self.find_elements(by=By.ID, value=id_)
def find_element_by_xpath(self, xpath):
"""
Finds an element by xpath.
:Args:
- xpath - The xpath locator of the element to find.
:Usage:
driver.find_element_by_xpath('//div/td[1]')
"""
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
"""
Finds multiple elements by xpath.
:Args:
- xpath - The xpath locator of the elements to be found.
:Usage:
driver.find_elements_by_xpath("//div[contains(@class, 'foo')]")
"""
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_link_text(self, link_text):
"""
Finds an element by link text.
:Args:
- link_text: The text of the element to be found.
:Usage:
driver.find_element_by_link_text('Sign In')
"""
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, text):
"""
Finds elements by link text.
:Args:
- text: The link text of the elements to be found.
:Usage:
driver.find_elements_by_link_text('Sign In')
"""
return self.find_elements(by=By.LINK_TEXT, value=text)
def find_element_by_partial_link_text(self, link_text):
"""
Finds an element by a partial match of its link text.
:Args:
- link_text: The text of the element to partially match on.
:Usage:
driver.find_element_by_partial_link_text('Sign')
"""
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
"""
Finds elements by a partial match of their link text.
:Args:
- link_text: The text of the elements to partially match on.
:Usage:
driver.find_elements_by_partial_link_text('Sign')
"""
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_name(self, name):
"""
Finds an element by name.
:Args:
- name: The name of the element to find.
:Usage:
driver.find_element_by_name('foo')
"""
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
"""
Finds elements by name.
:Args:
- name: The name of the elements to find.
:Usage:
driver.find_elements_by_name('foo')
"""
return self.find_elements(by=By.NAME, value=name)
def find_element_by_tag_name(self, name):
"""
Finds an element by tag name.
:Args:
- name: The tag name of the element to find.
:Usage:
driver.find_element_by_tag_name('foo')
"""
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
"""
Finds elements by tag name.
:Args:
- name: The tag name to use when finding elements.
:Usage:
driver.find_elements_by_tag_name('foo')
"""
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_class_name(self, name):
"""
Finds an element by class name.
:Args:
- name: The class name of the element to find.
:Usage:
driver.find_element_by_class_name('foo')
"""
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
"""
Finds elements by class name.
:Args:
- name: The class name of the elements to find.
:Usage:
driver.find_elements_by_class_name('foo')
"""
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
"""
Finds an element by css selector.
:Args:
- css_selector: The css selector to use when finding elements.
:Usage:
driver.find_element_by_css_selector('#foo')
"""
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
"""
Finds elements by css selector.
:Args:
- css_selector: The css selector to use when finding elements.
:Usage:
driver.find_elements_by_css_selector('.foo')
"""
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def execute_script(self, script, *args):
"""
Synchronously executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \*args: Any applicable arguments for your JavaScript.
:Usage:
driver.execute_script('document.title')
"""
converted_args = list(args)
command = None
if self.w3c:
command = Command.W3C_EXECUTE_SCRIPT
else:
command = Command.EXECUTE_SCRIPT
return self.execute(command, {
'script': script,
'args': converted_args})['value']
def execute_async_script(self, script, *args):
"""
Asynchronously executes JavaScript in the current window/frame.
:Args:
- script: The JavaScript to execute.
- \*args: Any applicable arguments for your JavaScript.
:Usage:
driver.execute_async_script('document.title')
"""
converted_args = list(args)
if self.w3c:
command = Command.W3C_EXECUTE_SCRIPT_ASYNC
else:
command = Command.EXECUTE_ASYNC_SCRIPT
return self.execute(command, {
'script': script,
'args': converted_args})['value']
@property
def current_url(self):
"""
Gets the URL of the current page.
:Usage:
driver.current_url
"""
return self.execute(Command.GET_CURRENT_URL)['value']
@property
def page_source(self):
"""
Gets the source of the current page.
:Usage:
driver.page_source
"""
return self.execute(Command.GET_PAGE_SOURCE)['value']
def close(self):
"""
Closes the current window.
:Usage:
driver.close()
"""
self.execute(Command.CLOSE)
def quit(self):
"""
Quits the driver and closes every associated window.
:Usage:
driver.quit()
"""
try:
self.execute(Command.QUIT)
finally:
self.stop_client()
@property
def current_window_handle(self):
"""
Returns the handle of the current window.
:Usage:
driver.current_window_handle
"""
if self.w3c:
return self.execute(Command.W3C_GET_CURRENT_WINDOW_HANDLE)['value']
else:
return self.execute(Command.GET_CURRENT_WINDOW_HANDLE)['value']
@property
def window_handles(self):
"""
Returns the handles of all windows within the current session.
:Usage:
driver.window_handles
"""
if self.w3c:
return self.execute(Command.W3C_GET_WINDOW_HANDLES)['value']
else:
return self.execute(Command.GET_WINDOW_HANDLES)['value']
def maximize_window(self):
"""
Maximizes the current window that webdriver is using
"""
command = Command.MAXIMIZE_WINDOW
if self.w3c:
command = Command.W3C_MAXIMIZE_WINDOW
self.execute(command, {"windowHandle": "current"})
@property
def switch_to(self):
return self._switch_to
# Target Locators
def switch_to_active_element(self):
""" Deprecated use driver.switch_to.active_element
"""
warnings.warn("use driver.switch_to.active_element instead", DeprecationWarning)
return self._switch_to.active_element
def switch_to_window(self, window_name):
""" Deprecated use driver.switch_to.window
"""
warnings.warn("use driver.switch_to.window instead", DeprecationWarning)
self._switch_to.window(window_name)
def switch_to_frame(self, frame_reference):
""" Deprecated use driver.switch_to.frame
"""
warnings.warn("use driver.switch_to.frame instead", DeprecationWarning)
self._switch_to.frame(frame_reference)
def switch_to_default_content(self):
""" Deprecated use driver.switch_to.default_content
"""
warnings.warn("use driver.switch_to.default_content instead", DeprecationWarning)
self._switch_to.default_content()
def switch_to_alert(self):
""" Deprecated use driver.switch_to.alert
"""
warnings.warn("use driver.switch_to.alert instead", DeprecationWarning)
return self._switch_to.alert
# Navigation
def back(self):
"""
Goes one step backward in the browser history.
:Usage:
driver.back()
"""
self.execute(Command.GO_BACK)
def forward(self):
"""
Goes one step forward in the browser history.
:Usage:
driver.forward()
"""
self.execute(Command.GO_FORWARD)
def refresh(self):
"""
Refreshes the current page.
:Usage:
driver.refresh()
"""
self.execute(Command.REFRESH)
# Options
def get_cookies(self):
"""
Returns a set of dictionaries, corresponding to cookies visible in the current session.
:Usage:
driver.get_cookies()
"""
return self.execute(Command.GET_ALL_COOKIES)['value']
def get_cookie(self, name):
"""
Get a single cookie by name. Returns the cookie if found, None if not.
:Usage:
driver.get_cookie('my_cookie')
"""
cookies = self.get_cookies()
for cookie in cookies:
if cookie['name'] == name:
return cookie
return None
def delete_cookie(self, name):
"""
Deletes a single cookie with the given name.
:Usage:
driver.delete_cookie('my_cookie')
"""
self.execute(Command.DELETE_COOKIE, {'name': name})
def delete_all_cookies(self):
"""
Delete all cookies in the scope of the session.
:Usage:
driver.delete_all_cookies()
"""
self.execute(Command.DELETE_ALL_COOKIES)
def add_cookie(self, cookie_dict):
"""
Adds a cookie to your current session.
:Args:
- cookie_dict: A dictionary object, with required keys - "name" and "value";
optional keys - "path", "domain", "secure", "expiry"
Usage:
driver.add_cookie({'name' : 'foo', 'value' : 'bar'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/'})
driver.add_cookie({'name' : 'foo', 'value' : 'bar', 'path' : '/', 'secure':True})
"""
self.execute(Command.ADD_COOKIE, {'cookie': cookie_dict})
# Timeouts
def implicitly_wait(self, time_to_wait):
"""
Sets a sticky timeout to implicitly wait for an element to be found,
or a command to complete. This method only needs to be called one
time per session. To set the timeout for calls to
execute_async_script, see set_script_timeout.
:Args:
- time_to_wait: Amount of time to wait (in seconds)
:Usage:
driver.implicitly_wait(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS, {
'implicit': int(float(time_to_wait) * 1000)})
else:
self.execute(Command.IMPLICIT_WAIT, {
'ms': float(time_to_wait) * 1000})
def set_script_timeout(self, time_to_wait):
"""
Set the amount of time that the script should wait during an
execute_async_script call before throwing an error.
:Args:
- time_to_wait: The amount of time to wait (in seconds)
:Usage:
driver.set_script_timeout(30)
"""
if self.w3c:
self.execute(Command.SET_TIMEOUTS, {
'script': int(float(time_to_wait) * 1000)})
else:
self.execute(Command.SET_SCRIPT_TIMEOUT, {
'ms': float(time_to_wait) * 1000})
def set_page_load_timeout(self, time_to_wait):
"""
Set the amount of time to wait for a page load to complete
before throwing an error.
:Args:
- time_to_wait: The amount of time to wait
:Usage:
driver.set_page_load_timeout(30)
"""
try:
self.execute(Command.SET_TIMEOUTS, {
'pageLoad': int(float(time_to_wait) * 1000)})
except WebDriverException:
self.execute(Command.SET_TIMEOUTS, {
'ms': float(time_to_wait) * 1000,
'type': 'page load'})
def find_element(self, by=By.ID, value=None):
"""
'Private' method used by the find_element_by_* methods.
:Usage:
Use the corresponding find_element_by_* instead of this.
:rtype: WebElement
"""
if self.w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self.execute(Command.FIND_ELEMENT, {
'using': by,
'value': value})['value']
def find_elements(self, by=By.ID, value=None):
"""
'Private' method used by the find_elements_by_* methods.
:Usage:
Use the corresponding find_elements_by_* instead of this.
:rtype: list of WebElement
"""
if self.w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self.execute(Command.FIND_ELEMENTS, {
'using': by,
'value': value})['value']
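# Editor's note (illustrative, not part of the upstream source): under the W3C
# dialect the ID/NAME/CLASS_NAME/TAG_NAME locators above are rewritten to CSS
# selectors before the command is sent, e.g.:
#
#   driver.find_element(By.ID, 'login')        ->  CSS selector '[id="login"]'
#   driver.find_element(By.CLASS_NAME, 'btn')  ->  CSS selector '.btn'
#   driver.find_element(By.NAME, 'q')          ->  CSS selector '[name="q"]'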
@property
def desired_capabilities(self):
"""
Returns the driver's current desired capabilities being used.
"""
return self.capabilities
def get_screenshot_as_file(self, filename):
"""
Gets the screenshot of the current window. Returns False if there is
any IOError, else returns True. Use full paths in your filename.
:Args:
- filename: The full path you wish to save your screenshot to.
:Usage:
driver.get_screenshot_as_file('/Screenshots/foo.png')
"""
png = self.get_screenshot_as_png()
try:
with open(filename, 'wb') as f:
f.write(png)
except IOError:
return False
finally:
del png
return True
def save_screenshot(self, filename):
"""
Gets the screenshot of the current window. Returns False if there is
any IOError, else returns True. Use full paths in your filename.
:Args:
- filename: The full path you wish to save your screenshot to.
:Usage:
driver.save_screenshot('/Screenshots/foo.png')
"""
return self.get_screenshot_as_file(filename)
def get_screenshot_as_png(self):
"""
Gets the screenshot of the current window as a binary data.
:Usage:
driver.get_screenshot_as_png()
"""
return base64.b64decode(self.get_screenshot_as_base64().encode('ascii'))
def get_screenshot_as_base64(self):
"""
Gets the screenshot of the current window as a base64 encoded string
which is useful in embedded images in HTML.
:Usage:
driver.get_screenshot_as_base64()
"""
return self.execute(Command.SCREENSHOT)['value']
def set_window_size(self, width, height, windowHandle='current'):
"""
Sets the width and height of the current window. (window.resizeTo)
:Args:
- width: the width in pixels to set the window to
- height: the height in pixels to set the window to
:Usage:
driver.set_window_size(800,600)
"""
command = Command.SET_WINDOW_SIZE
if self.w3c:
command = Command.W3C_SET_WINDOW_SIZE
self.execute(command, {
'width': int(width),
'height': int(height),
'windowHandle': windowHandle})
def get_window_size(self, windowHandle='current'):
"""
Gets the width and height of the current window.
:Usage:
driver.get_window_size()
"""
command = Command.GET_WINDOW_SIZE
if self.w3c:
command = Command.W3C_GET_WINDOW_SIZE
size = self.execute(command, {'windowHandle': windowHandle})
if size.get('value', None) is not None:
return size['value']
else:
return size
def set_window_position(self, x, y, windowHandle='current'):
"""
Sets the x,y position of the current window. (window.moveTo)
:Args:
- x: the x-coordinate in pixels to set the window position
- y: the y-coordinate in pixels to set the window position
:Usage:
driver.set_window_position(0,0)
"""
if self.w3c:
return self.execute(Command.W3C_SET_WINDOW_POSITION, {
'x': int(x),
'y': int(y)
})
else:
self.execute(Command.SET_WINDOW_POSITION,
{
'x': int(x),
'y': int(y),
'windowHandle': windowHandle
})
def get_window_position(self, windowHandle='current'):
"""
Gets the x,y position of the current window.
:Usage:
driver.get_window_position()
"""
if self.w3c:
return self.execute(Command.W3C_GET_WINDOW_POSITION)['value']
else:
return self.execute(Command.GET_WINDOW_POSITION, {
'windowHandle': windowHandle})['value']
def get_window_rect(self):
"""
Gets the x, y coordinates of the window as well as height and width of
the current window.
:Usage:
driver.get_window_rect()
"""
return self.execute(Command.GET_WINDOW_RECT)['value']
def set_window_rect(self, x=None, y=None, width=None, height=None):
"""
Sets the x, y coordinates of the window as well as height and width of
the current window.
:Usage:
driver.set_window_rect(x=10, y=10)
driver.set_window_rect(width=100, height=200)
driver.set_window_rect(x=10, y=10, width=100, height=200)
"""
if (x is None and y is None) and (height is None and width is None):
raise InvalidArgumentException("x and y or height and width need values")
return self.execute(Command.SET_WINDOW_RECT, {"x": x, "y": y,
"width": width,
"height": height})['value']
@property
def file_detector(self):
return self._file_detector
@file_detector.setter
def file_detector(self, detector):
"""
Set the file detector to be used when sending keyboard input.
By default, this is set to a LocalFileDetector instance (see the constructor).
see FileDetector
see LocalFileDetector
see UselessFileDetector
:Args:
- detector: The detector to use. Must not be None.
"""
if detector is None:
raise WebDriverException("You may not set a file detector that is null")
if not isinstance(detector, FileDetector):
raise WebDriverException("Detector has to be instance of FileDetector")
self._file_detector = detector
@property
def orientation(self):
"""
Gets the current orientation of the device
:Usage:
orientation = driver.orientation
"""
return self.execute(Command.GET_SCREEN_ORIENTATION)['value']
@orientation.setter
def orientation(self, value):
"""
Sets the current orientation of the device
:Args:
- value: orientation to set it to.
:Usage:
driver.orientation = 'landscape'
"""
allowed_values = ['LANDSCAPE', 'PORTRAIT']
if value.upper() in allowed_values:
self.execute(Command.SET_SCREEN_ORIENTATION, {'orientation': value})
else:
raise WebDriverException("You can only set the orientation to 'LANDSCAPE' and 'PORTRAIT'")
@property
def application_cache(self):
""" Returns a ApplicationCache Object to interact with the browser app cache"""
return ApplicationCache(self)
@property
def log_types(self):
"""
Gets a list of the available log types
:Usage:
driver.log_types
"""
return self.execute(Command.GET_AVAILABLE_LOG_TYPES)['value']
def get_log(self, log_type):
"""
Gets the log for a given log type
:Args:
- log_type: type of log that will be returned
:Usage:
driver.get_log('browser')
driver.get_log('driver')
driver.get_log('client')
driver.get_log('server')
"""
return self.execute(Command.GET_LOG, {'type': log_type})['value']
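# Editor's note: a minimal usage sketch, not part of the upstream module. It
# assumes a Selenium server is listening on the default URL used above; the
# capabilities and target URL are illustrative.
#
#   from selenium import webdriver
#   from selenium.webdriver.common.by import By
#
#   driver = webdriver.Remote(
#       command_executor='http://127.0.0.1:4444/wd/hub',
#       desired_capabilities={'browserName': 'firefox'})
#   try:
#       driver.get('https://example.org/')
#       heading = driver.find_element(by=By.TAG_NAME, value='h1')
#       print(driver.title, heading.text)
#   finally:
#       driver.quit()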
|
|
import logging
import random
import json
import time
import numpy
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError, InfluxDBServerError
from requests.exceptions import ConnectionError
from ryu.lib import hub
from nsodbc import nsodbc_factory, init_switch_db, init_flow_db
def watcher_factory(conf):
"""Return a Gauge object based on type.
Arguments:
gauge_conf -- a GaugeConf object with the configuration for this valve.
"""
WATCHER_TYPES = {
'port_state': {
'text': GaugePortStateLogger,
'influx': GaugePortStateInfluxDBLogger,
},
'port_stats': {
'text': GaugePortStatsPoller,
'influx': GaugePortStatsInfluxDBPoller,
},
'flow_table': {
'text': GaugeFlowTablePoller,
'gaugedb': GaugeFlowTableDBLogger,
},
}
w_type = conf.type
db_type = conf.db_type
if w_type in WATCHER_TYPES and db_type in WATCHER_TYPES[w_type]:
return WATCHER_TYPES[w_type][db_type]
else:
return None
def _rcv_time(rcv_time):
return time.strftime('%b %d %H:%M:%S', time.localtime(rcv_time))
class InfluxShipper(object):
"""Convenience class for shipping values to influx db.
Inheritors must have a WatcherConf object as conf.
"""
conf = None
def ship_points(self, points):
try:
client = InfluxDBClient(
host=self.conf.influx_host,
port=self.conf.influx_port,
username=self.conf.influx_user,
password=self.conf.influx_pwd,
database=self.conf.influx_db,
timeout=self.conf.influx_timeout)
return client.write_points(points=points, time_precision='s')
except (ConnectionError, InfluxDBClientError, InfluxDBServerError):
return False
def make_point(self, dp_name, port_name, rcv_time, stat_name, stat_val):
port_tags = {
'dp_name': dp_name,
'port_name': port_name,
}
# InfluxDB has only one integer type, int64. We are logging OF
# stats that are uint64. Use float64 to prevent an overflow.
# q.v. https://docs.influxdata.com/influxdb/v1.2/write_protocols/line_protocol_reference/
point = {
'measurement': stat_name,
'tags': port_tags,
'time': int(rcv_time),
# pylint: disable=no-member
'fields': {'value': numpy.float64(stat_val)}}
return point
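# Editor's note (illustrative, not part of the original module): a point built
# by make_point() for a hypothetical datapath/port looks like the dictionary
# below and is shipped with ship_points([...]), which returns False on any
# InfluxDB or connection error.
#
#   {
#       'measurement': 'packets_in',
#       'tags': {'dp_name': 'dp-1', 'port_name': 'port1.0.1'},
#       'time': 1488776502,
#       'fields': {'value': numpy.float64(76083431)},
#   }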
class GaugeDBHelper(object):
"""
Helper class for gaugedb operations
Inheritors must have a WatcherConf object as conf.
"""
conf = None
db_update_counter = None
conn_string = None
switch_database = None
flow_database = None
conn = None
def setup(self):
self.conn_string = (
'driver={0};server={1};port={2};uid={3};pwd={4}'.format(
self.conf.driver, self.conf.db_ip, self.conf.db_port,
self.conf.db_username, self.conf.db_password))
nsodbc = nsodbc_factory()
self.conn = nsodbc.connect(self.conn_string)
self.switch_database, exists = self.conn.create(self.conf.switches_doc)
if not exists:
init_switch_db(self.switch_database)
self.flow_database, exists = self.conn.create(self.conf.flows_doc)
if not exists:
init_flow_db(self.flow_database)
self.db_update_counter = int(self.conf.db_update_counter)
def refresh_switchdb(self):
self.conn.delete(self.conf.switches_doc)
self.switch_database, _ = self.conn.create(self.conf.switches_doc)
init_switch_db(self.switch_database)
def refresh_flowdb(self):
self.conn.delete(self.conf.flows_doc)
self.flow_database, _ = self.conn.create(self.conf.flows_doc)
init_flow_db(self.flow_database)
class GaugePortStateLogger(object):
def __init__(self, conf, logname):
self.dp = conf.dp
self.conf = conf
self.logger = logging.getLogger(
logname + '.{0}'.format(self.conf.type)
)
def update(self, rcv_time, msg):
rcv_time_str = _rcv_time(rcv_time)
reason = msg.reason
port_no = msg.desc.port_no
ofp = msg.datapath.ofproto
log_msg = None
if reason == ofp.OFPPR_ADD:
log_msg = 'port %s added' % port_no
elif reason == ofp.OFPPR_DELETE:
log_msg = 'port %s deleted' % port_no
elif reason == ofp.OFPPR_MODIFY:
link_down = (msg.desc.state & ofp.OFPPS_LINK_DOWN)
if link_down:
log_msg = 'port %s down' % port_no
else:
log_msg = 'port %s up' % port_no
else:
log_msg = 'port %s unknown state %s' % (port_no, reason)
self.logger.info(log_msg)
if self.conf.file:
with open(self.conf.file, 'a') as logfile:
logfile.write('\t'.join((rcv_time_str, log_msg)) + '\n')
def start(self, ryudp):
pass
def stop(self):
pass
class GaugePortStateInfluxDBLogger(GaugePortStateLogger, InfluxShipper):
"""
> use faucet
Using database faucet
> precision rfc3339
> select * from port_state_reason where port_name = 'port1.0.1' order by time desc limit 10;
name: port_state_reason
-----------------------
time dp_name port_name value
2017-02-21T02:12:29Z windscale-faucet-1 port1.0.1 2
2017-02-21T02:12:25Z windscale-faucet-1 port1.0.1 2
2016-07-27T22:05:08Z windscale-faucet-1 port1.0.1 2
2016-05-25T04:33:00Z windscale-faucet-1 port1.0.1 2
2016-05-25T04:32:57Z windscale-faucet-1 port1.0.1 2
2016-05-25T04:31:21Z windscale-faucet-1 port1.0.1 2
2016-05-25T04:31:18Z windscale-faucet-1 port1.0.1 2
2016-05-25T04:27:07Z windscale-faucet-1 port1.0.1 2
2016-05-25T04:27:04Z windscale-faucet-1 port1.0.1 2
2016-05-25T04:24:53Z windscale-faucet-1 port1.0.1 2
"""
def __init__(self, conf, logname):
super(GaugePortStateInfluxDBLogger, self).__init__(conf, logname)
def update(self, rcv_time, msg):
super(GaugePortStateInfluxDBLogger, self).update(rcv_time, msg)
reason = msg.reason
port_no = msg.desc.port_no
if port_no in self.dp.ports:
port_name = self.dp.ports[port_no].name
points = [
self.make_point(
self.dp.name, port_name, rcv_time, 'port_state_reason', reason)]
if not self.ship_points(points):
self.logger.warning('error shipping port_state_reason points')
class GaugePoller(object):
"""A ryu thread object for sending and receiving openflow stats requests.
The thread runs in a loop sending a request, sleeping then checking a
response was received before sending another request.
The methods send_req, update and no_response should be implemented by
subclasses.
"""
def __init__(self, conf, logname):
self.dp = conf.dp
self.conf = conf
self.thread = None
self.reply_pending = False
self.interval = self.conf.interval
self.logger = logging.getLogger(
logname + '.{0}'.format(self.conf.type)
)
self.ryudp = None
def start(self, ryudp):
self.ryudp = ryudp
self.stop()
self.thread = hub.spawn(self)
def stop(self):
if self.running():
hub.kill(self.thread)
hub.joinall([self.thread])
self.thread = None
def __call__(self):
"""Send request loop.
Delays the initial request for a random interval to reduce load.
Then sends a request to the datapath, waits the specified interval and
checks that a response has been received in a loop."""
#TODO: this should use a deterministic method instead of random
hub.sleep(random.randint(1, self.conf.interval))
while True:
self.send_req()
self.reply_pending = True
hub.sleep(self.conf.interval)
if self.reply_pending:
self.no_response()
def running(self):
return self.thread is not None
def send_req(self):
"""Send a stats request to a datapath."""
raise NotImplementedError
def update(self, rcv_time, msg):
"""Handle the responses to requests.
Called when a reply to a stats request sent by this object is received
by the controller.
It should acknowledge the receipt by setting self.reply_pending to
false.
Arguments:
rcv_time -- the time the response was received
msg -- the stats reply message
"""
raise NotImplementedError
def no_response(self):
"""Called when a polling cycle passes without receiving a response."""
raise NotImplementedError
def _stat_port_name(self, msg, stat):
if stat.port_no == msg.datapath.ofproto.OFPP_CONTROLLER:
return 'CONTROLLER'
elif stat.port_no == msg.datapath.ofproto.OFPP_LOCAL:
return 'LOCAL'
elif stat.port_no in self.dp.ports:
return self.dp.ports[stat.port_no].name
else:
self.logger.info('stats for unknown port %u', stat.port_no)
return None
def _format_port_stats(self, delim, stat):
formatted_port_stats = []
for stat_name_list, stat_val in (
(('packets', 'out'), stat.tx_packets),
(('packets', 'in'), stat.rx_packets),
(('bytes', 'out'), stat.tx_bytes),
(('bytes', 'in'), stat.rx_bytes),
(('dropped', 'out'), stat.tx_dropped),
(('dropped', 'in'), stat.rx_dropped),
(('errors', 'in'), stat.rx_errors)):
# For openvswitch, unsupported statistics are set to
# all-1-bits (UINT64_MAX), skip reporting them
if stat_val != 2**64-1:
stat_name = delim.join(stat_name_list)
formatted_port_stats.append((stat_name, stat_val))
return formatted_port_stats
class GaugePortStatsPoller(GaugePoller):
"""Periodically sends a port stats request to the datapath and parses
and outputs the response.
"""
def __init__(self, conf, logname):
super(GaugePortStatsPoller, self).__init__(conf, logname)
def send_req(self):
ofp = self.ryudp.ofproto
ofp_parser = self.ryudp.ofproto_parser
req = ofp_parser.OFPPortStatsRequest(self.ryudp, 0, ofp.OFPP_ANY)
self.ryudp.send_msg(req)
def _update_line(self, rcv_time_str, stat_name, stat_val):
return '\t'.join((rcv_time_str, stat_name, str(stat_val))) + '\n'
def update(self, rcv_time, msg):
# TODO: it may be worthwhile verifying this is the correct stats
# response before doing this
rcv_time_str = _rcv_time(rcv_time)
self.reply_pending = False
for stat in msg.body:
port_name = self._stat_port_name(msg, stat)
if port_name is not None:
with open(self.conf.file, 'a') as logfile:
log_lines = []
for stat_name, stat_val in self._format_port_stats('-', stat):
dp_port_name = '-'.join((
self.dp.name, port_name, stat_name))
log_lines.append(
self._update_line(
rcv_time_str, dp_port_name, stat_val))
logfile.writelines(log_lines)
def no_response(self):
self.logger.info(
'port stats request timed out for %s', self.dp.name)
class GaugePortStatsInfluxDBPoller(GaugePoller, InfluxShipper):
"""Periodically sends a port stats request to the datapath and parses
and outputs the response.
> use faucet
Using database faucet
> show measurements
name: measurements
------------------
bytes_in
bytes_out
dropped_in
dropped_out
errors_in
packets_in
packets_out
port_state_reason
> precision rfc3339
> select * from packets_out where port_name = 'port1.0.1' order by time desc limit 10;
name: packets_out
-----------------
time dp_name port_name value
2017-03-06T05:21:42Z windscale-faucet-1 port1.0.1 76083431
2017-03-06T05:21:33Z windscale-faucet-1 port1.0.1 76081172
2017-03-06T05:21:22Z windscale-faucet-1 port1.0.1 76078727
2017-03-06T05:21:12Z windscale-faucet-1 port1.0.1 76076612
2017-03-06T05:21:02Z windscale-faucet-1 port1.0.1 76074546
2017-03-06T05:20:52Z windscale-faucet-1 port1.0.1 76072730
2017-03-06T05:20:42Z windscale-faucet-1 port1.0.1 76070528
2017-03-06T05:20:32Z windscale-faucet-1 port1.0.1 76068211
2017-03-06T05:20:22Z windscale-faucet-1 port1.0.1 76065982
2017-03-06T05:20:12Z windscale-faucet-1 port1.0.1 76063941
"""
def __init__(self, conf, logname):
super(GaugePortStatsInfluxDBPoller, self).__init__(conf, logname)
def send_req(self):
ofp = self.ryudp.ofproto
ofp_parser = self.ryudp.ofproto_parser
req = ofp_parser.OFPPortStatsRequest(self.ryudp, 0, ofp.OFPP_ANY)
self.ryudp.send_msg(req)
def update(self, rcv_time, msg):
# TODO: it may be worthwhile verifying this is the correct stats
# response before doing this
self.reply_pending = False
points = []
for stat in msg.body:
port_name = self._stat_port_name(msg, stat)
for stat_name, stat_val in self._format_port_stats('_', stat):
points.append(
self.make_point(
self.dp.name, port_name, rcv_time, stat_name, stat_val))
if not self.ship_points(points):
self.logger.warning('error shipping port_stats points')
def no_response(self):
self.logger.info(
'port stats request timed out for %s', self.dp.name)
class GaugeFlowTablePoller(GaugePoller):
"""Periodically dumps the current datapath flow table as a yaml object.
Includes a timestamp and a reference ($DATAPATHNAME-flowtables). The
flow table is dumped as an OFFlowStatsReply message (in yaml format) that
matches all flows.
"""
def __init__(self, conf, logname):
super(GaugeFlowTablePoller, self).__init__(conf, logname)
def send_req(self):
ofp = self.ryudp.ofproto
ofp_parser = self.ryudp.ofproto_parser
match = ofp_parser.OFPMatch()
req = ofp_parser.OFPFlowStatsRequest(
self.ryudp, 0, ofp.OFPTT_ALL, ofp.OFPP_ANY, ofp.OFPG_ANY,
0, 0, match)
self.ryudp.send_msg(req)
def update(self, rcv_time, msg):
# TODO: it may be worthwhile verifying this is the correct stats
# response before doing this
rcv_time_str = _rcv_time(rcv_time)
self.reply_pending = False
jsondict = msg.to_jsondict()
with open(self.conf.file, 'a') as logfile:
ref = '-'.join((self.dp.name, 'flowtables'))
logfile.write(
'\n'.join((
'---',
'time: %s' % rcv_time_str,
'ref: %s' % ref,
'msg: %s' % json.dumps(jsondict, indent=4))))
def no_response(self):
self.logger.info(
'flow dump request timed out for %s', self.dp.name)
class GaugeFlowTableDBLogger(GaugePoller, GaugeDBHelper):
"""Periodically dumps the current datapath flow table as a yaml object.
Includes a timestamp and a reference ($DATAPATHNAME-flowtables). The
flow table is dumped as an OFFlowStatsReply message (in yaml format) that
matches all flows.
"""
def __init__(self, conf, logname):
super(GaugeFlowTableDBLogger, self).__init__(conf, logname)
self.setup()
def send_req(self):
ofp = self.ryudp.ofproto
ofp_parser = self.ryudp.ofproto_parser
match = ofp_parser.OFPMatch()
req = ofp_parser.OFPFlowStatsRequest(
self.ryudp, 0, ofp.OFPTT_ALL, ofp.OFPP_ANY, ofp.OFPG_ANY,
0, 0, match)
self.ryudp.send_msg(req)
def update(self, rcv_time, msg):
# TODO: it may be worthwhile verifying this is the correct stats
# response before doing this
self.reply_pending = False
jsondict = msg.to_jsondict()
if self.db_update_counter == self.conf.db_update_counter:
self.refresh_switchdb()
switch_object = {'_id': str(hex(self.dp.dp_id)),
'data': {'flows': []}}
self.switch_database.insert_update_doc(switch_object,
'data')
try:
rows = self.switch_database.get_docs(
self.conf.views['switch_view'],
key=str(hex(self.dp.dp_id)))
switch = rows[0]
except IndexError:
switch = None
if switch:
self.refresh_flowdb()
for f_msg in jsondict['OFPFlowStatsReply']['body']:
flow_object = {'data': f_msg, 'tags': []}
flow_id = self.flow_database.insert_update_doc(
flow_object, '')
switch.value['data']['flows'].append(flow_id)
self.switch_database.insert_update_doc(
switch.value, 'data')
self.db_update_counter -= 1
if not self.db_update_counter:
self.db_update_counter = self.conf.db_update_counter
def no_response(self):
self.logger.info(
'flow dump request timed out for %s', self.dp.name)
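# Editor's note: a minimal wiring sketch (assumption, not part of the original
# module). `conf` stands in for a WatcherConf-like object exposing the `type`
# and `db_type` attributes consulted by watcher_factory(), and `ryudp` for the
# Ryu datapath handed to the watcher by the controller.
#
#   watcher_cls = watcher_factory(conf)      # e.g. GaugePortStatsInfluxDBPoller
#   if watcher_cls is not None:
#       watcher = watcher_cls(conf, 'gauge')
#       watcher.start(ryudp)                 # spawns the request/response loop
#       ...
#       watcher.stop()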
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationpolicylabel_authenticationpolicy_binding(base_resource) :
""" Binding class showing the authenticationpolicy that can be bound to authenticationpolicylabel.
"""
def __init__(self) :
self._policyname = ""
self._priority = 0
self._gotopriorityexpression = ""
self._nextfactor = ""
self._labelname = ""
self.___count = 0
@property
def priority(self) :
"""Specifies the priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""Specifies the priority of the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def nextfactor(self) :
"""On success invoke label.
"""
try :
return self._nextfactor
except Exception as e:
raise e
@nextfactor.setter
def nextfactor(self, nextfactor) :
"""On success invoke label.
"""
try :
self._nextfactor = nextfactor
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policyname(self) :
"""Name of the authentication policy to bind to the policy label.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""Name of the authentication policy to bind to the policy label.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the authentication policy label to which to bind the policy.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
"""Name of the authentication policy label to which to bind the policy.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(authenticationpolicylabel_authenticationpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.authenticationpolicylabel_authenticationpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.labelname) :
return str(self.labelname)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = authenticationpolicylabel_authenticationpolicy_binding()
updateresource.labelname = resource.labelname
updateresource.policyname = resource.policyname
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.nextfactor = resource.nextfactor
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [authenticationpolicylabel_authenticationpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].labelname = resource[i].labelname
updateresources[i].policyname = resource[i].policyname
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].nextfactor = resource[i].nextfactor
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = authenticationpolicylabel_authenticationpolicy_binding()
deleteresource.labelname = resource.labelname
deleteresource.policyname = resource.policyname
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [authenticationpolicylabel_authenticationpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].labelname = resource[i].labelname
deleteresources[i].policyname = resource[i].policyname
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, labelname) :
""" Use this API to fetch authenticationpolicylabel_authenticationpolicy_binding resources.
"""
try :
obj = authenticationpolicylabel_authenticationpolicy_binding()
obj.labelname = labelname
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, labelname, filter_) :
""" Use this API to fetch filtered set of authenticationpolicylabel_authenticationpolicy_binding resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationpolicylabel_authenticationpolicy_binding()
obj.labelname = labelname
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, labelname) :
""" Use this API to count authenticationpolicylabel_authenticationpolicy_binding resources configued on NetScaler.
"""
try :
obj = authenticationpolicylabel_authenticationpolicy_binding()
obj.labelname = labelname
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, labelname, filter_) :
""" Use this API to count the filtered set of authenticationpolicylabel_authenticationpolicy_binding resources.
Filter string should be in JSON format, e.g.: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationpolicylabel_authenticationpolicy_binding()
obj.labelname = labelname
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class authenticationpolicylabel_authenticationpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.authenticationpolicylabel_authenticationpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.authenticationpolicylabel_authenticationpolicy_binding = [authenticationpolicylabel_authenticationpolicy_binding() for _ in range(length)]
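# Editor's note: an illustrative sketch, not part of the generated NITRO module.
# It assumes an authenticated nitro_service client; the host, credentials and the
# label/policy names below are placeholders.
#
#   from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#
#   client = nitro_service("10.0.0.10", "http")
#   client.login("nsroot", "nsroot")
#   binding = authenticationpolicylabel_authenticationpolicy_binding()
#   binding.labelname = "auth_label_1"
#   binding.policyname = "auth_pol_1"
#   authenticationpolicylabel_authenticationpolicy_binding.add(client, binding)
#   bound = authenticationpolicylabel_authenticationpolicy_binding.get(client, "auth_label_1")
#   client.logout()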
|
|
"""
read_flashes
filterer.filt_ev_fl
brancher(events_flashes_receiever, other_analysis_targets=timeframe_targets)
ev_fl_rx ->
histogrammer
energy calculation
plotter
timeframe_targets is
ev_fl_rx ->
bound_filt (on time)
read_flashes -> filterer.filt_ev_fl -> length_for_these_flashes -> brancher -> timeframe_targets
"""
from __future__ import absolute_import
from __future__ import print_function
import os
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import numpy as np
from numpy.lib.recfunctions import stack_arrays #, append_fields
from lmatools import coordinateSystems
from lmatools.flash_stats import length_from_area, volumetric_length_from_points, vertical_length_distribution, gen_flash_events
from lmatools.grid.make_grids import time_edges, seconds_since_start_of_day
from stormdrain.pipeline import coroutine, Branchpoint
from stormdrain.support.matplotlib.formatters import SecDayFormatter
def gen_fractal_length_for_flashes(events, flashes, D, b_s, alt_bins):
""" Given events and flashes, calculate the fractal length
as described in Bruning and MacGorman (2015).
D is the fractal dimension, and b_s is the minimum box size.
For each flash, this generator yields a dictionary with the 2D and 3D length as well as the
convex hull data.
"""
GeoSys = coordinateSystems.GeographicSystem()
for fl_srcs, fl in gen_flash_events(events, flashes):
if fl_srcs.shape[0] < 5:
print("Skipping flash because source count less than 5 prevents calculation of volume")
continue
x,y,z = GeoSys.toECEF(fl_srcs['lon'], fl_srcs['lat'], fl_srcs['alt'])
x,y,z = x/1000.0, y/1000.0, z/1000.0
# These calls will fail if the chi2 and stations criteria are too stringent and reduce the number of points below the minimum needed to construct the geometry (5, according to QHULL's error)
simplex_centroids, simplex_volumes, volume, L_fractal, length_weighted = volumetric_length_from_points(x,y,z,D,b_s)
simplex_lon,simplex_lat,simplex_alt = GeoSys.fromECEF(simplex_centroids[:,0]*1000.0, simplex_centroids[:,1]*1000.0, simplex_centroids[:,2]*1000.0)
alt_bins, bin_total_src, bin_total_length = vertical_length_distribution(fl_srcs['alt']/1000.0, simplex_alt/1000.0, length_weighted, alt_bins, norm=True)
L_capacitor = length_from_area(fl['area'],D,b_s)
results = {'2D':{'length':L_capacitor},
'3D':{'length':L_fractal, 'volume':volume,
'subvolumes':{'centroids':simplex_centroids, 'volumes':simplex_volumes, 'lengths':length_weighted},
'vertical_profile':{'alt_bins':alt_bins, 'src_density':bin_total_src, 'length_density':bin_total_length}
},
'flash':fl}
yield results
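# Minimal usage sketch (hedged: 'events' and 'flashes' are assumed to be numpy
# record arrays following the lmatools flash table layout, with 'lon', 'lat',
# 'alt', 'area' and 'flash_id' fields; the D and b_s values are illustrative):
#
#   alt_bins = np.arange(0.0, 20.5, 0.5)
#   for res in gen_fractal_length_for_flashes(events, flashes, 1.5, 0.1, alt_bins):
#       print(res['flash']['flash_id'], res['2D']['length'], res['3D']['length'])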
class FractalLengthProfileCalculator(object):
def __init__(self, D, b_s, alt_bins):
self.alt_bins = alt_bins
self.D = D
self.b_s = b_s
def process_events_flashes(self, events, flashes):
""" Given a table of flashes and their corresponding events table,
calculate the Bruning and Thomas (2015, JGR) fractal length
for each flash and produce a vertical profile of flash lengths
for all flashes.
Returns:
per_flash_data: List of dictionaries containing the flash length
and triangulation geometry data, including the flash volume.
IC_totals: array of summary data for each profile; see dtype below.
CG_totals: as above but for CGs only.
These data are not normalized by time.
totals_dtype = [
('fractal_length_2D_hull', 'f8'),
('fractal_length_3D_hull', 'f8'),
('source_density_profile', 'f8', alt_bin_shape),
('length_density_profile', 'f8', alt_bin_shape),
]
"""
results = [result for result in gen_fractal_length_for_flashes(
events, flashes, self.D, self.b_s, self.alt_bins)
]
totals_CG = self._sum_profiles(results, is_CG_flag=True)
totals_IC = self._sum_profiles(results, is_CG_flag=False)
# IC_totals = self._process_totals(totals_IC, duration)
# CG_totals = self._process_totals(totals_CG, duration)
return results, totals_IC, totals_CG
def normalize_profiles(self, totals_seq, durations):
""" Given a sequence of results from repeated calls to self.process_events_flashes,
return profiles normalized by durations (an array of durations for each interval
corresponding to the items in total_seq). also return a source density
returns:
lengths_2D, lengths_3D, scaled_sources, L_profile_rate, max_L_bin
total 2D and 3D lengths, source and 3D length profiles, and the maximum
length per unit time per km altitude across all bins.
"""
totals = np.asarray(list(totals_seq))
L_profile_rate = np.squeeze(totals['length_density_profile'].T/durations)
max_L_bin = L_profile_rate.max()
src_profile_rate = totals['source_density_profile'].T/durations
scaled_sources = np.squeeze(max_L_bin * src_profile_rate / src_profile_rate.max())
lengths_2D = np.squeeze(totals['fractal_length_2D_hull'])/durations
lengths_3D = np.squeeze(totals['fractal_length_3D_hull'])/durations
return lengths_2D, lengths_3D, scaled_sources, L_profile_rate, max_L_bin
def _sum_profiles(self, results, is_CG_flag=False):
alt_bins = self.alt_bins
# one fewer density value (bin interval) than bin edges.
alt_bin_shape = (alt_bins.shape[0]-1,)
res_dtype = [ # this is the dtype of the return value, i.e., the sums
('fractal_length_2D_hull', 'f8'),
('fractal_length_3D_hull', 'f8'),
('source_density_profile', 'f8', alt_bin_shape),
('length_density_profile', 'f8', alt_bin_shape), #, (alt_bins.shape[0]-1,) ,)
]
if len(results) == 0:
return np.zeros((1,), dtype=res_dtype)
# Check to see if there are CG flags in the flash data table
# just look at the first flash and assume the rest have it if one does.
if 'CG' in results[0]['flash'].dtype.names:
results_iter = (ri for ri in results if
(ri['flash']['CG'] == is_CG_flag) )
else:
results_iter = results
# def get_res_iter():
res_iter = ( (
r['2D']['length'],
r['3D']['length'],
tuple(r['3D']['vertical_profile']['src_density']),
tuple(r['3D']['vertical_profile']['length_density'])
) for r in results_iter
)
# http://stackoverflow.com/questions/19201868/how-to-set-dtype-for-nested-numpy-ndarray
each_result = np.fromiter(res_iter, dtype=res_dtype)
total = np.empty((1,), dtype=res_dtype)
for colname in total.dtype.names:
total[colname] = each_result[colname].sum(axis=0)
return total
def make_time_series_plot(self, basedate, t_edges,
lengths_2D, lengths_3D, scaled_sources, L_profile_rate, max_L_bin,
label_t_every=3600., figsize=(7.5,10)):
""" Arguments:
t_edges: N+1 bin boundaries corresponding to the N time series values in the
other arguments. Units: seconds since start of day.
lengths_2D, lengths_3D, scaled_sources, L_profile_rate, max_L_bin:
The values returned by normalize_profiles
label_t_every: interval in seconds at which to label the time axis
figsize: (width, height) of figure in inches (passed to matplotlib.figure)
"""
import matplotlib.pyplot as plt
cmap = 'cubehelix_r'
fig = plt.figure(figsize=figsize)
ax_L = fig.add_subplot(311)
ax_prof_L = fig.add_subplot(312)
ax_prof_src = fig.add_subplot(313)
min_L_bin = 0*max_L_bin
starts, ends = t_edges[:-1], t_edges[1:]
t_centers = (starts+ends)/2.0
src_pm = ax_prof_src.pcolormesh(t_edges, self.alt_bins, scaled_sources,
cmap=cmap, vmin=min_L_bin, vmax=max_L_bin)
src_pm.set_rasterized(True)
cbar_src = fig.colorbar(src_pm, ax=ax_prof_src, orientation='horizontal')
cbar_src.set_label('Source count per height interval per time\n(scaled to max length, km/km/min)')
L_pm = ax_prof_L.pcolormesh(t_edges, self.alt_bins, L_profile_rate,
cmap=cmap, vmin=min_L_bin, vmax=max_L_bin)
L_pm.set_rasterized(True)
cbar_L = fig.colorbar(L_pm, ax=ax_prof_L, orientation='horizontal')
cbar_L.set_label('Length per height interval per time\n(km/km/min)')
for ax in (ax_prof_src, ax_prof_L):
ax.set_ylabel('Altitude (km)')
ax_L.plot(t_centers, lengths_2D,
label='2D Hull Area')
ax_L.plot(t_centers, lengths_3D,
label='3D Hull Volume')
ax_L.legend()
ax_L.set_ylabel('Fractal Length (km/min)')
for ax in (ax_L, ax_prof_L, ax_prof_src):
ax.xaxis.set_major_formatter(SecDayFormatter(basedate, ax.xaxis))
ax.set_xlabel('Time (UTC)')
ax.xaxis.set_major_locator(MultipleLocator(label_t_every))
return fig
def write_profile_data(self, basedate, t_edge, outfilebase,
lengths_2D, lengths_3D, scaled_sources, L_profile_rate, max_L_bin,
partition_kind='total'):
""" Arguments:
outfilebase: file base name, including directory.
basedate: date against which t_edge is referenced.
t_edge: N+1 bin boundaries corresponding to the N time series values in the
other arguments. Units: seconds since start of day given by basedate.
lengths_2D, lengths_3D, scaled_sources, L_profile_rate, max_L_bin:
The values returned by normalize_profiles
partition_kind: one of 'total', 'IC', 'CG', or another unique tag
classifying this kind of profile. Appended to filename just before.
The filename is calculated as [outfile_base]_[parition_kind].txt
"""
starts = t_edge[:-1]
ends = t_edge[1:]
header = ""
header += "# LMA channel length distribution\n"
header += "# Base date = " + basedate.isoformat() +"\n"
header += "# Altitude_bins = " + str(self.alt_bins.tolist()) +"\n"
header += "# starts, ends, lengths_2D, lengths_3D, [scaled_sources x Nbins], [L_profile_rate x Nbins]\n"
text_dump = open(outfilebase+'_{0}.txt'.format(partition_kind), 'w')
text_dump.write(header)
# shape of scaled_sources and L_profile_rate are (N_alt_bins, N_times), and loop is over the first dimension, so take transpose so loop is over N_times
for s0, e1, l2, l3, Sprof, Lprof in zip(starts, ends, lengths_2D, lengths_3D, scaled_sources.T, L_profile_rate.T):
text_dump.write('{0}, {1}, {2}, {3}, {4}, {5}\n'.format(
s0, e1, l2, l3,
str(Sprof.tolist()), str(Lprof.tolist())
))
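# Minimal end-to-end sketch for the calculator above (hedged: 'frames' is a
# hypothetical sequence of per-interval (events, flashes) pairs, and 't_edges'
# and 'basedate' are assumed to come from the caller, e.g. via time_edges and
# seconds_since_start_of_day; D=1.5 and b_s=0.1 are illustrative values):
#
#   calc = FractalLengthProfileCalculator(1.5, 0.1, np.arange(0.0, 20.5, 0.5))
#   IC_totals_per_frame = []
#   for ev_i, fl_i in frames:                      # one (events, flashes) pair per interval
#       per_flash, totals_IC, totals_CG = calc.process_events_flashes(ev_i, fl_i)
#       IC_totals_per_frame.append(totals_IC)
#   durations = (t_edges[1:] - t_edges[:-1]) / 60.0  # minutes, one per interval
#   profiles = calc.normalize_profiles(IC_totals_per_frame, durations)
#   fig = calc.make_time_series_plot(basedate, t_edges, *profiles)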
@coroutine
def length_for_these_flashes(D, b_s, alt_bins, chi2=5.0, stations=5, target=None):
""" Receive events, flashes arrays. Calculate flash length using fractal dimension D and step length b_s
For each flash, will send out a dictionary with:
capacitor_length: total length determined from flash area
volumetric_length: total length determined from flash volume
"""
GeoSys = coordinateSystems.GeographicSystem()
while True:
pts, flashes = (yield) # pts == events
areas = flashes['area']
capacitor_length = length_from_area(areas,D,b_s)
for L_capacitor, fl in zip(capacitor_length, flashes):
fl_id=fl['flash_id']
this_flash = (pts['flash_id']==fl_id)
good = (pts['stations'] >= stations) & (pts['chi2']<=chi2)
fl_srcs = pts[this_flash & good]
if fl_srcs.shape[0] < 5:
print(("Skipping flash because original source count reduced from {0}={4} to {1} for flash {2} at {3}".format(
pts[this_flash].shape[0], fl_srcs.shape[0], fl_id, fl['start'], fl['n_points']
)))
continue
x,y,z = GeoSys.toECEF(fl_srcs['lon'], fl_srcs['lat'], fl_srcs['alt'])
x,y,z = x/1000.0, y/1000.0, z/1000.0
# These calls will fail if the chi2 and stations criteria are too stringent and reduce the number of points below the minimum needed to construct the geometry (5, according to QHULL's error)
simplex_centroids, simplex_volumes, volume, L_fractal, length_weighted = volumetric_length_from_points(x,y,z,D,b_s)
simplex_lon,simplex_lat,simplex_alt = GeoSys.fromECEF(simplex_centroids[:,0]*1000.0, simplex_centroids[:,1]*1000.0, simplex_centroids[:,2]*1000.0)
alt_bins, bin_total_src, bin_total_length = vertical_length_distribution(fl_srcs['alt']/1000.0, simplex_alt/1000.0, length_weighted, alt_bins, norm=True)
results = {'2D':{'length':L_capacitor},
'3D':{'length':L_fractal,
'subvolumes':{'centroids':simplex_centroids, 'volumes':simplex_volumes, 'lengths':length_weighted},
'vertical_profile':{'alt_bins':alt_bins, 'src_density':bin_total_src, 'length_density':bin_total_length}
},
'flash':fl
}
if target is not None:
target.send(results)
@coroutine
def in_time_range(t_min, t_max, target):
results_in_range = []
try:
while True:
results = (yield)
start = results['flash']['start']
if (start >= t_min) & (start < t_max):
results_in_range.append(results)
except GeneratorExit:
target.send((t_min, t_max, results_in_range))
class StatResults(object):
def __init__(self, alt_bins, basedate=None):
self.basedate = basedate
# t_start, t_end, results_in_range should be equal length
# and mutually indexable because of how timeframe_results_rcvr builds them
self.t_start = []
self.t_end = []
self.results_in_range = []
# it's imperative this be passed in so that _sum_profiles can
# return a zero-filled array of the right shape if no flashes are in
# any time interval.
self.alt_bins = alt_bins
def _sum_profiles(self, results, is_CG_flag=False):
alt_bins = self.alt_bins
# one fewer density value (bin interval) than bin edges.
alt_bin_shape = (alt_bins.shape[0]-1,)
res_dtype = [ # this is the dtype of the return value, i.e., the sums
('fractal_length_2D_hull', 'f8'),
('fractal_length_3D_hull', 'f8'),
('source_density_profile', 'f8', alt_bin_shape),
('length_density_profile', 'f8', alt_bin_shape), #, (alt_bins.shape[0]-1,) ,)
]
if len(results) == 0:
return np.zeros((1,), dtype=res_dtype)
# def get_res_iter():
res_iter = ( (
r['2D']['length'],
r['3D']['length'],
tuple(r['3D']['vertical_profile']['src_density']),
tuple(r['3D']['vertical_profile']['length_density'])
) for r in results if (r['flash']['CG'] == is_CG_flag)
)
# return res_iter
# print res_dtype
# for ri in get_res_iter():
# print ri
# http://stackoverflow.com/questions/19201868/how-to-set-dtype-for-nested-numpy-ndarray
each_result = np.fromiter(res_iter, dtype=res_dtype)
# print each_result.shape, each_result.dtype
total = np.empty((1,), dtype=res_dtype)
for colname in total.dtype.names:
print((colname, each_result[colname].shape))
total[colname] = each_result[colname].sum(axis=0)
return total
def total_all_profiles(self):
""" List of profiles, summed over all flashes for all times. """
total_CG = np.asarray([self._sum_profiles(r, is_CG_flag=True) for r in self.results_in_range])
total_IC = np.asarray([self._sum_profiles(r, is_CG_flag=False) for r in self.results_in_range])
# total_all = total_IC + total_CG
return total_IC, total_CG
def process_totals(self, totals, durations):
L_profile_rate = np.squeeze(totals['length_density_profile'].T/durations)
max_L_bin = L_profile_rate.max()
src_profile_rate = totals['source_density_profile'].T/durations
scaled_sources = np.squeeze(max_L_bin * src_profile_rate / src_profile_rate.max())
lengths_2D = np.squeeze(totals['fractal_length_2D_hull'])/durations
lengths_3D = np.squeeze(totals['fractal_length_3D_hull'])/durations
return lengths_2D, lengths_3D, scaled_sources, L_profile_rate, max_L_bin
def plot_stats(self, outfile):
outfile_base, outfile_ext = os.path.splitext(outfile)
n_frames = len(self.results_in_range)
starts = np.asarray(self.t_start)
ends = np.asarray(self.t_end)
t_edges = np.asarray(self.t_start + self.t_end[-1:])
t_centers = (starts+ends)/2.0
alt_bins = self.alt_bins
durations = (t_edges[1:] - t_edges[:-1])/60.0 #minutes
totals_IC, totals_CG = self.total_all_profiles()
# fig_total = make_plot(totals_total)
IC_totals = self.process_totals(totals_IC, durations)
CG_totals = self.process_totals(totals_CG, durations)
def make_plot(lengths_2D, lengths_3D, scaled_sources, L_profile_rate, max_L_bin):
import matplotlib.pyplot as plt
cmap = 'cubehelix_r'
fig = plt.figure(figsize=(7.5,10))
ax_L = fig.add_subplot(311)
ax_prof_L = fig.add_subplot(312)
ax_prof_src = fig.add_subplot(313)
min_L_bin = 0*max_L_bin
src_pm = ax_prof_src.pcolormesh(t_edges, alt_bins, scaled_sources,
cmap=cmap, vmin=min_L_bin, vmax=max_L_bin)
src_pm.set_rasterized(True)
cbar_src = fig.colorbar(src_pm, ax=ax_prof_src, orientation='horizontal')
cbar_src.set_label('Source count per height interval per time\n(scaled to max length, km/km/min)')
L_pm = ax_prof_L.pcolormesh(t_edges, alt_bins, L_profile_rate,
cmap=cmap, vmin=min_L_bin, vmax=max_L_bin)
L_pm.set_rasterized(True)
cbar_L = fig.colorbar(L_pm, ax=ax_prof_L, orientation='horizontal')
cbar_L.set_label('Length per height interval per time\n(km/km/min)')
for ax in (ax_prof_src, ax_prof_L):
ax.set_ylabel('Altitude (km)')
ax_L.plot(t_centers, lengths_2D,
label='2D Hull Area')
ax_L.plot(t_centers, lengths_3D,
label='3D Hull Volume')
ax_L.legend()
ax_L.set_ylabel('Fractal Length (km/min)')
for ax in (ax_L, ax_prof_L, ax_prof_src):
ax.xaxis.set_major_formatter(SecDayFormatter(self.basedate, ax.xaxis))
ax.set_xlabel('Time (UTC)')
ax.xaxis.set_major_locator(MultipleLocator(3600))
return fig
fig_IC = make_plot(*IC_totals)
fig_CG = make_plot(*CG_totals)
# fig_total.savefig(outfile_base+'_total'+outfile_ext)
# fig_total.clf()
fig_IC.savefig(outfile_base+'_IC'+outfile_ext)
fig_IC.clf()
fig_CG.savefig(outfile_base+'_CG'+outfile_ext)
fig_CG.clf()
for diagnose in CG_totals:
print((diagnose.dtype, diagnose.shape))
for partition_kind, kind_totals in zip (('IC', 'CG'), (IC_totals, CG_totals)):
lengths_2D, lengths_3D, scaled_sources, L_profile_rate, max_L_bin = kind_totals
header = ""
header += "# LMA channel length distribution\n"
header += "# Base date = " + self.basedate.isoformat() +"\n"
header += "# Altitude_bins = " + str(alt_bins.tolist()) +"\n"
header += "# starts, ends, lengths_2D, lengths_3D, [scaled_sources x Nbins], [L_profile_rate x Nbins]\n"
text_dump = open(outfile_base+'_{0}.txt'.format(partition_kind), 'w')
text_dump.write(header)
# shape of scaled_sources and L_profile_rate are (N_alt_bins, N_times), and loop is over the first dimension, so take transpose so loop is over N_times
for s0, e1, l2, l3, Sprof, Lprof in zip(starts, ends, lengths_2D, lengths_3D, scaled_sources.T, L_profile_rate.T):
text_dump.write('{0}, {1}, {2}, {3}, {4}, {5}\n'.format(
s0, e1, l2, l3,
str(Sprof.tolist()), str(Lprof.tolist())
))
text_dump.close()
@coroutine
def timeframe_results_rcvr(self):
t_min, t_max, results_in_range = (yield)
self.t_start.append(t_min)
self.t_end.append(t_max)
self.results_in_range.append(results_in_range)
def length_stats_for_intervals(t_start, t_end, dt, D, b_s, chi2=5.0, stations=5):
"""
"""
t_edges, duration = time_edges(t_start, t_end, dt.total_seconds())
t_ref, t_edges_seconds = seconds_since_start_of_day(t_start, t_edges)
n_frames = len(t_edges)-1
max_alt, d_alt = 20.0, 0.5
alt_bins = np.arange(0.0,max_alt+d_alt, d_alt)
results_aggregator = StatResults(alt_bins, basedate=t_ref)
all_frame_targets = []
for n in range(n_frames):
t0, t1 = t_edges_seconds[n:n+2]
statframer = results_aggregator.timeframe_results_rcvr()
this_frame = in_time_range(t0, t1, statframer)
all_frame_targets.append(this_frame)
brancher = Branchpoint(all_frame_targets)
ev_fl_rcvr = length_for_these_flashes(D, b_s, alt_bins,
chi2=chi2, stations=stations, target=brancher.broadcast())
return ev_fl_rcvr, all_frame_targets, results_aggregator
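# Minimal wiring sketch for the pipeline described in the module docstring
# (hedged: 'events' and 'flashes' arrays, the datetimes t_start/t_end, and the
# timedelta dt are assumed to be provided by the caller; D and b_s are
# illustrative values):
#
#   ev_fl_rcvr, frame_targets, stats = length_stats_for_intervals(
#       t_start, t_end, dt, D=1.5, b_s=0.1)
#   ev_fl_rcvr.send((events, flashes))   # push one batch of data through the pipeline
#   for tgt in frame_targets:
#       tgt.close()                      # each in_time_range reports its frame on close
#   stats.plot_stats('length_stats.pdf')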
|
|
import sys
sys.path.append('..')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import random
import os
from time import time
import numpy as np
from tqdm import tqdm
from sklearn.externals import joblib
import theano
import theano.tensor as T
from lib import activations
from lib import inits
from lib import updates
from lib.vis import grayscale_grid_vis
from lib.rng import py_rng, np_rng, t_rng
from lib.theano_utils import floatX, sharedX
from lib.data_utils import OneHot, shuffle, iter_data
from lib.ops import batchnorm, conv_cond_concat, deconv, dropout
from logz.rbm_hais_logz import ais_logZ
from load import mnist_with_valid_set
desc = 'rbm_adv'
model_dir = 'models/%s' % desc
samples_dir = 'samples/%s' % desc
dir_list = [model_dir, samples_dir]
for dir in dir_list:
if not os.path.exists(dir):
os.makedirs(dir)
''' load mnist data'''
trX, vaX, teX, trY, vaY, teY = mnist_with_valid_set()
trX, vaX, teX = trX/255., vaX/255., teX/255.
ntrain, nvalid, ntest = len(trX), len(vaX), len(teX)
print ntrain, nvalid, ntest
def transform(X):
return (floatX(X)).reshape(-1, nc, npx, npx)
def inverse_transform(X):
X = X.reshape(-1, npx, npx)
return X
lr = 3e-4 # learning rate for model
b1 = .5
l2 = 1e-5
nc = 1 # # of channels in image
ny = 10 # # of classes
nbatch = 100 # # of examples in batch
npx = 28 # # of pixels width/height of images
nz = 100 # # of dim for Z
ngfc = 1024 # # of gen units for fully connected layers
ndfc = 1024 # # of discrim units for fully connected layers
ngf = 64 # # of gen filters in first conv layer
ndf = 64 # # of discrim filters in first conv layer
nx = npx*npx*nc # # of dimensions in X
niter = 100 # # of iter at starting learning rate
niter_decay = 100 # # of iter to linearly decay learning rate to zero
n_hidden = 10
n_observe = trX.shape[1]
r_gifn = inits.Uniform(scale=4*np.sqrt(6./(n_observe+n_hidden)))
r_bias_fn = inits.Constant()
gB = r_gifn((n_observe, n_hidden), 'gB')
gb = r_bias_fn((n_observe,), 'gb')
gc = r_bias_fn((n_hidden,), 'gc')
rbm_params = [gB, gb, gc]
relu = activations.Rectify()
sigmoid = activations.Sigmoid()
tanh = activations.Tanh()
lrelu = activations.LeakyRectify()
bce = T.nnet.binary_crossentropy
gifn = inits.Normal(scale=0.02)
gw = gifn((nz, ngfc), 'gw')
gw2 = gifn((ngfc, ngf*2*7*7), 'gw2')
gw3 = gifn((ngf*2, ngf, 5, 5), 'gw3')
gwx = gifn((ngf, nc, 5, 5), 'gwx')
gen_params = [gw, gw2, gw3, gwx]
def gen(Z, w, w2, w3, gwx):
h = relu(batchnorm(T.dot(Z, w)))
h2 = relu(batchnorm(T.dot(h, w2)))
h2 = h2.reshape((h2.shape[0], ngf*2, 7, 7))
h3 = relu(batchnorm(deconv(h2, w3, subsample=(2, 2), border_mode=(2, 2))))
x = sigmoid(deconv(h3, gwx, subsample=(2, 2), border_mode=(2, 2)))
return x
def rbf_kernel(X):
XY = T.dot(X, X.T)
x2 = T.sum(X**2, axis=1).dimshuffle(0, 'x')
X2e = T.repeat(x2, X.shape[0], axis=1)
H = X2e + X2e.T - 2. * XY
V = H.flatten()
# median distance
h = T.switch(T.eq((V.shape[0] % 2), 0),
# if even vector
T.mean(T.sort(V)[ ((V.shape[0] // 2) - 1) : ((V.shape[0] // 2) + 1) ]),
# if odd vector
T.sort(V)[V.shape[0] // 2])
h = T.sqrt(.5 * h / T.log(H.shape[0].astype('float32') + 1.))
# compute the rbf kernel
kxy = T.exp(-H / (h ** 2) / 2.0)
dxkxy = -T.dot(kxy, X)
sumkxy = T.sum(kxy, axis=1).dimshuffle(0, 'x')
dxkxy = T.add(dxkxy, T.mul(X, sumkxy)) / (h ** 2)
return kxy, dxkxy
def discrim(X):
return logp_rbm(X)
def logp_rbm(X):
y = T.dot(X, gB) + gc
y_max = T.max(T.maximum(y, -y), axis=1).dimshuffle(0,'x')
log_sum = y_max + T.log(T.exp(y - y_max) + T.exp(-y - y_max)) # apply the log sum trick
log_sum = T.sum(log_sum, axis=1)
logp = T.dot(X, gb.dimshuffle(0, 'x')).flatten() - .5 * T.sum(X*X, axis=1) + log_sum
return logp
def dlogp_rbm(X):
y = T.dot(X, gB) + gc
phi = 1. - 2. / (1+T.exp(2*y))
score = gb - X + T.dot(phi, gB.T)
return score
def svgd_gradient(X):
grad = dlogp_rbm(X)
kxy, dxkxy = rbf_kernel(X)
svgd_grad = (T.dot(kxy, grad) + dxkxy) / T.sum(kxy, axis=1).dimshuffle(0, 'x')
return grad, svgd_grad
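# For reference: svgd_gradient computes the Stein variational gradient descent
# (SVGD, Liu & Wang 2016) transport direction
#   phi(x_i) = sum_j [ k(x_j, x_i) * grad_{x_j} log p(x_j) + grad_{x_j} k(x_j, x_i) ],
# here normalized per particle by sum_j k(x_j, x_i) instead of 1/n, with the RBF
# bandwidth chosen by the median heuristic in rbf_kernel above. The generator
# defined below is trained to move its samples along this direction (an
# amortized-SVGD style update), which is why g_cost is the negative inner
# product between the generated samples gX and deltaX.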
X = T.matrix()
X0 = T.matrix() # samples
# vgd gradient
deltaX = T.tensor4()
# random noise
Z = T.matrix()
f_real = discrim(X) # data
f_gen = discrim(X0) # vgd particles
cost_data = -1 * f_real.mean()
cost_vgd = -1 * f_gen.mean()
gX = gen(Z, *gen_params)
g_cost = -1 * T.sum(T.sum(T.flatten(gX, 2) * T.flatten(deltaX, 2), axis=1)) # update the generator by moving its samples along the SVGD direction deltaX
balance_weight = sharedX(1.)
d_cost = cost_data - balance_weight * cost_vgd # for discriminative model, minimize cost
lrt = sharedX(lr)
d_updater = updates.Adam(lr=lrt, b1=b1, regularizer=updates.Regularizer(l2=l2))
g_updater = updates.Adam(lr=lrt, b1=b1, regularizer=updates.Regularizer(l2=l2))
d_updates = d_updater(rbm_params, d_cost)
g_updates = g_updater(gen_params, g_cost)
print 'COMPILING'
t = time()
_train_d = theano.function([X, X0], d_cost, updates=d_updates)
_train_g = theano.function([Z, deltaX], g_cost, updates=g_updates)
_gen = theano.function([Z], gen(Z, *gen_params))
_logp_rbm = theano.function([X], logp_rbm(X))
_svgd_gradient = theano.function([X], svgd_gradient(X))
print '%.2f seconds to compile theano functions'%(time()-t)
nbatch = 100
n_iter = 20
n_updates = 0
sample_zmb = floatX(np_rng.uniform(-1., 1., size=(200, nz)))
for iter in tqdm(range(1, n_iter+1)):
trX = shuffle(trX)
for imb in iter_data(trX, size=nbatch):
imb = floatX(imb)
zmb = floatX(np_rng.uniform(-1., 1., size=(nbatch, nz)))
# generate samples
samples = floatX(_gen(zmb).reshape(-1, nx))
grad, svgd_grad = _svgd_gradient(samples)
_train_g(zmb, floatX(svgd_grad.reshape(-1, nc, npx, npx))) # generator
_train_d(imb, floatX(samples)) # discriminator
n_updates += 1
if iter % 50 == 0:
joblib.dump([p.get_value() for p in gen_params], 'models/%s/%d_gen_params.jl'%(desc, iter))
joblib.dump([p.get_value() for p in rbm_params], 'models/%s/%d_rbm_params.jl'%(desc, iter))
samples = np.asarray(_gen(sample_zmb))
grayscale_grid_vis(inverse_transform(samples), (10, 20), '%s/%d.png' % (samples_dir, iter))
# adversarial
logz_approx = ais_logZ(gB.get_value(), gb.get_value(), gc.get_value())
ll_train = _logp_rbm(floatX(trX)) - logz_approx
ll_test = _logp_rbm(floatX(teX)) - logz_approx
print iter, 'train', np.mean(ll_train), 'test', ll_test.mean(), 'logz', logz_approx
#np.savez('adv_ll_train.npz', ll=ll_train)
#np.savez('adv_ll_test.npz', ll=ll_test)
print 'DONE!'
|
|
import Tkinter
from Tkinter import *
def mod(a,b):
return ( a % b)
def StandardKnot():
global m
knot = []
L = len(points) - 2
if L <= m -2 :
print 'make more points than m'
return []
for i in range(m - 1):
knot.append(0)
for i in range(L - m + 3):
knot.append(i)
for i in range(m):
knot.append(L - m + 3)
return knot
def Nopen(k,m,t,knot):
if m <= 1:
if t < knot[k] or t >= knot[k + 1]:
Sum = 0.0
else:
Sum = 1.0
else:
d = knot[k+m-1]-knot[k]
if d != 0:
Sum = (t-knot[k])*Nopen(k,m-1,t,knot)/d
else:
Sum = 0.0
d = knot[k+m] - knot[k+1]
if d != 0:
Sum = Sum + (knot[k+m] - t)*Nopen(k + 1,m-1,t,knot)/d
return Sum
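# Nopen implements the Cox-de Boor recursion for B-spline basis functions,
# where m is the order (degree + 1):
#   N_{k,1}(t) = 1 if knot[k] <= t < knot[k+1], else 0
#   N_{k,m}(t) = (t - knot[k]) / (knot[k+m-1] - knot[k]) * N_{k,m-1}(t)
#              + (knot[k+m] - t) / (knot[k+m] - knot[k+1]) * N_{k+1,m-1}(t)
# A term is dropped when its denominator is zero (repeated knots), which is
# what the d != 0 checks above handle.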
def Nclosed(k,m,t,knot):
L = len(points)
z = mod(t-k,L)
if z <= 0:
z += L
return Nopen(0,m,z,knot)
def P(t,knot,Ncycle):
L = len(points)
SumX = 0.0
SumY = 0.0
for k in range(L):
n = Ncycle(k,m,t,knot)
SumX = SumX + n * points[k][0]
SumY = SumY + n * points[k][1]
return [SumX,SumY]
def plot():
global m
global points
if lOpen:
knot = StandardKnot()
if len(knot) == 0:
return
print knot
print points
x = points[0][0]
y = points[0][1]
t = 0.0
step = 0.1
L = len(points)
while t <= L - m + 1:
p = P(t,knot,Nopen)
C.create_line(x, y, p[0], p[1])
x = p[0]
y = p[1]
t = t + step
else:
L = len(points)
knot = range(L)
print knot
print points
p = P(0.0,knot,Nclosed)
x = p[0]
y = p[1]
step = 0.1
t = step
L = len(points)
while t <= L:
p = P(t,knot,Nclosed)
C.create_line(x, y, p[0], p[1])
x = p[0]
y = p[1]
t = t + step
def Spaceout():
global points
newpoints = []
L = len(points)
if lOpen:
newpoints.append(points[0])
knot = StandardKnot()
if len(knot) == 0:
return
for t in range( 1, L - m + 2):
p = P(t - 0.5,knot,Nopen)
x = points[t][0] - p[0]
y = points[t][1] - p[1]
newpoints.append([points[t][0] + x,points[t][1] + y])
newpoints.append(points[-1])
else:
knot = range(L)
for t in range(L):
p = P(mod(t + 1.5,L),knot,Nclosed)
x = points[t][0] - p[0]
y = points[t][1] - p[1]
newpoints.append([points[t][0] + x,points[t][1] + y])
points = newpoints
plot()
def do_mouse(eventname):
def mouse_binding(event):
global points
if eventname == "Button-1":
x = event.x
y = event.y
print x, y
if x > 50 and y > 50:
points.append([x,y])
C.create_oval(x - 6,y - 6, x + 6, y + 6)
C.create_text(x, y, text = '%d' %(len(points)))
fram.bind_all( '<%s>' %eventname, mouse_binding)
def wipe():
global points
C.delete( ALL)
points = []
def fetch():
global m
m = mEntry.get()
m = int(m)
print 'm is now %d' %m
def CycleClosed(i):
global lOpen
lOpen = i
if lOpen:
print 'Cycle is open'
else:
print 'Cycle is closed'
def Blending():
global points
L = len(points)
if L <= m:
print 'Make some more points first!'
return
if lOpen:
knot = StandardKnot()
if len(knot) == 0:
return
else:
knot = range(L)
print knot
x0 = ScreenW * 0.2
y0 = ScreenH * 0.2
scaley = ScreenH * 0.7
scalex = ScreenW / L * 0.7
for k in knot:
x1 = k * scalex + x0
y1 = ScreenH - y0
C.create_line(x1, y1, x1, y1 + 20)
for k in range(L):
t = 0.0
step = 0.1
x1 = x0
y1 = y0
if lOpen:
z = L - m + 1
else:
z = L
while t <= z:
if lOpen:
p = Nopen(k,m,t,knot)
else:
p = Nopen(0,m,mod(t-k,L),knot)
x2 = t * scalex + x0
y2 = ScreenH - p * scaley - y0
C.create_line(x1, y1, x2, y2)
x1 = x2
y1 = y2
t = t + step
m = 3
lOpen = True
ScreenH = 600
ScreenW = 800
root = Tk()
root.title('Left click to add some points, then click [Plot] to draw the b-spline.')
fram = Frame(root)
rad1 = Radiobutton(fram, text = 'Open',value=1,command=(lambda : CycleClosed(True)))
rad1.pack(side = LEFT)
rad2 = Radiobutton(fram, text = 'Closed',value=0,command=(lambda : CycleClosed(False)))
rad2.pack(side = LEFT)
rad1.select()
Label(fram, text = 'M:').pack(side = LEFT,padx = 20)
mEntry = Entry(fram)
mEntry.insert(0,m)
mEntry.focus()
mEntry.bind('<Return>', (lambda event: fetch()))
mEntry.pack(side = LEFT)
butt1 = Button(fram, text = ' Plot ',command = plot)
butt1.pack(side = LEFT)
butt3 = Button(fram, text = ' Wipe ',command = wipe)
butt3.pack(side = LEFT)
butt4 = Button(fram, text = 'Blending',command = Blending)
butt4.pack(side = LEFT)
butt5 = Button(fram, text = 'Spaceout',command = Spaceout)
butt5.pack(side = LEFT)
fram.pack(side = TOP)
C = Canvas(root, width = ScreenW, height = ScreenH)
C.pack()
do_mouse('Button-1')
do_mouse('Button-3')
points = []
root.mainloop()
|
|
"""Module with main classes related to Authentication."""
import datetime
import getpass
import hashlib
import logging
import time
from functools import wraps
from http import HTTPStatus
import jwt
from flask import jsonify, request
from kytos.core.config import KytosConfig
from kytos.core.events import KytosEvent
__all__ = ['authenticated']
LOG = logging.getLogger(__name__)
def authenticated(func):
"""Handle tokens from requests."""
@wraps(func)
def wrapper(*args, **kwargs):
"""Verify the requires of token."""
try:
content = request.headers.get("Authorization")
if content is None:
raise ValueError("The attribute 'content' has an invalid "
"value 'None'.")
token = content.split("Bearer ")[1]
jwt.decode(token, key=Auth.get_jwt_secret())
except (
ValueError,
IndexError,
jwt.ExpiredSignature,
jwt.exceptions.DecodeError,
) as exc:
msg = f"Token not sent or expired: {exc}"
return jsonify({"error": msg}), HTTPStatus.UNAUTHORIZED.value
return func(*args, **kwargs)
return wrapper
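# Usage sketch (hedged): any Flask view function can be protected by stacking
# this decorator; 'api' here is a hypothetical blueprint used only for
# illustration:
#
#   @api.route('/protected')
#   @authenticated
#   def protected_view():
#       # only reached when a valid "Authorization: Bearer <token>" header is sent
#       return jsonify({"ok": True}), HTTPStatus.OK.value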
class Auth:
"""Module used to provide Kytos authentication routes."""
def __init__(self, controller):
"""Init method of Auth class takes the parameters below.
Args:
controller(kytos.core.controller): A Controller instance.
"""
self.controller = controller
self.namespace = "kytos.core.auth.users"
self.token_expiration_minutes = self.get_token_expiration()
if self.controller.options.create_superuser is True:
self._create_superuser()
@staticmethod
def get_token_expiration():
"""Return token expiration time in minutes defined in kytos conf."""
options = KytosConfig().options['daemon']
return options.token_expiration_minutes
@classmethod
def get_jwt_secret(cls):
"""Return JWT secret defined in kytos conf."""
options = KytosConfig().options['daemon']
return options.jwt_secret
@classmethod
def _generate_token(cls, username, time_exp):
"""Generate a jwt token."""
return jwt.encode(
{
'username': username,
'iss': "Kytos NApps Server",
'exp': time_exp,
},
Auth.get_jwt_secret(),
algorithm='HS256',
)
def _create_superuser(self):
"""Create a superuser using Storehouse."""
def _create_superuser_callback(_event, box, error):
if error:
LOG.error('Superuser was not created. Error: %s', error)
if box:
LOG.info("Superuser successfully created")
def get_username():
return input("Username: ")
def get_email():
return input("Email: ")
username = get_username()
email = get_email()
while True:
password = getpass.getpass()
re_password = getpass.getpass('Retype password: ')
if password == re_password:
break
print('Passwords do not match. Try again')
user = {
"username": username,
"email": email,
"password": hashlib.sha512(password.encode()).hexdigest(),
}
content = {
"namespace": self.namespace,
"box_id": user["username"],
"data": user,
"callback": _create_superuser_callback,
}
event = KytosEvent(name="kytos.storehouse.create", content=content)
self.controller.buffers.app.put(event)
def register_core_auth_services(self):
"""
Register /kytos/core/ services over authentication.
It registers create, authenticate, list all, list specific, delete and
update users.
"""
self.controller.api_server.register_core_endpoint(
"auth/login/", self._authenticate_user
)
self.controller.api_server.register_core_endpoint(
"auth/users/", self._list_users
)
self.controller.api_server.register_core_endpoint(
"auth/users/<uid>", self._list_user
)
self.controller.api_server.register_core_endpoint(
"auth/users/", self._create_user, methods=["POST"]
)
self.controller.api_server.register_core_endpoint(
"auth/users/<uid>", self._delete_user, methods=["DELETE"]
)
self.controller.api_server.register_core_endpoint(
"auth/users/<uid>", self._update_user, methods=["PATCH"]
)
def _authenticate_user(self):
"""Authenticate a user using Storehouse."""
username = request.authorization["username"]
password = request.authorization["password"].encode()
try:
user = self._find_user(username)[0].get("data")
if user.get("password") != hashlib.sha512(password).hexdigest():
raise KeyError
time_exp = datetime.datetime.utcnow() + datetime.timedelta(
minutes=self.token_expiration_minutes
)
token = self._generate_token(username, time_exp)
return {"token": token.decode()}, HTTPStatus.OK.value
except (AttributeError, KeyError) as exc:
result = f"Incorrect username or password: {exc}"
return result, HTTPStatus.UNAUTHORIZED.value
def _find_user(self, uid):
"""Find a specific user using Storehouse."""
response = {}
def _find_user_callback(_event, box, error):
nonlocal response
if not box:
response = {
"answer": f'User with uid {uid} not found',
"code": HTTPStatus.NOT_FOUND.value
}
elif error:
response = {
"answer": "User data cannot be shown",
"code": HTTPStatus.INTERNAL_SERVER_ERROR.value,
}
else:
response = {
"answer": {"data": box.data},
"code": HTTPStatus.OK.value,
}
content = {
"box_id": uid,
"namespace": self.namespace,
"callback": _find_user_callback,
}
event = KytosEvent(name="kytos.storehouse.retrieve", content=content)
self.controller.buffers.app.put(event)
while True:
time.sleep(0.1)
if response:
break
return response["answer"], response["code"]
@authenticated
def _list_user(self, uid):
"""List a specific user using Storehouse."""
answer, code = self._find_user(uid)
if code == HTTPStatus.OK.value:
del answer['data']['password']
return answer, code
@authenticated
def _list_users(self):
"""List all users using Storehouse."""
response = {}
def _list_users_callback(_event, boxes, error):
nonlocal response
if error:
response = {
"answer": "Users cannot be listed",
"code": HTTPStatus.INTERNAL_SERVER_ERROR.value,
}
else:
response = {
"answer": {"users": boxes},
"code": HTTPStatus.OK.value,
}
content = {
"namespace": self.namespace,
"callback": _list_users_callback,
}
event = KytosEvent(name="kytos.storehouse.list", content=content)
self.controller.buffers.app.put(event)
while True:
time.sleep(0.1)
if response:
break
return response["answer"], response["code"]
@authenticated
def _create_user(self):
"""Save a user using Storehouse."""
response = {}
def _create_user_callback(_event, box, error):
nonlocal response
if not box:
response = {
"answer": f'User already exists',
"code": HTTPStatus.CONFLICT.value,
}
elif error:
response = {
"answer": "User has not been created",
"code": HTTPStatus.INTERNAL_SERVER_ERROR.value,
}
else:
response = {
"answer": "User successfully created",
"code": HTTPStatus.OK.value,
}
req = request.json
password = req["password"].encode()
data = {
"username": req["username"],
"email": req["email"],
"password": hashlib.sha512(password).hexdigest(),
}
content = {
"namespace": self.namespace,
"box_id": data["username"],
"data": data,
"callback": _create_user_callback,
}
event = KytosEvent(name="kytos.storehouse.create", content=content)
self.controller.buffers.app.put(event)
while True:
time.sleep(0.1)
if response:
break
return response["answer"], response["code"]
@authenticated
def _delete_user(self, uid):
"""Delete a user using Storehouse."""
response = {}
def _delete_user_callback(_event, box, error):
nonlocal response
if not box:
response = {
"answer": f'User with uid {uid} not found',
"code": HTTPStatus.NOT_FOUND.value
}
elif error:
response = {
"answer": "User has not been deleted",
"code": HTTPStatus.INTERNAL_SERVER_ERROR.value,
}
else:
response = {
"answer": "User successfully deleted",
"code": HTTPStatus.OK.value,
}
content = {
"box_id": uid,
"namespace": self.namespace,
"callback": _delete_user_callback,
}
event = KytosEvent(name="kytos.storehouse.delete", content=content)
self.controller.buffers.app.put(event)
while True:
time.sleep(0.1)
if response:
break
return response["answer"], response["code"]
@authenticated
def _update_user(self, uid):
"""Update user data using Storehouse."""
response = {}
def _update_user_callback(_event, box, error):
nonlocal response
if not box:
response = {
"answer": f'User with uid {uid} not found',
"code": HTTPStatus.NOT_FOUND.value
}
elif error:
response = {
"answer": "User has not been updated",
"code": HTTPStatus.INTERNAL_SERVER_ERROR.value,
}
else:
response = {
"answer": "User successfully updated",
"code": HTTPStatus.OK.value,
}
req = request.json
allowed = ["username", "email", "password"]
data = {}
for key, value in req.items():
if key in allowed:
data[key] = value
content = {
"namespace": self.namespace,
"box_id": uid,
"data": data,
"callback": _update_user_callback,
}
event = KytosEvent(name="kytos.storehouse.update", content=content)
self.controller.buffers.app.put(event)
while True:
time.sleep(0.1)
if response:
break
return response["answer"], response["code"]
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from collections import defaultdict
from operator import itemgetter
from flask import flash, jsonify, redirect, request, session
from sqlalchemy import func, inspect
from sqlalchemy.orm import joinedload, lazyload
from werkzeug.exceptions import BadRequest, Forbidden, NotFound
from indico.core.db import db
from indico.core.logger import Logger
from indico.modules.events.controllers.base import RHDisplayEventBase
from indico.modules.events.management.controllers import RHManageEventBase
from indico.modules.vc.exceptions import VCRoomError, VCRoomNotFoundError
from indico.modules.vc.forms import VCRoomListFilterForm
from indico.modules.vc.models.vc_rooms import VCRoom, VCRoomEventAssociation, VCRoomLinkType, VCRoomStatus
from indico.modules.vc.notifications import notify_created
from indico.modules.vc.util import find_event_vc_rooms, get_managed_vc_plugins, get_vc_plugins, resolve_title
from indico.modules.vc.views import WPVCEventPage, WPVCManageEvent, WPVCService
from indico.util.date_time import as_utc, get_day_end, get_day_start, now_utc
from indico.util.i18n import _
from indico.util.iterables import group_list
from indico.web.flask.util import url_for
from indico.web.forms.base import FormDefaults
from indico.web.rh import RHProtected
from indico.web.util import _pop_injected_js, jsonify_data, jsonify_template
def process_vc_room_association(plugin, event, vc_room, form, event_vc_room=None, allow_same_room=False):
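# process_vc_room_association creates or updates the link between a VC room and
# an event. It returns the VCRoomEventAssociation on success, or None (after
# rolling back the session and flashing an error) when the chosen link object
# already has a room attached or the room is already attached to the event.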
# disable autoflush, so that the new event_vc_room does not influence the result
with db.session.no_autoflush:
if event_vc_room is None:
event_vc_room = VCRoomEventAssociation()
plugin.update_data_association(event, vc_room, event_vc_room, form.data)
existing = set()
if event_vc_room.link_object is not None:
# check whether there is a room-event association already present
# for the given event, room and plugin
q = (VCRoomEventAssociation.query
.filter(VCRoomEventAssociation.event == event,
VCRoomEventAssociation.link_object == event_vc_room.link_object)
.join(VCRoom))
if allow_same_room:
q = q.filter(VCRoom.id != vc_room.id)
existing = {x.vc_room for x in q}
if event_vc_room.link_type != VCRoomLinkType.event and existing:
db.session.rollback()
flash(_("There is already a VC room attached to '{link_object_title}'.").format(
link_object_title=resolve_title(event_vc_room.link_object)), 'error')
return None
elif event_vc_room.link_type == VCRoomLinkType.event and vc_room in existing:
db.session.rollback()
flash(_("This {plugin_name} room is already attached to the event.").format(plugin_name=plugin.friendly_name),
'error')
return None
else:
return event_vc_room
class RHVCManageEventBase(RHManageEventBase):
pass
class RHEventVCRoomMixin:
normalize_url_spec = {
'locators': {
lambda self: self.event_vc_room
}
}
def _process_args(self):
self.event_vc_room = VCRoomEventAssociation.get_or_404(request.view_args['event_vc_room_id'])
self.vc_room = self.event_vc_room.vc_room
class RHVCManageEvent(RHVCManageEventBase):
"""List the available videoconference rooms."""
def _process(self):
room_event_assocs = VCRoomEventAssociation.find_for_event(self.event, include_hidden=True,
include_deleted=True).all()
event_vc_rooms = [event_vc_room for event_vc_room in room_event_assocs if event_vc_room.vc_room.plugin]
return WPVCManageEvent.render_template('manage_event.html', self.event,
event_vc_rooms=event_vc_rooms, plugins=list(get_vc_plugins().values()))
class RHVCManageEventSelectService(RHVCManageEventBase):
"""
List available videoconference plugins to create a new
videoconference room.
"""
def _process(self):
action = request.args.get('vc_room_action', '.manage_vc_rooms_create')
attach = request.args.get('attach', '')
return jsonify_template('vc/manage_event_select.html', event=self.event, vc_room_action=action,
plugins=list(get_vc_plugins().values()), attach=attach)
class RHVCManageEventCreateBase(RHVCManageEventBase):
def _process_args(self):
RHVCManageEventBase._process_args(self)
try:
self.plugin = get_vc_plugins()[request.view_args['service']]
except KeyError:
raise NotFound
class RHVCManageEventCreate(RHVCManageEventCreateBase):
"""Load the form for the selected VC plugin."""
def _process(self):
if not self.plugin.can_manage_vc_rooms(session.user, self.event):
flash(_('You are not allowed to create {plugin_name} rooms for this event.').format(
plugin_name=self.plugin.friendly_name), 'error')
raise Forbidden
form = self.plugin.create_form(event=self.event)
if form.validate_on_submit():
vc_room = VCRoom(created_by_user=session.user)
vc_room.type = self.plugin.service_name
vc_room.status = VCRoomStatus.created
event_vc_room = process_vc_room_association(self.plugin, self.event, vc_room, form)
if not event_vc_room:
return jsonify_data(flash=False)
with db.session.no_autoflush:
self.plugin.update_data_vc_room(vc_room, form.data, is_new=True)
try:
# avoid flushing the incomplete vc room to the database
with db.session.no_autoflush:
self.plugin.create_room(vc_room, self.event)
notify_created(self.plugin, vc_room, event_vc_room, self.event, session.user)
except VCRoomError as err:
if err.field is None:
raise
field = getattr(form, err.field)
field.errors.append(str(err))
db.session.rollback() # otherwise the incomplete vc room would be added to the db!
else:
db.session.add(vc_room)
flash(_("{plugin_name} room '{room.name}' created").format(
plugin_name=self.plugin.friendly_name, room=vc_room), 'success')
return jsonify_data(flash=False)
form_html = self.plugin.render_form(plugin=self.plugin, event=self.event, form=form,
skip_fields=form.skip_fields | {'name'})
return jsonify(html=form_html, js=_pop_injected_js())
class RHVCSystemEventBase(RHEventVCRoomMixin, RHVCManageEventBase):
def _process_args(self):
RHVCManageEventBase._process_args(self)
RHEventVCRoomMixin._process_args(self)
if self.vc_room.type != request.view_args['service']:
raise NotFound
self.plugin = self.vc_room.plugin
class RHVCManageEventModify(RHVCSystemEventBase):
"""Modify an existing VC room."""
def _process(self):
if not self.plugin.can_manage_vc_rooms(session.user, self.event):
flash(_('You are not allowed to modify {} rooms for this event.').format(self.plugin.friendly_name),
'error')
raise Forbidden
form = self.plugin.create_form(self.event,
existing_vc_room=self.vc_room,
existing_event_vc_room=self.event_vc_room)
if form.validate_on_submit():
self.plugin.update_data_vc_room(self.vc_room, form.data)
event_vc_room = process_vc_room_association(
self.plugin, self.event, self.vc_room, form, event_vc_room=self.event_vc_room, allow_same_room=True)
if not event_vc_room:
return jsonify_data(flash=False)
self.vc_room.modified_dt = now_utc()
try:
self.plugin.update_room(self.vc_room, self.event)
except VCRoomNotFoundError as err:
Logger.get('modules.vc').warning("VC room %r not found. Setting it as deleted.", self.vc_room)
self.vc_room.status = VCRoomStatus.deleted
flash(str(err), 'error')
return jsonify_data(flash=False)
except VCRoomError as err:
if err.field is None:
raise
field = getattr(form, err.field)
field.errors.append(str(err))
db.session.rollback()
else:
# TODO
# notify_modified(self.vc_room, self.event, session.user)
flash(_("{plugin_name} room '{room.name}' updated").format(
plugin_name=self.plugin.friendly_name, room=self.vc_room), 'success')
return jsonify_data(flash=False)
form_html = self.plugin.render_form(plugin=self.plugin, event=self.event, form=form,
existing_vc_room=self.vc_room,
skip_fields=form.skip_fields | {'name'})
return jsonify(html=form_html, js=_pop_injected_js())
class RHVCManageEventRefresh(RHVCSystemEventBase):
"""Refresh an existing VC room, fetching information from the VC system."""
def _process(self):
if not self.plugin.can_manage_vc_rooms(session.user, self.event):
flash(_('You are not allowed to refresh {plugin_name} rooms for this event.').format(
plugin_name=self.plugin.friendly_name), 'error')
raise Forbidden
Logger.get('modules.vc').info("Refreshing VC room %r from event %r", self.vc_room, self.event)
try:
self.plugin.refresh_room(self.vc_room, self.event)
except VCRoomNotFoundError as err:
Logger.get('modules.vc').warning("VC room %r not found. Setting it as deleted.", self.vc_room)
self.vc_room.status = VCRoomStatus.deleted
flash(str(err), 'error')
return redirect(url_for('.manage_vc_rooms', self.event))
flash(_("{plugin_name} room '{room.name}' refreshed").format(
plugin_name=self.plugin.friendly_name, room=self.vc_room), 'success')
return redirect(url_for('.manage_vc_rooms', self.event))
class RHVCManageEventRemove(RHVCSystemEventBase):
"""Remove an existing VC room."""
def _process(self):
if not self.plugin.can_manage_vc_rooms(session.user, self.event):
flash(_('You are not allowed to remove {} rooms from this event.').format(self.plugin.friendly_name),
'error')
raise Forbidden
delete_all = request.args.get('delete_all') == '1'
self.event_vc_room.delete(session.user, delete_all=delete_all)
flash(_("{plugin_name} room '{room.name}' removed").format(
plugin_name=self.plugin.friendly_name, room=self.vc_room), 'success')
return redirect(url_for('.manage_vc_rooms', self.event))
class RHVCEventPage(RHDisplayEventBase):
"""List the VC rooms in an event page."""
def _process(self):
event_vc_rooms = [event_vc_room
for event_vc_room in VCRoomEventAssociation.find_for_event(self.event).all()
if event_vc_room.vc_room.plugin]
vc_plugins_available = bool(get_vc_plugins())
linked_to = defaultdict(lambda: defaultdict(list))
for event_vc_room in event_vc_rooms:
linked_to[event_vc_room.link_type.name][event_vc_room.link_object].append(event_vc_room)
return WPVCEventPage.render_template('event_vc.html', self.event,
event_vc_rooms=event_vc_rooms, linked_to=linked_to,
vc_plugins_available=vc_plugins_available)
class RHVCManageAttach(RHVCManageEventCreateBase):
"""Attach a room to the event."""
def _process(self):
defaults = FormDefaults(self.plugin.get_vc_room_attach_form_defaults(self.event))
form = self.plugin.vc_room_attach_form(prefix='vc-', obj=defaults, event=self.event,
service=self.plugin.service_name)
if form.validate_on_submit():
vc_room = form.data['room']
if not self.plugin.can_manage_vc_rooms(session.user, self.event):
flash(_("You are not allowed to attach {plugin_name} rooms to this event.").format(
plugin_name=self.plugin.friendly_name), 'error')
elif not self.plugin.can_manage_vc_room(session.user, vc_room):
flash(_("You are not authorized to attach the room '{0}'").format(vc_room.name), 'error')
else:
event_vc_room = process_vc_room_association(self.plugin, self.event, vc_room, form)
if event_vc_room:
flash(_("The room has been attached to the event."), 'success')
db.session.add(event_vc_room)
return jsonify_data(flash=False)
return jsonify_template('vc/attach_room.html', event=self.event, form=form,
skip_fields=form.conditional_fields | {'room'},
plugin=self.plugin)
class RHVCManageSearch(RHVCManageEventCreateBase):
"""Search for a room based on its name."""
def _process_args(self):
RHVCManageEventCreateBase._process_args(self)
self.query = request.args.get('q', '')
if len(self.query) < 3:
raise BadRequest("A query has to be provided, with at least 3 characters")
def _iter_allowed_rooms(self):
query = (db.session.query(VCRoom, func.count(VCRoomEventAssociation.id).label('event_count'))
.filter(func.lower(VCRoom.name).contains(self.query.lower()), VCRoom.status != VCRoomStatus.deleted,
VCRoom.type == self.plugin.service_name)
.join(VCRoomEventAssociation)
# Plugins might add eager-loaded extensions to the table - since we cannot group by them
# we need to make sure everything is lazy-loaded here.
.options((lazyload(r) for r in inspect(VCRoom).relationships.keys()),
joinedload('events').joinedload('event').joinedload('acl_entries'))
.group_by(VCRoom.id)
.order_by(db.desc('event_count'))
.limit(10))
return ((room, count) for room, count in query if room.plugin.can_manage_vc_room(session.user, room))
def _process(self):
result = [{'id': room.id, 'name': room.name} for room, count in self._iter_allowed_rooms()]
return jsonify(result)
class RHVCRoomList(RHProtected):
"""Provide a list of videoconference rooms."""
def _check_access(self):
RHProtected._check_access(self)
if not get_managed_vc_plugins(session.user):
raise Forbidden
def _process(self):
form = VCRoomListFilterForm(request.args, csrf_enabled=False)
results = None
if request.args and form.validate():
reverse = form.direction.data == 'desc'
from_dt = as_utc(get_day_start(form.start_date.data)) if form.start_date.data else None
to_dt = as_utc(get_day_end(form.end_date.data)) if form.end_date.data else None
results = find_event_vc_rooms(from_dt=from_dt, to_dt=to_dt, distinct=True)
results = group_list((r for r in results if r.event),
key=lambda r: r.event.start_dt.date(),
sort_by=lambda r: r.event.start_dt,
sort_reverse=reverse)
results = dict(sorted(results.items(), key=itemgetter(0), reverse=reverse))
return WPVCService.render_template('vc_room_list.html', form=form, results=results,
action=url_for('.vc_room_list'))
|
|
#!/usr/bin/env python
#coding=utf-8
# by Qiyuan Gong
# [email protected]
# http://github.com/qiyuangong
# http://cn.linkedin.com/pub/qiyuan-gong/6b/831/407/
import pdb
from models.gentree import GenTree
from models.bucket import Bucket
from itertools import combinations
_DEBUG = True
gl_treelist = {}
gl_att_tree = {}
gl_treesupport = 0
gl_elementcount = 0
gl_result = []
gl_data = []
# comparison function for sorting tree nodes
def node_cmp(node1, node2):
"""compare node1(str) and node2(str)
Compare two nodes accroding to their support
"""
support1 = gl_att_tree[node1].support
support2 = gl_att_tree[node2].support
if support1 != support2:
return cmp(support1, support2)
else:
return cmp(node1, node2)
def list_to_str(value_list, cmpfun=node_cmp, sep=';'):
"""covert sorted str list (sorted by cmpfun) to str
value (splited by sep). This fuction is value safe, which means
value_list will not be changed.
"""
temp = value_list[:]
temp.sort(cmp=cmpfun)
return sep.join(temp)
def information_gain(bucket, pick_value=''):
"""get information gain from bucket accroding to pick_value
"""
ig = 0.0
parent_value = bucket.value
cover_number = 0
# All NCP terms share the same denominator, so the true NCP is not
# computed here; only the numerator part is used.
if pick_value == '':
# compute bucket's information gain
for gen_value in bucket.value:
if gl_att_tree[gen_value].support == 0:
continue
for temp in bucket.member_index:
ig = ig + trans_information_gain(gl_data[temp], gen_value)
else:
# pick node's information gain
if gl_att_tree[pick_value].support == 0:
return 0
for temp in bucket.member_index:
ig = ig + trans_information_gain(gl_data[temp], pick_value)
return ig
def trans_information_gain(tran, pick_value):
"""get information gain for trans accroding to pick_value
"""
ig = 0.0
ncp = gl_att_tree[pick_value].support
for t in tran:
if pick_value in gl_treelist[t]:
ig += ncp
return ig
def pick_node(bucket):
"""find the split node with largest information gain.
Then split bucket to buckets accroding to this node.
"""
buckets = {}
result_list = []
max_ig = -10000
max_value = ''
check_list = [t for t in bucket.value if t not in bucket.split_list]
for t in check_list:
if len(gl_att_tree[t].child) != 0:
ig = information_gain(bucket, t)
if ig > max_ig:
max_ig = ig
max_value = t
# begin to expand node on pick_value
if max_value == '':
print "Error: list empty!!"
return ('', {})
# get index of max_value
index = bucket.value.index(max_value)
child_value = [t.value for t in gl_att_tree[max_value].child]
for i in range(1, len(child_value)+1):
temp = combinations(child_value, i)
temp = [list(t) for t in temp]
result_list.extend(temp)
# generate child buckets
child_level = bucket.level[:]
child_value = bucket.value[:]
now_level = bucket.level[index] + 1
del child_level[index]
del child_value[index]
for temp in result_list:
temp_level = child_level[:]
temp_value = child_value[:]
for t in temp:
temp_level.insert(index, now_level)
temp_value.insert(index, t)
str_value = list_to_str(temp)
buckets[str_value] = Bucket([], temp_value, temp_level)
bucket.split_list.append(max_value)
return (max_value, buckets)
def distribute_data(bucket, buckets, pick_value):
"""distribute records from parent_bucket to buckets (splited buckets)
accroding to records elements.
"""
if len(buckets) == 0:
print "Error: buckets is empty!"
return
data_index = bucket.member_index[:]
for temp in data_index:
gen_list = []
for t in gl_data[temp]:
treelist = gl_treelist[t]
try:
pos = treelist.index(pick_value)
# if covered, then replaced with new value
if pos > 0:
gen_list.append(treelist[pos-1])
else:
print "Error: pick node is leaf, which cannot be splited"
except:
continue
gen_list = list(set(gen_list))
# sort to ensure the order
str_value = list_to_str(gen_list)
try:
buckets[str_value].member_index.append(temp)
except:
pdb.set_trace()
print "Error: Cannot find key."
def balance_partitions(parent_bucket, buckets, K, pick_value):
"""handel buckets with less than K records
"""
global gl_result
left_over = []
for k, t in buckets.items():
if len(t.member_index) < K:
# add records of buckets with fewer than K elements
# to the left_over partition
left_over.extend(t.member_index[:])
del buckets[k]
if len(left_over) == 0:
# left over bucket is empty, skip balance step
return
# re-distribute the transactions with the least information gain from
# buckets larger than K to left_over, to ensure the number of
# records in left_over reaches K.
# 'flag' denotes whether the re-distribution succeeded.
flag = True
while len(left_over) < K:
# each iteration picks the transaction with the least information gain from buckets larger than K
check_list = [t for t in buckets.values() if len(t.member_index) > K]
if len(check_list) == 0:
flag = False
break
min_ig = 10000000000000000
min_key = (0, 0)
for i, temp in enumerate(check_list):
for j, t in enumerate(temp.member_index):
ig = trans_information_gain(gl_data[t], pick_value)
if ig < min_ig:
min_ig = ig
min_key = (i, j)
left_over.append(check_list[min_key[0]].member_index[min_key[1]])
del check_list[min_key[0]].member_index[min_key[1]]
if flag == False:
# Note: flag == False means the re-distribution was unsuccessful,
# so we pop a bucket from buckets and merge it with left_over.
# The popped bucket is larger than K, so left_over will also be larger than K.
parent_bucket.splitable = False
try:
min_ig = 10000000000000000
min_key = ''
for k, t in buckets.items():
ig = information_gain(t, pick_value)
if ig < min_ig:
min_ig = ig
min_key = k
left_over.extend(buckets[min_key].member_index[:])
del buckets[min_key]
except:
print "Error: buckets is empty"
pdb.set_trace()
parent_bucket.member_index = left_over[:]
str_value = list_to_str(parent_bucket.value)
buckets[str_value] = parent_bucket
def check_splitable(bucket, K):
"""check if bucket can further drill down
"""
if len(bucket.member_index) == K:
bucket.splitable = False
return False
check_list = [t for t in bucket.value if t not in bucket.split_list]
if bucket.splitable:
for t in check_list:
if len(gl_att_tree[t].child) != 0:
return True
bucket.splitable = False
return False
def anonymize(bucket, K):
"""recursively split dataset to create anonymization buckets
"""
global gl_result
if check_splitable(bucket, K) == False:
gl_result.append(bucket)
return
(pick_value, expandNode) = pick_node(bucket)
distribute_data(bucket, expandNode, pick_value)
balance_partitions(bucket, expandNode, K, pick_value)
for t in expandNode.values():
anonymize(t, K)
def iloss(tran, middle):
"""return iloss caused by anon tran to middle
"""
iloss = 0.0
for t in tran:
ntemp = gl_att_tree[t]
checktemp = ntemp.parent[:]
checktemp.insert(0, ntemp)
for ptemp in checktemp:
if ptemp.value in middle:
break
else:
print "Program Error!!!! t=%s middle=%s" % (t, middle)
pdb.set_trace()
if ptemp.value == t:
continue
iloss = iloss + ptemp.support
# only one attribute is involved, so we can simplify NCP
iloss = iloss * 1.0 / gl_treesupport
return iloss
def setalliloss(buckets):
"""return iloss sum of buckets, recompute iloss foreach bucket
"""
alliloss = 0.0
for gtemp in buckets:
gloss = 0.0
for mtemp in gtemp.member_index:
gloss = gloss + iloss(gl_data[mtemp], gtemp.value)
gtemp.iloss = gloss
alliloss += gloss
alliloss = alliloss * 1.0 / gl_elementcount
return alliloss
def partition(att_tree, data, K):
"""partition tran part of microdata
"""
result = []
global gl_treesupport, gl_treelist, gl_att_tree, gl_elementcount, gl_data, gl_result
gl_result = []
gl_treelist = {}
gl_elementcount = 0
gl_treesupport = 0
gl_data = data[:]
for t in gl_data:
gl_elementcount += len(t)
gl_att_tree = att_tree
gl_treesupport = gl_att_tree['*'].support
for k, v in gl_att_tree.iteritems():
if v.support == 0:
gl_treelist[k] = [t.value for t in v.parent]
gl_treelist[k].insert(0, k)
print '-'*30
print "K=%d" % K
if _DEBUG:
print "Begin Partition!"
anonymize(Bucket(range(len(gl_data)), ['*'], [0]), K)
print "Publishing Result Data..."
# changed to percentage
all_loss = 100.0 * setalliloss(gl_result)
if _DEBUG:
# print [len(t.member_index) for t in gl_result]
print "Number of buckets %d" % len(gl_result)
print "iloss = %0.2f" % all_loss + "%"
# transform result
result = [(t.member_index[:], t.value) for t in gl_result]
return result
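# Usage sketch (hedged: 'att_tree' is assumed to be a dict of GenTree nodes
# keyed by value with a '*' root, and 'data' a list of transactions, i.e. lists
# of leaf values, produced by the caller's preprocessing; K=10 is illustrative):
#
#   anonymized = partition(att_tree, data, 10)
#   for member_index, generalized_values in anonymized:
#       print len(member_index), generalized_values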
|
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import math
import re
import sys
import unicodedata
import six
from octavia.openstack.common.gettextutils import _
UNIT_PREFIX_EXPONENT = {
'k': 1,
'K': 1,
'Ki': 1,
'M': 2,
'Mi': 2,
'G': 3,
'Gi': 3,
'T': 4,
'Ti': 4,
}
UNIT_SYSTEM_INFO = {
'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
# NOTE(flaper87): The following globals are used by `mask_password`
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS_2 = []
_SANITIZE_PATTERNS_1 = []
# NOTE(amrith): Some regular expressions have only one parameter, some
# have two parameters. Use different lists of patterns here.
_FORMAT_PATTERNS_1 = [r'(%(key)s\s*[=]\s*)[^\s^\'^\"]+']
_FORMAT_PATTERNS_2 = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
r'(%(key)s\s+[\"\']).*?([\"\'])',
r'([-]{2}%(key)s\s+)[^\'^\"^=^\s]+([\s]*)',
r'(<%(key)s>).*?(</%(key)s>)',
r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?'
'[\'"]).*?([\'"])',
r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)']
for key in _SANITIZE_KEYS:
for pattern in _FORMAT_PATTERNS_2:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS_2.append(reg_ex)
for pattern in _FORMAT_PATTERNS_1:
reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
_SANITIZE_PATTERNS_1.append(reg_ex)
def int_from_bool_as_string(subject):
"""Interpret a string as a boolean and return either 1 or 0.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
return bool_from_string(subject) and 1 or 0
def bool_from_string(subject, strict=False, default=False):
"""Interpret a string as a boolean.
A case-insensitive match is performed such that strings matching 't',
'true', 'on', 'y', 'yes', or '1' are considered True and, when
`strict=False`, anything else returns the value specified by 'default'.
Useful for JSON-decoded stuff and config file parsing.
If `strict=True`, unrecognized values, including None, will raise a
ValueError which is useful when parsing values passed in from an API call.
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
"""
if not isinstance(subject, six.string_types):
subject = six.text_type(subject)
lowered = subject.strip().lower()
if lowered in TRUE_STRINGS:
return True
elif lowered in FALSE_STRINGS:
return False
elif strict:
acceptable = ', '.join(
"'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
msg = _("Unrecognized value '%(val)s', acceptable values are:"
" %(acceptable)s") % {'val': subject,
'acceptable': acceptable}
raise ValueError(msg)
else:
return default
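# NOTE(editor): a minimal usage sketch of the boolean helpers above; this
# illustrative function is not part of the original module.
def _bool_parsing_examples():
    # Case-insensitive true/false strings map to booleans ...
    assert bool_from_string('YES') is True
    assert bool_from_string('off') is False
    # ... and unrecognized values fall back to `default` unless strict=True.
    assert bool_from_string('maybe', default=True) is True
    assert int_from_bool_as_string('true') == 1
    assert int_from_bool_as_string('nope') == 0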
def safe_decode(text, incoming=None, errors='strict'):
"""Decodes incoming text/bytes string using `incoming` if they're not
already unicode.
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a unicode `incoming` encoded
representation of it.
:raises TypeError: If text is not an instance of str
"""
if not isinstance(text, (six.string_types, six.binary_type)):
raise TypeError("%s can't be decoded" % type(text))
if isinstance(text, six.text_type):
return text
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
try:
return text.decode(incoming, errors)
except UnicodeDecodeError:
# Note(flaper87) If we get here, it means that
# sys.stdin.encoding / sys.getdefaultencoding
# didn't return a suitable encoding to decode
# text. This happens mostly when global LANG
# var is not set correctly and there's no
# default encoding. In this case, most likely
# python will use ASCII or ANSI encoders as
# default encodings but they won't be capable
# of decoding non-ASCII characters.
#
# Also, UTF-8 is being used since it's an ASCII
# extension.
return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
encoding='utf-8', errors='strict'):
"""Encodes incoming text/bytes string using `encoding`.
If incoming is not specified, text is expected to be encoded with
current python's default encoding. (`sys.getdefaultencoding`)
:param incoming: Text's current encoding
:param encoding: Expected encoding for text (Default UTF-8)
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a bytestring `encoding` encoded
representation of it.
:raises TypeError: If text is not an instance of str
"""
if not isinstance(text, (six.string_types, six.binary_type)):
raise TypeError("%s can't be encoded" % type(text))
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
if isinstance(text, six.text_type):
return text.encode(encoding, errors)
elif text and encoding != incoming:
# Decode text before encoding it with `encoding`
text = safe_decode(text, incoming, errors)
return text.encode(encoding, errors)
else:
return text
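# NOTE(editor): an illustrative round-trip through the codec helpers above;
# this helper is not part of the original module.
def _codec_examples():
    # Bytes are decoded with the given (or detected) encoding ...
    assert safe_decode(b'caf\xc3\xa9', incoming='utf-8') == u'caf\xe9'
    # ... and text is encoded back to bytes, defaulting to UTF-8.
    assert safe_encode(u'caf\xe9') == b'caf\xc3\xa9'
    # Already-unicode input passes through safe_decode unchanged.
    assert safe_decode(u'plain') == u'plain'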
def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Converts a string into a float representation of bytes.
The units supported for IEC ::
Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
KB, KiB, MB, MiB, GB, GiB, TB, TiB
The units supported for SI ::
kb(it), Mb(it), Gb(it), Tb(it)
kB, MB, GB, TB
Note that the SI unit system does not support capital letter 'K'
:param text: String input for bytes size conversion.
:param unit_system: Unit system for byte size conversion.
:param return_int: If True, returns integer representation of text
in bytes. (default: decimal)
:returns: Numerical representation of text in bytes.
:raises ValueError: If text has an invalid value.
"""
try:
base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
except KeyError:
msg = _('Invalid unit system: "%s"') % unit_system
raise ValueError(msg)
match = reg_ex.match(text)
if match:
magnitude = float(match.group(1))
unit_prefix = match.group(2)
if match.group(3) in ['b', 'bit']:
magnitude /= 8
else:
msg = _('Invalid string format: %s') % text
raise ValueError(msg)
if not unit_prefix:
res = magnitude
else:
res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
if return_int:
return int(math.ceil(res))
return res
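# NOTE(editor): a quick sketch of the unit handling above; this helper is an
# editor's illustration and not part of the original module.
def _string_to_bytes_examples():
    # IEC units use a base of 1024, SI units a base of 1000.
    assert string_to_bytes('1KiB') == 1024.0
    assert string_to_bytes('1kB', unit_system='SI') == 1000.0
    # Bit suffixes are divided by 8; return_int rounds up to whole bytes.
    assert string_to_bytes('9b', return_int=True) == 2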
def to_slug(value, incoming=None, errors="strict"):
"""Normalize string.
Convert to lowercase, remove non-word characters, and convert spaces
to hyphens.
Inspired by Django's `slugify` filter.
:param value: Text to slugify
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: slugified unicode representation of `value`
:raises TypeError: If text is not an instance of str
"""
value = safe_decode(value, incoming, errors)
# NOTE(aababilov): no need to use safe_(encode|decode) here:
# encodings are always "ascii", error handling is always "ignore"
# and types are always known (first: unicode; second: str)
value = unicodedata.normalize("NFKD", value).encode(
"ascii", "ignore").decode("ascii")
value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
return SLUGIFY_HYPHENATE_RE.sub("-", value)
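# NOTE(editor): an illustrative call to to_slug(); not part of the original
# module.
def _to_slug_examples():
    # Non-word characters are stripped, whitespace becomes hyphens and
    # accented characters are folded to ASCII.
    assert to_slug(u'Caf\xe9 au lait!') == u'cafe-au-lait'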
def mask_password(message, secret="***"):
"""Replace password with 'secret' in message.
:param message: The string which includes security information.
:param secret: value with which to replace passwords.
:returns: The unicode value of message with the password fields masked.
For example:
>>> mask_password("'adminPass' : 'aaaaa'")
"'adminPass' : '***'"
>>> mask_password("'admin_pass' : 'aaaaa'")
"'admin_pass' : '***'"
>>> mask_password('"password" : "aaaaa"')
'"password" : "***"'
>>> mask_password("'original_password' : 'aaaaa'")
"'original_password' : '***'"
>>> mask_password("u'original_password' : u'aaaaa'")
"u'original_password' : u'***'"
"""
message = six.text_type(message)
# NOTE(ldbragst): Check to see if anything in message contains any key
# specified in _SANITIZE_KEYS, if not then just return the message since
# we don't have to mask any passwords.
if not any(key in message for key in _SANITIZE_KEYS):
return message
substitute = r'\g<1>' + secret + r'\g<2>'
for pattern in _SANITIZE_PATTERNS_2:
message = re.sub(pattern, substitute, message)
substitute = r'\g<1>' + secret
for pattern in _SANITIZE_PATTERNS_1:
message = re.sub(pattern, substitute, message)
return message
|
|
import time
import datetime
import requests_mock
from django.conf import settings
from selenium.common.exceptions import NoSuchElementException
from events.models import User, Playlist
from functional_tests.selenium_test_case import SeleniumTestCase
from functional_tests.utils import navbar_active_element_text
class PlaylistPageTests(SeleniumTestCase):
@requests_mock.Mocker()
def test_can_open_playlist_page(self, m):
m.get(settings.API_BASE_ADDRESS + '/playlist/', json={
"response": {
"status": {
"version": 0.1,
"code": 0,
"status": "Success"
},
"tracks": [
],
"pagination": {
"total": 2,
"offset": 0,
"results": 2
}
}
}, status_code=200)
self.browser.get(self.live_server_url + '/playlist')
today = datetime.datetime.now()
date_text = '%s, %s' % (today.strftime('%B'), today.strftime('%d'))
self.assertIn('Playlist a day for %s' % date_text, self.browser.title)
header = self.browser.find_element_by_class_name(
'page-header'
).find_element_by_tag_name('h1').text
self.assertIn(
'Playlist based on the events that happened on this day in music...',
header
)
active_element = navbar_active_element_text(self.browser)
self.assertIn("Playlist", active_element)
self.assertRaises(NoSuchElementException, self.browser.find_element_by_id, 'date_picker')
@requests_mock.Mocker()
def test_playlist_page_shows_list_of_songs(self, m):
m.get(settings.API_BASE_ADDRESS + '/playlist/', json={
"response": {
"status": {
"version": 0.1,
"code": 0,
"status": "Success"
},
"tracks": [
{
"name": "Zij Gelooft In Mij (2006 Digital Remaster)",
"artist": "Andre Hazes",
"spotifyId": "spotify-WW:track:4ZHCNqDss0HQchbrhleipg",
"event": "1951-06-30 - [Birth] Andre Hazes, Dutch barkeeper/singer (We Love Orange) was born"
},
{
"name": "He's Got the Whole World",
"artist": "Andrew Scott",
"spotifyId": "spotify-WW:track:3ZQsFBQVbWD7nDKkoSKB3q",
"event": "1949-06-30 - [Birth] Andrew Scott, Wales, rock guitarist (Sweet) was born"
}
],
"pagination": {
"total": 2,
"offset": 0,
"results": 2
}
}
}, status_code=200)
self.browser.get(self.live_server_url + '/playlist')
track_container = self.browser.find_element_by_class_name('well')
self.assertIsNotNone(track_container)
tracks = track_container.find_elements_by_tag_name('li')
self.assertEqual(len(tracks), 2)
self.assertEqual(tracks[0].text,
"1951-06-30 - [Birth] Andre Hazes, Dutch barkeeper/singer (We Love Orange) was born")
self.assertEqual(tracks[1].text, "1949-06-30 - [Birth] Andrew Scott, Wales, rock guitarist (Sweet) was born")
@requests_mock.Mocker()
def test_playlist_page_shows_create_spotify_playlist_form_if_no_user_in_session(self, m):
m.get(settings.API_BASE_ADDRESS + '/playlist/', json={
"response": {
"status": {
"version": 0.1,
"code": 0,
"status": "Success"
},
"tracks": [
{
"name": "Zij Gelooft In Mij (2006 Digital Remaster)",
"artist": "Andre Hazes",
"spotifyId": "spotify-WW:track:4ZHCNqDss0HQchbrhleipg",
"event": "1951-06-30 - [Birth] Andre Hazes, Dutch barkeeper/singer (We Love Orange) was born"
},
{
"name": "He's Got the Whole World",
"artist": "Andrew Scott",
"spotifyId": "spotify-WW:track:3ZQsFBQVbWD7nDKkoSKB3q",
"event": "1949-06-30 - [Birth] Andrew Scott, Wales, rock guitarist (Sweet) was born"
}
],
"pagination": {
"total": 2,
"offset": 0,
"results": 2
}
}
}, status_code=200)
self.browser.get(self.live_server_url + '/playlist')
create_spotify_playlist_form = self.browser.find_element_by_id('create-playlist')
self.assertIsNotNone(create_spotify_playlist_form)
@requests_mock.Mocker()
def test_playlist_page_shows_create_spotify_playlist_form_if_user_in_session_but_not_on_db(self, m):
m.get(settings.API_BASE_ADDRESS + '/playlist/', json={
"response": {
"status": {
"version": 0.1,
"code": 0,
"status": "Success"
},
"tracks": [
{
"name": "Zij Gelooft In Mij (2006 Digital Remaster)",
"artist": "Andre Hazes",
"spotifyId": "spotify-WW:track:4ZHCNqDss0HQchbrhleipg",
"event": "1951-06-30 - [Birth] Andre Hazes, Dutch barkeeper/singer (We Love Orange) was born"
},
{
"name": "He's Got the Whole World",
"artist": "Andrew Scott",
"spotifyId": "spotify-WW:track:3ZQsFBQVbWD7nDKkoSKB3q",
"event": "1949-06-30 - [Birth] Andrew Scott, Wales, rock guitarist (Sweet) was born"
}
],
"pagination": {
"total": 2,
"offset": 0,
"results": 2
}
}
}, status_code=200)
session = self.client.session
session.save()
self.browser.get(self.live_server_url)
username = 'thesearchingwanderer'
self.browser.add_cookie({'name': settings.SESSION_COOKIE_NAME, 'value': session.session_key})
session['username'] = username
session.save()
self.browser.get(self.live_server_url + '/playlist')
create_spotify_playlist_form = self.browser.find_element_by_id('create-playlist')
self.assertIsNotNone(create_spotify_playlist_form)
@requests_mock.Mocker()
def test_playlist_page_shows_spotify_playlist(self, m):
m.get(settings.API_BASE_ADDRESS + '/playlist/', json={
"response": {
"status": {
"version": 0.1,
"code": 0,
"status": "Success"
},
"tracks": [
{
"name": "Zij Gelooft In Mij (2006 Digital Remaster)",
"artist": "Andre Hazes",
"spotifyId": "spotify-WW:track:4ZHCNqDss0HQchbrhleipg",
"event": "1951-06-30 - [Birth] Andre Hazes, Dutch barkeeper/singer (We Love Orange) was born"
},
{
"name": "He's Got the Whole World",
"artist": "Andrew Scott",
"spotifyId": "spotify-WW:track:3ZQsFBQVbWD7nDKkoSKB3q",
"event": "1949-06-30 - [Birth] Andrew Scott, Wales, rock guitarist (Sweet) was born"
}
],
"pagination": {
"total": 2,
"offset": 0,
"results": 2
}
}
}, status_code=200)
session = self.client.session
session.save()
self.browser.get(self.live_server_url)
username = 'thesearchingwanderer'
user = User.objects.create(username=username)
Playlist.objects.create(
user=user,
date=datetime.date.today(),
url='https://open.spotify.com/embed/user/thesearchingwanderer/playlist/5jUBZBiQWmAiJaeJLYldcj?si=GgOazCjdR0uyHw6Vx9kCfQ'
)
self.browser.add_cookie({'name': settings.SESSION_COOKIE_NAME, 'value': session.session_key})
session['username'] = username
session['spotify_token'] = {
'access_token': 'random',
'expires_at': int(time.time()) + 3600
}
session.save()
self.browser.get(self.live_server_url + '/playlist')
spotify_playlist = self.browser.find_element_by_id('spotify-playlist')
self.assertIsNotNone(spotify_playlist)
|
|
# -*- coding: utf-8 -*-
import os
import time
from datetime import datetime
import click
import pythoncom
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from win32com.client import Dispatch
from t_infinity import driver
from t_infinity.logger import logger
class LogisticsCreate:
days_in_field = None
call_number = None
def __init__(self, serial_number, product_code, inbound_repair_order_number):
self.instance = driver.instance
self.instance.session_id = driver.session_id
self.wait = WebDriverWait(self.instance, 5)
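        # Despite its name, check_ro_number_is_free() returns True when the
        # repair order number already exists in Tesseract, in which case the
        # job must not be created again.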
if self.check_ro_number_is_free(inbound_repair_order_number):
return
history = self.check_history(serial_number, product_code)
        if history == 1:
            self.in_zulu(serial_number, product_code, inbound_repair_order_number)
        elif history == 2:
            self.not_in_zulu(serial_number, product_code, inbound_repair_order_number)
        elif history == 3:
            self._add_new(serial_number, product_code, inbound_repair_order_number)
self.go_to_create()
self.create_welcome_fill()
self.job_site_details_fill()
self.ship_site_details()
self.job_item_details(serial_number, product_code, inbound_repair_order_number)
self.job_details()
self.complete()
self.print(inbound_repair_order_number, self.call_number)
def check_ro_number_is_free(self, inbound_repair_order_number):
logger.debug('checking for RO number')
xpath_repair_order_number = ('//*[@id="scmaster_cplMainContent_grdPowerQuery_ctlPowerQueryGrid"]'
'/div[1]/div[4]/div[1]/table/tbody/tr[2]/td[2]/div/input[1]')
xpath_first_row = ('//*[@id="scmaster_cplMainContent_grdPowerQuery_ctlPowerQueryGrid"]'
'/div[1]/div[4]/div[2]/table/tbody/tr[1]')
self.instance.get('https://tesseract-cloud2.co.uk/SC51/SC_SerProd/aspx/serprod_query.aspx')
elem = self.wait.until(ec.visibility_of_element_located((By.XPATH, xpath_repair_order_number)))
logger.debug("Successful navigation, element marker found")
elem.send_keys(inbound_repair_order_number)
logger.debug('%s sent to element', inbound_repair_order_number)
try:
self.wait.until(ec.text_to_be_present_in_element((By.XPATH, xpath_first_row), inbound_repair_order_number))
logger.critical('repair order number exists')
return True
except TimeoutException:
            logger.debug('repair order number does not exist')
return False
def check_history(self, serial_number, product_code):
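        # Returns 1 when the serial number is already installed at site ZULU,
        # 2 when it is installed at any other site, and 3 when no serialized
        # product record is found.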
logger.debug('Checking history for %s:%s', serial_number, product_code)
# serialized product query
xpath_serial_number_input = '//*[@id="scmaster_cplMainContent_grdPowerQuery_ctlPowerQueryGrid"]/div[1]/div[4]/div[1]/table/tbody/tr[2]/td[1]/div/input[1]'
xpath_product_code_input = '//*[@id="scmaster_cplMainContent_grdPowerQuery_ctlPowerQueryGrid"]/div[1]/div[4]/div[1]/table/tbody/tr[2]/td[7]/div/input[1]'
xpath_serial_number_first_row = '//*[@id="scmaster_cplMainContent_grdPowerQuery_ctlPowerQueryGrid"]/div[1]/div[4]/div[2]/table/tbody/tr'
# serialized product modify
id_site_no = 'scmaster_cplMainContent_txtSerSiteNum'
self.instance.get('https://tesseract-cloud2.co.uk/SC51/SC_SerProd/aspx/serprod_query.aspx')
elem = self.wait.until(ec.presence_of_element_located((By.XPATH, xpath_serial_number_input)))
elem.send_keys(serial_number)
elem = self.instance.find_element_by_xpath(xpath_product_code_input)
elem.send_keys(product_code)
try:
self.wait.until(ec.text_to_be_present_in_element((By.XPATH, xpath_serial_number_first_row), serial_number))
self.instance.find_element_by_xpath(xpath_serial_number_first_row).click()
element_site_no = self.wait.until(ec.visibility_of_element_located((By.ID, id_site_no)))
site_no = element_site_no.get_attribute('value')
logger.debug("found a site number: %s", site_no)
except TimeoutException:
site_no = False
logger.debug('no site number found')
if site_no is False:
logger.debug('site_no is False')
return 3
if 'ZULU' == site_no:
logger.debug('Serial Number found in %s', site_no)
return 1
if site_no:
logger.debug('Serial Number found in %s', site_no)
return 2
def in_zulu(self, serial_number, product_code, inbound_repair_order_number):
logger.debug("running in_zulu flow")
_ = datetime.strptime(str(datetime.utcnow()), '%Y-%m-%d %H:%M:%S.%f')
today = time.mktime(_.timetuple())
id_current_ro_number = 'scmaster_cplMainContent_txtSerReference2'
id_last_ro_ref = 'scmaster_cplMainContent_txtSerReference1'
id_booked_in_date = 'scmaster_cplMainContent_dtpSerInstallDate'
id_product = 'scmaster_cplMainContent_cboSerProdNum'
        id_submit = 'scmaster_btnSubmit'
id_status = 'scmaster_cplMainContent_cboSerSeStatCode'
while serial_number not in self.instance.current_url:
self.check_history(serial_number, product_code)
element_current_ro_ref = self.wait.until(ec.visibility_of_element_located((By.ID, id_current_ro_number)))
element_last_ro_ref = self.instance.find_element_by_id(id_last_ro_ref)
element_booked_in_date = self.instance.find_element_by_id(id_booked_in_date)
element_product = self.instance.find_element_by_id(id_product)
        element_submit = self.instance.find_element_by_id(id_submit)
element_status = self.instance.find_element_by_id(id_status)
previous_product = element_product.get_attribute('value')
previous_repair_order = element_current_ro_ref.get_attribute('value')
if '177' not in previous_product:
if not click.confirm("Tesseract product code appears incorrect, continue?"):
logger.info('killing process')
return
if previous_product != product_code:
if not click.confirm("Your product does not match what is logged on Tesseract, continue?"):
logger.info('killing process')
return
element_last_ro_ref.clear()
element_last_ro_ref.send_keys(previous_repair_order)
element_current_ro_ref.clear()
element_current_ro_ref.send_keys(inbound_repair_order_number)
last_booked_in = element_booked_in_date.get_attribute('value')
logger.debug(last_booked_in)
_ = datetime.strptime(last_booked_in.strip(), '%m/%d/%Y')
last_time_time_stamp = time.mktime(_.timetuple())
self.days_in_field = int(today - last_time_time_stamp) / ((60 ** 2) * 24)
logger.debug(self.days_in_field)
element_booked_in_date.clear()
element_booked_in_date.send_keys(time.strftime("%m/%d/%Y"))
element_status.clear()
element_status.send_keys('REP')
if click.confirm("Record ready for submission, continue?"):
element_submit.click()
else:
return
logger.debug('Submitting...')
self.wait.until(ec.alert_is_present())
Alert(self.instance).accept()
try:
WebDriverWait(self.instance, 3).until(ec.alert_is_present())
logger.critical("Unable to submit, %s", Alert(self.instance).text)
Alert(self.instance).accept()
except TimeoutException:
logger.debug('successfully modified product')
pass
def not_in_zulu(self, serial_number, product_code, inbound_repair_order_number):
logger.debug("running not_in_zulu flow")
xpath_query_serial_number = '//*[@id="scmaster_cplMainContent_grdPowerQuery_ctlPowerQueryGrid"]/div[1]/div[4]/div[1]/table/tbody/tr[2]/td[1]/div/input[1]'
_ = datetime.strptime(str(datetime.utcnow()), '%Y-%m-%d %H:%M:%S.%f')
today = time.mktime(_.timetuple())
id_delete_button = 'scmaster_btnDelete'
id_last_booked_in = 'scmaster_cplMainContent_dtpSerInstallDate'
while serial_number not in self.instance.current_url:
self.check_history(serial_number, product_code)
element_delete_button = self.wait.until(ec.visibility_of_element_located((By.ID, id_delete_button)))
element_last_booked_in_date = self.instance.find_element_by_id(id_last_booked_in)
last_booked_in = element_last_booked_in_date.get_attribute('value')
logger.debug(last_booked_in)
_ = datetime.strptime(last_booked_in.strip(), '%m/%d/%Y')
last_time_time_stamp = time.mktime(_.timetuple())
self.days_in_field = int(today - last_time_time_stamp) / ((60 ** 2) * 24)
logger.debug(self.days_in_field)
element_delete_button.click()
logger.debug(Alert(self.instance).text)
Alert(self.instance).accept()
try:
WebDriverWait(self.instance, 3).until(ec.alert_is_present())
logger.critical("Unable to delete, %s", Alert(self.instance).text)
Alert(self.instance).accept()
except TimeoutException:
            logger.debug('product deleted from installation')
pass
self.wait.until(ec.presence_of_element_located((By.XPATH, xpath_query_serial_number)))
self._add_new(serial_number, product_code, inbound_repair_order_number)
def _add_new(self, serial_number, product_code, inbound_repair_order_number):
logger.debug('adding product')
id_add_button = 'scmaster_tdButtonStrip2'
id_serial_number = 'scmaster_cplMainContent_txtSerNum'
id_booked_in_date = 'scmaster_cplMainContent_dtpSerInstallDate'
id_current_ro_ref = 'scmaster_cplMainContent_txtSerReference2'
id_product = 'scmaster_cplMainContent_cboSerProdNum'
id_site_no = 'scmaster_cplMainContent_txtSerSiteNum'
id_status = 'scmaster_cplMainContent_cboSerSeStatCode'
id_submit = 'scmaster_btnSubmit'
element_add_button = self.instance.find_element_by_id(id_add_button)
element_add_button.click()
element_serial_number = self.wait.until(ec.presence_of_element_located((By.ID, id_serial_number)))
element_booked_in_date = self.instance.find_element_by_id(id_booked_in_date)
element_current_ro_ref = self.instance.find_element_by_id(id_current_ro_ref)
element_product = self.instance.find_element_by_id(id_product)
element_site_no = self.instance.find_element_by_id(id_site_no)
element_status = self.instance.find_element_by_id(id_status)
element_submit = self.instance.find_element_by_id(id_submit)
element_serial_number.send_keys(serial_number)
element_booked_in_date.clear()
element_booked_in_date.send_keys(time.strftime("%m/%d/%Y"))
element_current_ro_ref.send_keys(inbound_repair_order_number)
element_product.send_keys(product_code)
element_site_no.send_keys('ZULU')
element_status.send_keys('REP')
logger.debug('submitting...')
element_submit.click()
logger.debug('waiting for popup')
self.wait.until(ec.alert_is_present())
logger.debug(Alert(self.instance).text)
Alert(self.instance).accept()
def go_to_create(self):
self.instance.get('https://tesseract-cloud2.co.uk/SC51/SC_RepairJob/aspx/repairjob_create_wzd.aspx')
def create_welcome_fill(self):
id_book_in_date = 'scmaster_cplMainContent_datBookInDate'
id_next = 'scmaster_cplMainContent_cmdNext'
id_workshop_site = 'scmaster_cplMainContent_cboJobWorkshopSiteNum'
dt = datetime.now()
script_workshop_site = 'DisplayCombo("cboJobWorkshopSiteNum", "frmRepairJobCreateWzd");'
element_workshop_site = self.wait.until(ec.presence_of_element_located((By.ID, id_workshop_site)))
element_workshop_site.clear()
element_workshop_site.send_keys('STOWS')
self.instance.execute_script(script_workshop_site)
if not self._handle_modal('fraModalPopup', 'STOWS'):
return
logger.debug(f'{dt.month}/{dt.day}/{dt.year}')
self.wait.until(
ec.text_to_be_present_in_element_value((By.ID, id_book_in_date), f'{dt.month}/{dt.day}/{dt.year}'))
element_next = self.instance.find_element_by_id(id_next)
element_next.click()
return
def job_site_details_fill(self):
id_site_num = 'scmaster_cplMainContent_cboCallSiteNum'
id_name = 'scmaster_cplMainContent_cboCallSiteName'
id_next = 'scmaster_cplMainContent_cmdNext'
script_site_num = 'DisplayCombo("cboCallSiteNum", "frmRepairJobCreateWzd");'
element_site_num = self.wait.until(ec.presence_of_element_located((By.ID, id_site_num)))
element_site_num.send_keys('ZULU')
self.instance.execute_script(script_site_num)
if not self._handle_modal('fraModalPopup', 'ZULU'):
return
self.wait.until(ec.text_to_be_present_in_element_value((By.ID, id_name), 'Zulu Stock'))
element_next = self.instance.find_element_by_id(id_next)
element_next.click()
return
def ship_site_details(self):
id_ship_site_num = 'scmaster_cplMainContent_cboShipSiteNum'
id_next = 'scmaster_cplMainContent_cmdNext'
self.wait.until(ec.presence_of_element_located((By.ID, id_ship_site_num)))
element_next = self.instance.find_element_by_id(id_next)
element_next.click()
def job_item_details(self, serial_number, product_code, repair_order_number):
id_serial_num = 'scmaster_cplMainContent_cboCallSerNum'
id_material_number = 'scmaster_cplMainContent_cboCallProdNum'
id_repair_order_number = 'scmaster_cplMainContent_txtJobRef6'
id_next = 'scmaster_cplMainContent_cmdNext'
script_serial_num = 'DisplayCombo(\'cboCallSerNum\', \'frmRepairJobCreateWzd\')'
element_serial_num = self.wait.until(ec.presence_of_element_located((By.ID, id_serial_num)))
element_serial_num.send_keys(serial_number)
self.instance.execute_script(script_serial_num)
if not self._handle_modal(expected_value=serial_number):
return
self.wait.until(ec.text_to_be_present_in_element_value((By.ID, id_material_number), product_code))
element_repair_order_number = self.instance.find_element_by_id(id_repair_order_number)
element_repair_order_number.send_keys(repair_order_number)
element_next = self.instance.find_element_by_id(id_next)
element_next.click()
def job_details(self):
id_job_type = 'scmaster_cplMainContent_cboCallCalTCode'
id_flow_code = 'scmaster_cplMainContent_cboJobFlowCode'
id_problem = 'scmaster_cplMainContent_txtCallProblem'
id_desc = 'scmaster_cplMainContent_txtCalTDesc'
id_position = 'scmaster_cplMainContent_txtFlowPos'
        id_finish = 'scmaster_cplMainContent_cmdFinish'
script_job_type = 'DisplayCombo(\'cboCallCalTCode\', \'frmRepairJobCreateWzd\');'
script_flow_code = 'DisplayCombo(\'cboJobFlowCode\', \'frmRepairJobCreateWzd\');'
element_job_type = self.wait.until(ec.presence_of_element_located((By.ID, id_job_type)))
element_flow_code = self.instance.find_element_by_id(id_flow_code)
element_problem = self.instance.find_element_by_id(id_problem)
element_job_type.send_keys('ZR1')
element_flow_code.send_keys('SWBO%')
problems = []
        problems.append('This product has been in the field for ' + str(self.days_in_field) + ' days')
problems.append('This call was automatically generated with T-Infinity created by Kieran Wynne')
for problem in problems:
element_problem.send_keys(problem)
element_problem.send_keys(Keys.RETURN)
self.instance.execute_script(script_job_type)
if not self._handle_modal(expected_value='ZR1'):
return
self.wait.until(ec.text_to_be_present_in_element_value((By.ID, id_desc), 'Zulu Equipment Repair'))
self.instance.execute_script(script_flow_code)
if not self._handle_modal(expected_value='SWBOOKIN'):
return
self.wait.until(ec.text_to_be_present_in_element_value((By.ID, id_position), '1'))
        element_finish = self.instance.find_element_by_id(id_finish)
element_finish.click()
def complete(self):
id_job_numbers = 'scmaster_cplMainContent_txtJobNumbers'
element_job_numbers = self.wait.until(ec.presence_of_element_located((By.ID, id_job_numbers)))
self.call_number = element_job_numbers.text
def _handle_modal(self, frame_id='fraModalPopup', expected_value=None):
        self.wait.until(ec.frame_to_be_available_and_switch_to_it((By.ID, frame_id)))
        options = self.instance.find_elements_by_css_selector('#scmaster_cplMainContent_grdDropdown > tbody > tr')
        logger.debug(len(options))
        if not options:
            self.instance.switch_to_default_content()
            return False
        if len(options) == 2:
logger.debug('No relevant options exist')
self.instance.switch_to_default_content()
return False
if len(options) > 3:
logger.debug('multiple options available')
click.confirm('Click the option you like then confirm that you are done')
self.instance.switch_to_default_content()
return True
        if len(options) == 3:
logger.debug('selecting the only available option')
logger.debug(options[1].text)
if expected_value in options[1].text:
options[1].click()
self.instance.switch_to_default_content()
return True
else:
self.instance.switch_to_default_content()
return False
def print(self, repair_order_number, call_number):
pythoncom.CoInitialize()
labelCom = Dispatch('Dymo.DymoAddIn')
labelText = Dispatch('Dymo.DymoLabels')
current_path = os.path.abspath(os.path.dirname(__file__))
isOpen = labelCom.Open(os.path.join(current_path, "labels/Zulu-book-in.label"))
selectPrinter = 'DYMO LabelWriter 450'
labelCom.SelectPrinter(selectPrinter)
labelText.SetField('RO-Number', repair_order_number)
labelText.SetField('Call-Number', call_number)
labelCom.StartPrintJob()
labelCom.Print(1, False)
labelCom.EndPrintJob()
|
|
from __future__ import unicode_literals
from django import template
from django.utils.html import escape
from django.utils.safestring import mark_safe
from .compat import parse_bits
from ..cachefiles import ImageCacheFile
from ..registry import generator_registry
from ..lib import force_text
register = template.Library()
ASSIGNMENT_DELIMETER = 'as'
HTML_ATTRS_DELIMITER = '--'
DEFAULT_THUMBNAIL_GENERATOR = 'imagekit:thumbnail'
def get_cachefile(context, generator_id, generator_kwargs, source=None):
generator_id = generator_id.resolve(context)
kwargs = dict((k, v.resolve(context)) for k, v in generator_kwargs.items())
generator = generator_registry.get(generator_id, **kwargs)
return ImageCacheFile(generator)
def parse_dimensions(dimensions):
"""
Parse the width and height values from a dimension string. Valid values are
'1x1', '1x', and 'x1'. If one of the dimensions is omitted, the parse result
will be None for that value.
"""
width, height = [d.strip() and int(d) or None for d in dimensions.split('x')]
return dict(width=width, height=height)
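# Editor's note: a small sketch of the accepted dimension formats; this helper
# is illustrative only and not part of the original template library.
def _parse_dimensions_examples():
    assert parse_dimensions('100x50') == {'width': 100, 'height': 50}
    assert parse_dimensions('100x') == {'width': 100, 'height': None}
    assert parse_dimensions('x50') == {'width': None, 'height': 50}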
class GenerateImageAssignmentNode(template.Node):
def __init__(self, variable_name, generator_id, generator_kwargs):
self._generator_id = generator_id
self._generator_kwargs = generator_kwargs
self._variable_name = variable_name
def get_variable_name(self, context):
return force_text(self._variable_name)
def render(self, context):
variable_name = self.get_variable_name(context)
context[variable_name] = get_cachefile(context, self._generator_id,
self._generator_kwargs)
return ''
class GenerateImageTagNode(template.Node):
def __init__(self, generator_id, generator_kwargs, html_attrs):
self._generator_id = generator_id
self._generator_kwargs = generator_kwargs
self._html_attrs = html_attrs
def render(self, context):
file = get_cachefile(context, self._generator_id,
self._generator_kwargs)
attrs = dict((k, v.resolve(context)) for k, v in
self._html_attrs.items())
# Only add width and height if neither is specified (to allow for
# proportional in-browser scaling).
if not 'width' in attrs and not 'height' in attrs:
attrs.update(width=file.width, height=file.height)
attrs['src'] = file.url
attr_str = ' '.join('%s="%s"' % (escape(k), escape(v)) for k, v in
attrs.items())
return mark_safe('<img %s />' % attr_str)
class ThumbnailAssignmentNode(template.Node):
def __init__(self, variable_name, generator_id, dimensions, source, generator_kwargs):
self._variable_name = variable_name
self._generator_id = generator_id
self._dimensions = dimensions
self._source = source
self._generator_kwargs = generator_kwargs
def get_variable_name(self, context):
return force_text(self._variable_name)
def render(self, context):
variable_name = self.get_variable_name(context)
generator_id = self._generator_id.resolve(context) if self._generator_id else DEFAULT_THUMBNAIL_GENERATOR
kwargs = dict((k, v.resolve(context)) for k, v in
self._generator_kwargs.items())
kwargs['source'] = self._source.resolve(context)
kwargs.update(parse_dimensions(self._dimensions.resolve(context)))
generator = generator_registry.get(generator_id, **kwargs)
context[variable_name] = ImageCacheFile(generator)
return ''
class ThumbnailImageTagNode(template.Node):
def __init__(self, generator_id, dimensions, source, generator_kwargs, html_attrs):
self._generator_id = generator_id
self._dimensions = dimensions
self._source = source
self._generator_kwargs = generator_kwargs
self._html_attrs = html_attrs
def render(self, context):
generator_id = self._generator_id.resolve(context) if self._generator_id else DEFAULT_THUMBNAIL_GENERATOR
dimensions = parse_dimensions(self._dimensions.resolve(context))
kwargs = dict((k, v.resolve(context)) for k, v in
self._generator_kwargs.items())
kwargs['source'] = self._source.resolve(context)
kwargs.update(dimensions)
generator = generator_registry.get(generator_id, **kwargs)
file = ImageCacheFile(generator)
attrs = dict((k, v.resolve(context)) for k, v in
self._html_attrs.items())
# Only add width and height if neither is specified (to allow for
# proportional in-browser scaling).
if not 'width' in attrs and not 'height' in attrs:
attrs.update(width=file.width, height=file.height)
attrs['src'] = file.url
attr_str = ' '.join('%s="%s"' % (escape(k), escape(v)) for k, v in
attrs.items())
return mark_safe('<img %s />' % attr_str)
def parse_ik_tag_bits(parser, bits):
"""
Parses the tag name, html attributes and variable name (for assignment tags)
from the provided bits. The preceding bits may vary and are left to be
parsed by specific tags.
"""
varname = None
html_attrs = {}
tag_name = bits.pop(0)
if len(bits) >= 2 and bits[-2] == ASSIGNMENT_DELIMETER:
varname = bits[-1]
bits = bits[:-2]
if HTML_ATTRS_DELIMITER in bits:
if varname:
raise template.TemplateSyntaxError('Do not specify html attributes'
' (using "%s") when using the "%s" tag as an assignment'
' tag.' % (HTML_ATTRS_DELIMITER, tag_name))
index = bits.index(HTML_ATTRS_DELIMITER)
html_bits = bits[index + 1:]
bits = bits[:index]
if not html_bits:
raise template.TemplateSyntaxError('Don\'t use "%s" unless you\'re'
' setting html attributes.' % HTML_ATTRS_DELIMITER)
args, html_attrs = parse_bits(parser, html_bits, [], 'args',
'kwargs', None, False, tag_name)
if len(args):
raise template.TemplateSyntaxError('All "%s" tag arguments after'
' the "%s" token must be named.' % (tag_name,
HTML_ATTRS_DELIMITER))
return (tag_name, bits, html_attrs, varname)
#@register.tag
def generateimage(parser, token):
"""
Creates an image based on the provided arguments.
By default::
{% generateimage 'myapp:thumbnail' source=mymodel.profile_image %}
generates an ``<img>`` tag::
<img src="/path/to/34d944f200dd794bf1e6a7f37849f72b.jpg" width="100" height="100" />
You can add additional attributes to the tag using "--". For example,
this::
{% generateimage 'myapp:thumbnail' source=mymodel.profile_image -- alt="Hello!" %}
will result in the following markup::
<img src="/path/to/34d944f200dd794bf1e6a7f37849f72b.jpg" width="100" height="100" alt="Hello!" />
For more flexibility, ``generateimage`` also works as an assignment tag::
{% generateimage 'myapp:thumbnail' source=mymodel.profile_image as th %}
<img src="{{ th.url }}" width="{{ th.width }}" height="{{ th.height }}" />
"""
bits = token.split_contents()
tag_name, bits, html_attrs, varname = parse_ik_tag_bits(parser, bits)
args, kwargs = parse_bits(parser, bits, ['generator_id'], 'args', 'kwargs',
None, False, tag_name)
if len(args) != 1:
raise template.TemplateSyntaxError('The "%s" tag requires exactly one'
' unnamed argument (the generator id).' % tag_name)
generator_id = args[0]
if varname:
return GenerateImageAssignmentNode(varname, generator_id, kwargs)
else:
return GenerateImageTagNode(generator_id, kwargs, html_attrs)
#@register.tag
def thumbnail(parser, token):
"""
A convenient shortcut syntax for generating a thumbnail. The following::
{% thumbnail '100x100' mymodel.profile_image %}
is equivalent to::
{% generateimage 'imagekit:thumbnail' source=mymodel.profile_image width=100 height=100 %}
The thumbnail tag supports the "--" and "as" bits for adding html
attributes and assigning to a variable, respectively. It also accepts the
kwargs "anchor", and "crop".
To use "smart cropping" (the ``SmartResize`` processor)::
{% thumbnail '100x100' mymodel.profile_image %}
To crop, anchoring the image to the top right (the ``ResizeToFill``
processor)::
{% thumbnail '100x100' mymodel.profile_image anchor='tr' %}
To resize without cropping (using the ``ResizeToFit`` processor)::
{% thumbnail '100x100' mymodel.profile_image crop=0 %}
"""
bits = token.split_contents()
tag_name, bits, html_attrs, varname = parse_ik_tag_bits(parser, bits)
args, kwargs = parse_bits(parser, bits, [], 'args', 'kwargs',
None, False, tag_name)
if len(args) < 2:
raise template.TemplateSyntaxError('The "%s" tag requires at least two'
' unnamed arguments: the dimensions and the source image.'
% tag_name)
elif len(args) > 3:
raise template.TemplateSyntaxError('The "%s" tag accepts at most three'
' unnamed arguments: a generator id, the dimensions, and the'
' source image.' % tag_name)
dimensions, source = args[-2:]
generator_id = args[0] if len(args) > 2 else None
if varname:
return ThumbnailAssignmentNode(varname, generator_id, dimensions,
source, kwargs)
else:
return ThumbnailImageTagNode(generator_id, dimensions, source, kwargs,
html_attrs)
generateimage = register.tag(generateimage)
thumbnail = register.tag(thumbnail)
|
|
from __future__ import unicode_literals
import json
import sure # noqa
import boto3
from moto import mock_iot
@mock_iot
def test_things():
client = boto3.client('iot', region_name='ap-northeast-1')
name = 'my-thing'
type_name = 'my-type-name'
# thing type
thing_type = client.create_thing_type(thingTypeName=type_name)
thing_type.should.have.key('thingTypeName').which.should.equal(type_name)
thing_type.should.have.key('thingTypeArn')
res = client.list_thing_types()
res.should.have.key('thingTypes').which.should.have.length_of(1)
for thing_type in res['thingTypes']:
thing_type.should.have.key('thingTypeName').which.should_not.be.none
thing_type = client.describe_thing_type(thingTypeName=type_name)
thing_type.should.have.key('thingTypeName').which.should.equal(type_name)
thing_type.should.have.key('thingTypeProperties')
thing_type.should.have.key('thingTypeMetadata')
# thing
thing = client.create_thing(thingName=name, thingTypeName=type_name)
thing.should.have.key('thingName').which.should.equal(name)
thing.should.have.key('thingArn')
res = client.list_things()
res.should.have.key('things').which.should.have.length_of(1)
for thing in res['things']:
thing.should.have.key('thingName').which.should_not.be.none
thing.should.have.key('thingArn').which.should_not.be.none
thing = client.update_thing(thingName=name, attributePayload={'attributes': {'k1': 'v1'}})
res = client.list_things()
res.should.have.key('things').which.should.have.length_of(1)
for thing in res['things']:
thing.should.have.key('thingName').which.should_not.be.none
thing.should.have.key('thingArn').which.should_not.be.none
res['things'][0]['attributes'].should.have.key('k1').which.should.equal('v1')
thing = client.describe_thing(thingName=name)
thing.should.have.key('thingName').which.should.equal(name)
thing.should.have.key('defaultClientId')
thing.should.have.key('thingTypeName')
thing.should.have.key('attributes')
thing.should.have.key('version')
# delete thing
client.delete_thing(thingName=name)
res = client.list_things()
res.should.have.key('things').which.should.have.length_of(0)
# delete thing type
client.delete_thing_type(thingTypeName=type_name)
res = client.list_thing_types()
res.should.have.key('thingTypes').which.should.have.length_of(0)
@mock_iot
def test_list_thing_types():
client = boto3.client('iot', region_name='ap-northeast-1')
for i in range(0, 100):
client.create_thing_type(thingTypeName=str(i + 1))
thing_types = client.list_thing_types()
thing_types.should.have.key('nextToken')
thing_types.should.have.key('thingTypes').which.should.have.length_of(50)
thing_types['thingTypes'][0]['thingTypeName'].should.equal('1')
thing_types['thingTypes'][-1]['thingTypeName'].should.equal('50')
thing_types = client.list_thing_types(nextToken=thing_types['nextToken'])
thing_types.should.have.key('thingTypes').which.should.have.length_of(50)
thing_types.should_not.have.key('nextToken')
thing_types['thingTypes'][0]['thingTypeName'].should.equal('51')
thing_types['thingTypes'][-1]['thingTypeName'].should.equal('100')
@mock_iot
def test_list_thing_types_with_typename_filter():
client = boto3.client('iot', region_name='ap-northeast-1')
client.create_thing_type(thingTypeName='thing')
client.create_thing_type(thingTypeName='thingType')
client.create_thing_type(thingTypeName='thingTypeName')
client.create_thing_type(thingTypeName='thingTypeNameGroup')
client.create_thing_type(thingTypeName='shouldNotFind')
client.create_thing_type(thingTypeName='find me it shall not')
thing_types = client.list_thing_types(thingTypeName='thing')
thing_types.should_not.have.key('nextToken')
thing_types.should.have.key('thingTypes').which.should.have.length_of(4)
thing_types['thingTypes'][0]['thingTypeName'].should.equal('thing')
thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup')
thing_types = client.list_thing_types(thingTypeName='thingTypeName')
thing_types.should_not.have.key('nextToken')
thing_types.should.have.key('thingTypes').which.should.have.length_of(2)
thing_types['thingTypes'][0]['thingTypeName'].should.equal('thingTypeName')
thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup')
@mock_iot
def test_list_things_with_next_token():
client = boto3.client('iot', region_name='ap-northeast-1')
for i in range(0, 200):
client.create_thing(thingName=str(i + 1))
things = client.list_things()
things.should.have.key('nextToken')
things.should.have.key('things').which.should.have.length_of(50)
things['things'][0]['thingName'].should.equal('1')
things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/1')
things['things'][-1]['thingName'].should.equal('50')
things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/50')
things = client.list_things(nextToken=things['nextToken'])
things.should.have.key('nextToken')
things.should.have.key('things').which.should.have.length_of(50)
things['things'][0]['thingName'].should.equal('51')
things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/51')
things['things'][-1]['thingName'].should.equal('100')
things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100')
things = client.list_things(nextToken=things['nextToken'])
things.should.have.key('nextToken')
things.should.have.key('things').which.should.have.length_of(50)
things['things'][0]['thingName'].should.equal('101')
things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/101')
things['things'][-1]['thingName'].should.equal('150')
things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150')
things = client.list_things(nextToken=things['nextToken'])
things.should_not.have.key('nextToken')
things.should.have.key('things').which.should.have.length_of(50)
things['things'][0]['thingName'].should.equal('151')
things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/151')
things['things'][-1]['thingName'].should.equal('200')
things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200')
@mock_iot
def test_list_things_with_attribute_and_thing_type_filter_and_next_token():
client = boto3.client('iot', region_name='ap-northeast-1')
client.create_thing_type(thingTypeName='my-thing-type')
for i in range(0, 200):
if not (i + 1) % 3:
attribute_payload = {
'attributes': {
'foo': 'bar'
}
}
elif not (i + 1) % 5:
attribute_payload = {
'attributes': {
'bar': 'foo'
}
}
else:
attribute_payload = {}
if not (i + 1) % 2:
thing_type_name = 'my-thing-type'
client.create_thing(thingName=str(i + 1), thingTypeName=thing_type_name, attributePayload=attribute_payload)
else:
client.create_thing(thingName=str(i + 1), attributePayload=attribute_payload)
# Test filter for thingTypeName
things = client.list_things(thingTypeName=thing_type_name)
things.should.have.key('nextToken')
things.should.have.key('things').which.should.have.length_of(50)
things['things'][0]['thingName'].should.equal('2')
things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/2')
things['things'][-1]['thingName'].should.equal('100')
things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100')
    assert all(item['thingTypeName'] == thing_type_name for item in things['things'])
things = client.list_things(nextToken=things['nextToken'], thingTypeName=thing_type_name)
things.should_not.have.key('nextToken')
things.should.have.key('things').which.should.have.length_of(50)
things['things'][0]['thingName'].should.equal('102')
things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/102')
things['things'][-1]['thingName'].should.equal('200')
things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200')
    assert all(item['thingTypeName'] == thing_type_name for item in things['things'])
# Test filter for attributes
things = client.list_things(attributeName='foo', attributeValue='bar')
things.should.have.key('nextToken')
things.should.have.key('things').which.should.have.length_of(50)
things['things'][0]['thingName'].should.equal('3')
things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/3')
things['things'][-1]['thingName'].should.equal('150')
things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150')
    assert all(item['attributes'] == {'foo': 'bar'} for item in things['things'])
things = client.list_things(nextToken=things['nextToken'], attributeName='foo', attributeValue='bar')
things.should_not.have.key('nextToken')
things.should.have.key('things').which.should.have.length_of(16)
things['things'][0]['thingName'].should.equal('153')
things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/153')
things['things'][-1]['thingName'].should.equal('198')
things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198')
    assert all(item['attributes'] == {'foo': 'bar'} for item in things['things'])
# Test filter for attributes and thingTypeName
things = client.list_things(thingTypeName=thing_type_name, attributeName='foo', attributeValue='bar')
things.should_not.have.key('nextToken')
things.should.have.key('things').which.should.have.length_of(33)
things['things'][0]['thingName'].should.equal('6')
things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/6')
things['things'][-1]['thingName'].should.equal('198')
things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198')
    assert all(item['attributes'] == {'foo': 'bar'} and item['thingTypeName'] == thing_type_name for item in things['things'])
@mock_iot
def test_certs():
client = boto3.client('iot', region_name='ap-northeast-1')
cert = client.create_keys_and_certificate(setAsActive=True)
cert.should.have.key('certificateArn').which.should_not.be.none
cert.should.have.key('certificateId').which.should_not.be.none
cert.should.have.key('certificatePem').which.should_not.be.none
cert.should.have.key('keyPair')
cert['keyPair'].should.have.key('PublicKey').which.should_not.be.none
cert['keyPair'].should.have.key('PrivateKey').which.should_not.be.none
cert_id = cert['certificateId']
cert = client.describe_certificate(certificateId=cert_id)
cert.should.have.key('certificateDescription')
cert_desc = cert['certificateDescription']
cert_desc.should.have.key('certificateArn').which.should_not.be.none
cert_desc.should.have.key('certificateId').which.should_not.be.none
cert_desc.should.have.key('certificatePem').which.should_not.be.none
cert_desc.should.have.key('status').which.should.equal('ACTIVE')
res = client.list_certificates()
res.should.have.key('certificates').which.should.have.length_of(1)
for cert in res['certificates']:
cert.should.have.key('certificateArn').which.should_not.be.none
cert.should.have.key('certificateId').which.should_not.be.none
cert.should.have.key('status').which.should_not.be.none
cert.should.have.key('creationDate').which.should_not.be.none
client.update_certificate(certificateId=cert_id, newStatus='REVOKED')
cert = client.describe_certificate(certificateId=cert_id)
    cert_desc = cert['certificateDescription']
    cert_desc.should.have.key('status').which.should.equal('REVOKED')
client.delete_certificate(certificateId=cert_id)
res = client.list_certificates()
res.should.have.key('certificates').which.should.have.length_of(0)
@mock_iot
def test_certs_create_inactive():
client = boto3.client('iot', region_name='ap-northeast-1')
cert = client.create_keys_and_certificate(setAsActive=False)
cert_id = cert['certificateId']
cert = client.describe_certificate(certificateId=cert_id)
cert.should.have.key('certificateDescription')
cert_desc = cert['certificateDescription']
cert_desc.should.have.key('status').which.should.equal('INACTIVE')
client.update_certificate(certificateId=cert_id, newStatus='ACTIVE')
cert = client.describe_certificate(certificateId=cert_id)
cert.should.have.key('certificateDescription')
cert_desc = cert['certificateDescription']
cert_desc.should.have.key('status').which.should.equal('ACTIVE')
@mock_iot
def test_policy():
client = boto3.client('iot', region_name='ap-northeast-1')
name = 'my-policy'
doc = '{}'
policy = client.create_policy(policyName=name, policyDocument=doc)
policy.should.have.key('policyName').which.should.equal(name)
policy.should.have.key('policyArn').which.should_not.be.none
policy.should.have.key('policyDocument').which.should.equal(doc)
policy.should.have.key('policyVersionId').which.should.equal('1')
policy = client.get_policy(policyName=name)
policy.should.have.key('policyName').which.should.equal(name)
policy.should.have.key('policyArn').which.should_not.be.none
policy.should.have.key('policyDocument').which.should.equal(doc)
policy.should.have.key('defaultVersionId').which.should.equal('1')
res = client.list_policies()
res.should.have.key('policies').which.should.have.length_of(1)
for policy in res['policies']:
policy.should.have.key('policyName').which.should_not.be.none
policy.should.have.key('policyArn').which.should_not.be.none
client.delete_policy(policyName=name)
res = client.list_policies()
res.should.have.key('policies').which.should.have.length_of(0)
@mock_iot
def test_principal_policy():
client = boto3.client('iot', region_name='ap-northeast-1')
policy_name = 'my-policy'
doc = '{}'
policy = client.create_policy(policyName=policy_name, policyDocument=doc)
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert['certificateArn']
client.attach_principal_policy(policyName=policy_name, principal=cert_arn)
res = client.list_principal_policies(principal=cert_arn)
res.should.have.key('policies').which.should.have.length_of(1)
for policy in res['policies']:
policy.should.have.key('policyName').which.should_not.be.none
policy.should.have.key('policyArn').which.should_not.be.none
res = client.list_policy_principals(policyName=policy_name)
res.should.have.key('principals').which.should.have.length_of(1)
for principal in res['principals']:
principal.should_not.be.none
client.detach_principal_policy(policyName=policy_name, principal=cert_arn)
res = client.list_principal_policies(principal=cert_arn)
res.should.have.key('policies').which.should.have.length_of(0)
res = client.list_policy_principals(policyName=policy_name)
res.should.have.key('principals').which.should.have.length_of(0)
@mock_iot
def test_principal_thing():
client = boto3.client('iot', region_name='ap-northeast-1')
thing_name = 'my-thing'
thing = client.create_thing(thingName=thing_name)
cert = client.create_keys_and_certificate(setAsActive=True)
cert_arn = cert['certificateArn']
client.attach_thing_principal(thingName=thing_name, principal=cert_arn)
res = client.list_principal_things(principal=cert_arn)
res.should.have.key('things').which.should.have.length_of(1)
for thing in res['things']:
thing.should_not.be.none
res = client.list_thing_principals(thingName=thing_name)
res.should.have.key('principals').which.should.have.length_of(1)
for principal in res['principals']:
principal.should_not.be.none
client.detach_thing_principal(thingName=thing_name, principal=cert_arn)
res = client.list_principal_things(principal=cert_arn)
res.should.have.key('things').which.should.have.length_of(0)
res = client.list_thing_principals(thingName=thing_name)
res.should.have.key('principals').which.should.have.length_of(0)
@mock_iot
def test_thing_groups():
client = boto3.client('iot', region_name='ap-northeast-1')
group_name = 'my-group-name'
# thing group
thing_group = client.create_thing_group(thingGroupName=group_name)
thing_group.should.have.key('thingGroupName').which.should.equal(group_name)
thing_group.should.have.key('thingGroupArn')
res = client.list_thing_groups()
res.should.have.key('thingGroups').which.should.have.length_of(1)
for thing_group in res['thingGroups']:
thing_group.should.have.key('groupName').which.should_not.be.none
thing_group.should.have.key('groupArn').which.should_not.be.none
thing_group = client.describe_thing_group(thingGroupName=group_name)
thing_group.should.have.key('thingGroupName').which.should.equal(group_name)
thing_group.should.have.key('thingGroupProperties')
thing_group.should.have.key('thingGroupMetadata')
thing_group.should.have.key('version')
# delete thing group
client.delete_thing_group(thingGroupName=group_name)
res = client.list_thing_groups()
res.should.have.key('thingGroups').which.should.have.length_of(0)
# props create test
props = {
'thingGroupDescription': 'my first thing group',
'attributePayload': {
'attributes': {
'key1': 'val01',
'Key02': 'VAL2'
}
}
}
thing_group = client.create_thing_group(thingGroupName=group_name, thingGroupProperties=props)
thing_group.should.have.key('thingGroupName').which.should.equal(group_name)
thing_group.should.have.key('thingGroupArn')
thing_group = client.describe_thing_group(thingGroupName=group_name)
thing_group.should.have.key('thingGroupProperties') \
.which.should.have.key('attributePayload') \
.which.should.have.key('attributes')
res_props = thing_group['thingGroupProperties']['attributePayload']['attributes']
res_props.should.have.key('key1').which.should.equal('val01')
res_props.should.have.key('Key02').which.should.equal('VAL2')
# props update test with merge
new_props = {
'attributePayload': {
'attributes': {
'k3': 'v3'
},
'merge': True
}
}
client.update_thing_group(
thingGroupName=group_name,
thingGroupProperties=new_props
)
thing_group = client.describe_thing_group(thingGroupName=group_name)
thing_group.should.have.key('thingGroupProperties') \
.which.should.have.key('attributePayload') \
.which.should.have.key('attributes')
res_props = thing_group['thingGroupProperties']['attributePayload']['attributes']
res_props.should.have.key('key1').which.should.equal('val01')
res_props.should.have.key('Key02').which.should.equal('VAL2')
res_props.should.have.key('k3').which.should.equal('v3')
# props update test
new_props = {
'attributePayload': {
'attributes': {
'k4': 'v4'
}
}
}
client.update_thing_group(
thingGroupName=group_name,
thingGroupProperties=new_props
)
thing_group = client.describe_thing_group(thingGroupName=group_name)
thing_group.should.have.key('thingGroupProperties') \
.which.should.have.key('attributePayload') \
.which.should.have.key('attributes')
res_props = thing_group['thingGroupProperties']['attributePayload']['attributes']
res_props.should.have.key('k4').which.should.equal('v4')
res_props.should_not.have.key('key1')
@mock_iot
def test_thing_group_relations():
client = boto3.client('iot', region_name='ap-northeast-1')
name = 'my-thing'
group_name = 'my-group-name'
# thing group
thing_group = client.create_thing_group(thingGroupName=group_name)
thing_group.should.have.key('thingGroupName').which.should.equal(group_name)
thing_group.should.have.key('thingGroupArn')
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key('thingName').which.should.equal(name)
thing.should.have.key('thingArn')
# add in 4 way
client.add_thing_to_thing_group(
thingGroupName=group_name,
thingName=name
)
client.add_thing_to_thing_group(
thingGroupArn=thing_group['thingGroupArn'],
thingArn=thing['thingArn']
)
client.add_thing_to_thing_group(
thingGroupName=group_name,
thingArn=thing['thingArn']
)
client.add_thing_to_thing_group(
thingGroupArn=thing_group['thingGroupArn'],
thingName=name
)
things = client.list_things_in_thing_group(
thingGroupName=group_name
)
things.should.have.key('things')
things['things'].should.have.length_of(1)
thing_groups = client.list_thing_groups_for_thing(
thingName=name
)
thing_groups.should.have.key('thingGroups')
thing_groups['thingGroups'].should.have.length_of(1)
# remove in 4 way
client.remove_thing_from_thing_group(
thingGroupName=group_name,
thingName=name
)
client.remove_thing_from_thing_group(
thingGroupArn=thing_group['thingGroupArn'],
thingArn=thing['thingArn']
)
client.remove_thing_from_thing_group(
thingGroupName=group_name,
thingArn=thing['thingArn']
)
client.remove_thing_from_thing_group(
thingGroupArn=thing_group['thingGroupArn'],
thingName=name
)
things = client.list_things_in_thing_group(
thingGroupName=group_name
)
things.should.have.key('things')
things['things'].should.have.length_of(0)
# update thing group for thing
client.update_thing_groups_for_thing(
thingName=name,
thingGroupsToAdd=[
group_name
]
)
things = client.list_things_in_thing_group(
thingGroupName=group_name
)
things.should.have.key('things')
things['things'].should.have.length_of(1)
client.update_thing_groups_for_thing(
thingName=name,
thingGroupsToRemove=[
group_name
]
)
things = client.list_things_in_thing_group(
thingGroupName=group_name
)
things.should.have.key('things')
things['things'].should.have.length_of(0)
@mock_iot
def test_create_job():
client = boto3.client('iot', region_name='eu-west-1')
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key('thingName').which.should.equal(name)
thing.should.have.key('thingArn')
# job document
job_document = {
"field": "value"
}
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
document=json.dumps(job_document),
description="Description",
presignedUrlConfig={
'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
'expiresInSec': 123
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={
'maximumPerMinute': 10
}
)
job.should.have.key('jobId').which.should.equal(job_id)
job.should.have.key('jobArn')
job.should.have.key('description')
@mock_iot
def test_describe_job():
client = boto3.client('iot', region_name='eu-west-1')
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key('thingName').which.should.equal(name)
thing.should.have.key('thingArn')
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
presignedUrlConfig={
'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
'expiresInSec': 123
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={
'maximumPerMinute': 10
}
)
job.should.have.key('jobId').which.should.equal(job_id)
job.should.have.key('jobArn')
job = client.describe_job(jobId=job_id)
job.should.have.key('documentSource')
job.should.have.key('job')
job.should.have.key('job').which.should.have.key("jobArn")
job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key('job').which.should.have.key("targets")
job.should.have.key('job').which.should.have.key("jobProcessDetails")
job.should.have.key('job').which.should.have.key("lastUpdatedAt")
job.should.have.key('job').which.should.have.key("createdAt")
job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig")
job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS")
job.should.have.key('job').which.should.have.key("presignedUrlConfig")
job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key(
"roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role')
job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key(
"expiresInSec").which.should.equal(123)
job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key(
"maximumPerMinute").which.should.equal(10)
@mock_iot
def test_describe_job_1():
client = boto3.client('iot', region_name='eu-west-1')
name = "my-thing"
job_id = "TestJob"
# thing
thing = client.create_thing(thingName=name)
thing.should.have.key('thingName').which.should.equal(name)
thing.should.have.key('thingArn')
# job document
job_document = {
"field": "value"
}
job = client.create_job(
jobId=job_id,
targets=[thing["thingArn"]],
document=json.dumps(job_document),
presignedUrlConfig={
'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
'expiresInSec': 123
},
targetSelection="CONTINUOUS",
jobExecutionsRolloutConfig={
'maximumPerMinute': 10
}
)
job.should.have.key('jobId').which.should.equal(job_id)
job.should.have.key('jobArn')
job = client.describe_job(jobId=job_id)
job.should.have.key('job')
job.should.have.key('job').which.should.have.key("jobArn")
job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id)
job.should.have.key('job').which.should.have.key("targets")
job.should.have.key('job').which.should.have.key("jobProcessDetails")
job.should.have.key('job').which.should.have.key("lastUpdatedAt")
job.should.have.key('job').which.should.have.key("createdAt")
job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig")
job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS")
job.should.have.key('job').which.should.have.key("presignedUrlConfig")
job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key(
"roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role')
job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key(
"expiresInSec").which.should.equal(123)
job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key(
"maximumPerMinute").which.should.equal(10)
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import unittest
import requests
import requests_mock
import tenacity
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.hooks.http_hook import HttpHook
from airflow.models import Connection
from tests.compat import mock
def get_airflow_connection(conn_id=None):
return Connection(
conn_id='http_default',
conn_type='http',
host='test:8080/',
extra='{"bareer": "test"}'
)
def get_airflow_connection_with_port(conn_id=None):
return Connection(
conn_id='http_default',
conn_type='http',
host='test.com',
port=1234
)
class TestHttpHook(unittest.TestCase):
"""Test get, post and raise_for_status"""
def setUp(self):
session = requests.Session()
adapter = requests_mock.Adapter()
session.mount('mock', adapter)
self.get_hook = HttpHook(method='GET')
self.get_lowercase_hook = HttpHook(method='get')
self.post_hook = HttpHook(method='POST')
@requests_mock.mock()
def test_raise_for_status_with_200(self, m):
m.get(
'http://test:8080/v1/test',
status_code=200,
text='{"status":{"status": 200}}',
reason='OK'
)
with mock.patch(
'airflow.hooks.base_hook.BaseHook.get_connection',
side_effect=get_airflow_connection
):
resp = self.get_hook.run('v1/test')
self.assertEqual(resp.text, '{"status":{"status": 200}}')
@mock.patch('requests.Session')
@mock.patch('requests.Request')
def test_get_request_with_port(self, request_mock, session_mock):
from requests.exceptions import MissingSchema
with mock.patch(
'airflow.hooks.base_hook.BaseHook.get_connection',
side_effect=get_airflow_connection_with_port
):
expected_url = 'http://test.com:1234/some/endpoint'
for endpoint in ['some/endpoint', '/some/endpoint']:
try:
self.get_hook.run(endpoint)
except MissingSchema:
pass
request_mock.assert_called_once_with(
mock.ANY,
expected_url,
headers=mock.ANY,
params=mock.ANY
)
request_mock.reset_mock()
session_mock.reset_mock()
@requests_mock.mock()
def test_get_request_do_not_raise_for_status_if_check_response_is_false(self, m):
m.get(
'http://test:8080/v1/test',
status_code=404,
text='{"status":{"status": 404}}',
reason='Bad request'
)
with mock.patch(
'airflow.hooks.base_hook.BaseHook.get_connection',
side_effect=get_airflow_connection
):
resp = self.get_hook.run('v1/test', extra_options={'check_response': False})
self.assertEqual(resp.text, '{"status":{"status": 404}}')
@requests_mock.mock()
def test_hook_contains_header_from_extra_field(self, m):
with mock.patch(
'airflow.hooks.base_hook.BaseHook.get_connection',
side_effect=get_airflow_connection
):
expected_conn = get_airflow_connection()
conn = self.get_hook.get_conn()
self.assertDictContainsSubset(json.loads(expected_conn.extra), conn.headers)
self.assertEqual(conn.headers.get('bareer'), 'test')
@requests_mock.mock()
@mock.patch('requests.Request')
def test_hook_with_method_in_lowercase(self, m, request_mock):
from requests.exceptions import MissingSchema, InvalidURL
with mock.patch(
'airflow.hooks.base_hook.BaseHook.get_connection',
side_effect=get_airflow_connection_with_port
):
data = "test params"
try:
self.get_lowercase_hook.run('v1/test', data=data)
except (MissingSchema, InvalidURL):
pass
request_mock.assert_called_once_with(
mock.ANY,
mock.ANY,
headers=mock.ANY,
params=data
)
@requests_mock.mock()
def test_hook_uses_provided_header(self, m):
conn = self.get_hook.get_conn(headers={"bareer": "newT0k3n"})
self.assertEqual(conn.headers.get('bareer'), "newT0k3n")
@requests_mock.mock()
def test_hook_has_no_header_from_extra(self, m):
conn = self.get_hook.get_conn()
self.assertIsNone(conn.headers.get('bareer'))
@requests_mock.mock()
def test_hooks_header_from_extra_is_overridden(self, m):
with mock.patch(
'airflow.hooks.base_hook.BaseHook.get_connection',
side_effect=get_airflow_connection
):
conn = self.get_hook.get_conn(headers={"bareer": "newT0k3n"})
self.assertEqual(conn.headers.get('bareer'), 'newT0k3n')
@requests_mock.mock()
def test_post_request(self, m):
m.post(
'http://test:8080/v1/test',
status_code=200,
text='{"status":{"status": 200}}',
reason='OK'
)
with mock.patch(
'airflow.hooks.base_hook.BaseHook.get_connection',
side_effect=get_airflow_connection
):
resp = self.post_hook.run('v1/test')
self.assertEqual(resp.status_code, 200)
@requests_mock.mock()
def test_post_request_with_error_code(self, m):
m.post(
'http://test:8080/v1/test',
status_code=418,
text='{"status":{"status": 418}}',
reason='I\'m a teapot'
)
with mock.patch(
'airflow.hooks.base_hook.BaseHook.get_connection',
side_effect=get_airflow_connection
):
with self.assertRaises(AirflowException):
self.post_hook.run('v1/test')
@requests_mock.mock()
def test_post_request_do_not_raise_for_status_if_check_response_is_false(self, m):
m.post(
'http://test:8080/v1/test',
status_code=418,
text='{"status":{"status": 418}}',
reason='I\'m a teapot'
)
with mock.patch(
'airflow.hooks.base_hook.BaseHook.get_connection',
side_effect=get_airflow_connection
):
resp = self.post_hook.run('v1/test', extra_options={'check_response': False})
self.assertEqual(resp.status_code, 418)
@mock.patch('airflow.hooks.http_hook.requests.Session')
def test_retry_on_conn_error(self, mocked_session):
retry_args = dict(
wait=tenacity.wait_none(),
stop=tenacity.stop_after_attempt(7),
retry=tenacity.retry_if_exception_type(
requests.exceptions.ConnectionError
)
)
def send_and_raise(request, **kwargs):
raise requests.exceptions.ConnectionError
mocked_session().send.side_effect = send_and_raise
# The job failed for some reason
with self.assertRaises(tenacity.RetryError):
self.get_hook.run_with_advanced_retry(
endpoint='v1/test',
_retry_args=retry_args
)
self.assertEqual(
self.get_hook._retry_obj.stop.max_attempt_number + 1,
mocked_session.call_count
)
@requests_mock.mock()
def test_run_with_advanced_retry(self, m):
m.get(
u'http://test:8080/v1/test',
status_code=200,
reason=u'OK'
)
retry_args = dict(
wait=tenacity.wait_none(),
stop=tenacity.stop_after_attempt(3),
retry=tenacity.retry_if_exception_type(Exception),
reraise=True
)
with mock.patch(
'airflow.hooks.base_hook.BaseHook.get_connection',
side_effect=get_airflow_connection
):
response = self.get_hook.run_with_advanced_retry(
endpoint='v1/test',
_retry_args=retry_args
)
self.assertIsInstance(response, requests.Response)
def test_header_from_extra_and_run_method_are_merged(self):
def run_and_return(session, prepped_request, extra_options, **kwargs):
return prepped_request
# The job failed for some reason
with mock.patch(
'airflow.hooks.http_hook.HttpHook.run_and_check',
side_effect=run_and_return
):
with mock.patch(
'airflow.hooks.base_hook.BaseHook.get_connection',
side_effect=get_airflow_connection
):
pr = self.get_hook.run('v1/test', headers={'some_other_header': 'test'})
actual = dict(pr.headers)
self.assertEqual(actual.get('bareer'), 'test')
self.assertEqual(actual.get('some_other_header'), 'test')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_http_connection(self, mock_get_connection):
c = Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='http')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_https_connection(self, mock_get_connection):
c = Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='https')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_http_connection(self, mock_get_connection):
c = Connection(conn_id='http_default', conn_type='http',
host='http://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_https_connection(self, mock_get_connection):
c = Connection(conn_id='http_default', conn_type='http',
host='https://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
def test_method_converted_to_uppercase_when_created_in_lowercase(self):
self.assertEqual(self.get_lowercase_hook.method, 'GET')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_connection_without_host(self, mock_get_connection):
c = Connection(conn_id='http_default', conn_type='http')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://')
@parameterized.expand([
'GET',
'POST',
])
@requests_mock.mock()
def test_json_request(self, method, mock_requests):
obj1 = {'a': 1, 'b': 'abc', 'c': [1, 2, {"d": 10}]}
def match_obj1(request):
return request.json() == obj1
mock_requests.request(
method=method,
url='//test:8080/v1/test',
additional_matcher=match_obj1
)
with mock.patch(
'airflow.hooks.base_hook.BaseHook.get_connection',
side_effect=get_airflow_connection
):
# will raise NoMockAddress exception if obj1 != request.json()
HttpHook(method=method).run('v1/test', json=obj1)
send_email_test = mock.Mock()
|
|
__author__ = "Mihaela Rosca"
__contact__ = "[email protected]"
import heapq
import matplotlib.pyplot as plt
import numpy
import os
import scipy
import scipy.linalg
from os.path import isfile, join
from scipy import misc
# Import all common functions
from common import *
# The directory path to the images
PICTURE_PATH = "/pics/cambrdige_pics/"
# The current directory where the script is ran
currentDir = os.path.dirname(os.path.abspath(__file__))
"""
Converts the data to zero mean data.
"""
def convertDataToZeroMean(data):
means = scipy.mean(data, axis=0)
rows, cols = data.shape
zeroMean = numpy.zeros((rows, cols))
for i in xrange(rows):
zeroMean[i] = data[i] - means
assert zeroMean.shape == data.shape
return zeroMean
"""
Uses a heuristic to evaluate how many dimensions the data should be reduced
to.
Arguments:
eigenValues:
The eigen values of the covariance matrix, or numbers proportional to them.
Should be a numpy 1-D array.
Returns:
The dimension the data should be reduced to.
"""
def dimensionFromEigenIndividualVariance(eigenValues):
threshold = 0.01
dimension = 0
s = numpy.sum(eigenValues)
print "sum eigen" + str(s)
for eigen in eigenValues:
r = eigen / s
if r > threshold:
dimension += 1
return dimension
# requires the eigen values to be sorted before
def dimensionFromEigenTotalVariance(eigenValues):
threshold = 0.95
dimension = 0
s = numpy.sum(eigenValues)
print "sum eigen" + str(s)
current = 0
for eigen in eigenValues:
r = (eigen / s)
current += r
if current >= threshold:
break
dimension += 1
return dimension
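# A small, hypothetical illustration (not part of the original pipeline) of how the two
# heuristics above behave on the same eigenvalue spectrum: the individual-variance rule
# counts every component whose share of the total variance exceeds the 1% threshold,
# while the total-variance rule counts sorted components until their cumulative share
# reaches 95% (note that, as written, the component that crosses the threshold is not
# itself counted).
def exampleDimensionHeuristics():
    eigenValues = numpy.array([7.0, 2.0, 0.6, 0.3, 0.08, 0.02])  # sums to 10
    # individual shares: 0.7, 0.2, 0.06, 0.03, 0.008, 0.002 -> four shares exceed 0.01
    dimIndividual = dimensionFromEigenIndividualVariance(eigenValues)  # 4
    # cumulative shares: 0.7, 0.9, 0.96, ... -> crosses 0.95 at the third component
    dimTotal = dimensionFromEigenTotalVariance(eigenValues)  # 2
    return (dimIndividual, dimTotal)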
"""
This method uses the Karhunen-Loève transform to quickly compute the
eigenvalues of the data.
It is faster than the SVD method below, but is more prone to floating point
errors than the SVD one.
Arguments:
train:
Numpy array of arrays
dimension: the dimension to which to reduce the size of the data set.
Returns:
The principal components of the data.
"""
# Returns the principal components of the given training
# data by computing the principal eigen vectors of the
# covariance matrix of the data
def pca(train, dimension):
# Use the Karhunen-Loève transform to quickly compute
# the principal components.
rows, cols = train.shape
# Step 1: Get the mean of each column of the data,
# i.e. create the average image
u = convertDataToZeroMean(train)
# Step2: Compute the eigen values of the U * U^T matrix
# the size of U * U^T is rows * rows (ie the number of data points you have
# in your training)
eigVals, eigVecs = scipy.linalg.eig(u.dot(u.T))
# Step 3: Compute the eigen vectors of U^T * U from the eigen vectors of U * U^T
bigEigVecs = numpy.zeros((rows, cols))
for i in xrange(rows):
bigEigVecs[i] = u.T.dot(eigVecs[:, i])
# Step 4: Normalize the eigen vectors to get orthonormal components
bigEigVecs = map(lambda x: x / scipy.linalg.norm(x), bigEigVecs)
eigValsBigVecs = zip(eigVals, bigEigVecs)
sortedEigValsBigVecs = sorted(eigValsBigVecs, key=lambda x : x[0], reverse=True)
index = 0
if dimension is None:
# Get the eigen values
# Note that these are not the eigen values of the covariance matrix
# but the eigen values of U * U ^T
# however, this is fine because they just differ by a factor
# so the ratio between eigen values will be preserved
eigenValues = map(lambda x : x[0], sortedEigValsBigVecs)
dimension = dimensionFromEigenTotalVariance(eigenValues)
print "Using PCA dimension " + str(dimension)
result = np.empty((rows, dimension))
for eigVal, vector in sortedEigValsBigVecs:
if index >= dimension:
break
if eigVal <=0:
print "Warning: Non-positive eigen value"
result[:, index] = vector
index = index + 1
return result
"""
Arguments:
train:
Numpy array of arrays
dimension: the dimension to which to reduce the size of the data set.
Returns:
The principal components of the data.
This method should be preferred over the above: it is well known that the
SVD methods are more stable than the ones that require the computation of
the eigen values and eigen vectors.
For more detail see:
http://math.stackexchange.com/questions/3869/what-is-the-intuitive-relationship-between-svd-and-pca
"""
def pcaWithSVD(train, dimension=None):
zeroMean = convertDataToZeroMean(train)
# SVD guarantees that the singular values are in non-increasing order;
# this means that the u's are already ordered as required, according
# to the magnitude of the eigen values
u, s, vh = scipy.linalg.svd(zeroMean)
if dimension is None:
# Get the eigen values from the singular values
eigenValues = s ** 2
dimension = dimensionFromEigenTotalVariance(eigenValues)
print "Using PCA dimension " + str(dimension)
return vh[0:dimension]
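# Hypothetical usage sketch (not part of the original pipeline) showing how the two PCA
# variants above are called: both take an (n_samples x n_features) training matrix, and
# the dimension can be given explicitly or left as None so that the total-variance
# heuristic picks it. As noted above, the SVD variant is the numerically safer choice.
def examplePcaUsage(trainingMatrix):
    componentsSvd = pcaWithSVD(trainingMatrix)  # dimension chosen by the heuristic
    componentsKl = pca(trainingMatrix, 10)      # fixed number of components
    return (componentsSvd, componentsKl)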
"""
Arguments:
pcaMethod: a method to use for PCA.
images: A python list of images that have to be of the same size.
dimension: the dimension to which to reduce the size of the data set.
Returns:
A tuple:
The first element of the tuple is formed from the eigen faces of given
images.
The second element of the tuple is formed from the vector version of the
eigen faces. This is kept for optimization reasons.
"""
def getEigenFaces(pcaMethod, images, dimension=None):
imgSize = images[0].shape
# this call should not be here: the code should assume that the images have
# been transformed to vectors before
imgs = imagesToVectors(images)
vectors = pcaMethod(imgs, dimension)
eigenFaces = map(lambda x: vectorToImage(x, imgSize), vectors)
return (eigenFaces, vectors)
def reduce(principalComponents, vectors):
assert len(principalComponents) > 0
print principalComponents[0].shape
principalComponents = np.array(principalComponents)
lowDimRepresentation = np.dot(vectors, principalComponents.T)
# lowDimRepresentation = map(lambda x : vectors.dot(x), principalComponents)
# sameDimRepresentation = \
# sum([ x * y for x, y in zip(principalComponents, lowDimRepresentation)])
# TODO: do this with einsum
sameDimRepresentation = lowDimRepresentation[:, np.newaxis] * principalComponents.T
sameDimRepresentation = sameDimRepresentation.sum(axis=2)
# TODO: create the proper thing here so that you can
# easily see what the output is
return (lowDimRepresentation, sameDimRepresentation)
"""
Reduces a 2D image represented by a numpy 2D array of integer values (pixels)
to a lower dimension, dictated by the number of principal components.
"""
def reduceImageToLowerDimensions(principalComponents, image2D):
assert len(principalComponents) > 0
size = principalComponents[0].shape
vector = vectorToImage(image2D, size)
lowDimRepresentation = map(lambda x : x.T.dot(vector), principalComponents)
sameDimRepresentation = \
sum([ x * y for x, y in zip(principalComponents, lowDimRepresentation)])
return (lowDimRepresentation, sameDimRepresentation)
def main():
# Load all the image files in the current directory
picFiles = []
path = currentDir + PICTURE_PATH
for root, dirs, files in os.walk(path):
if root != path:
picFiles += map(lambda x: os.path.join(root, x), files)
print len(picFiles)
imgs = map(lambda x: misc.imread(x, flatten=True), picFiles)
eigenFaces, principalComponents = getEigenFaces(pca, imgs)
# plt.imshow(eigenFaces[0], cmap=plt.cm.gray)
# plt.show()
lowDimRepresentation, sameDimRepresentation = \
reduceImageToLowerDimensions(principalComponents, imgs[0])
plt.imshow(imgs[0], cmap=plt.cm.gray)
plt.show()
image2D = vectorToImage(sameDimRepresentation, imgs[0].shape)
plt.imshow(image2D, cmap=plt.cm.gray)
plt.show()
print "done"
if __name__ == '__main__':
main()
|
|
#! /usr/bin/env nix-shell
#! nix-shell -i python3 -p "python3.withPackages (ps: with ps; [ mypy attrs packaging rich ])"
#
# This script downloads Home Assistant's source tarball.
# Inside the homeassistant/components directory, each integration has an associated manifest.json,
# specifying required packages and other integrations it depends on:
#
# {
# "requirements": [ "package==1.2.3" ],
# "dependencies": [ "component" ]
# }
#
# By parsing the files, a dictionary mapping integrations to requirements and dependencies is created.
# For all of these requirements and the dependencies' requirements,
# nixpkgs' python3Packages are searched for appropriate names.
# Then, a Nix attribute set mapping integration name to dependencies is created.
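#
# Each generated entry in component-packages.nix then looks roughly like this
# (component and package names below are made up for illustration):
#
#   "example_component" = ps: with ps; [ some-package another-package ]; # missing inputs: unpackaged-req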
import json
import os
import pathlib
import re
import subprocess
import sys
import tarfile
import tempfile
from functools import reduce
from io import BytesIO
from typing import Dict, Optional, Set, Any
from urllib.request import urlopen
from packaging import version as Version
from rich.console import Console
from rich.table import Table
COMPONENT_PREFIX = "homeassistant.components"
PKG_SET = "home-assistant.python.pkgs"
# If some requirements are matched by multiple or no Python packages, the
# following can be used to choose the correct one
PKG_PREFERENCES = {
"youtube_dl": "youtube-dl-light",
"tensorflow": "tensorflow",
"fiblary3": "fiblary3-fork", # https://github.com/home-assistant/core/issues/66466
}
def run_mypy() -> None:
cmd = ["mypy", "--ignore-missing-imports", __file__]
print(f"$ {' '.join(cmd)}")
subprocess.run(cmd, check=True)
def get_version():
with open(os.path.dirname(sys.argv[0]) + "/default.nix") as f:
# A version consists of digits, dots, and possibly a "b" (for beta)
m = re.search('hassVersion = "([\\d\\.b]+)";', f.read())
return m.group(1)
def parse_components(version: str = "master"):
components = {}
components_with_tests = []
with tempfile.TemporaryDirectory() as tmp:
with urlopen(
f"https://github.com/home-assistant/home-assistant/archive/{version}.tar.gz"
) as response:
tarfile.open(fileobj=BytesIO(response.read())).extractall(tmp)
# Use part of a script from the Home Assistant codebase
core_path = os.path.join(tmp, f"core-{version}")
for entry in os.scandir(os.path.join(core_path, "tests/components")):
if entry.is_dir():
components_with_tests.append(entry.name)
sys.path.append(core_path)
from script.hassfest.model import Integration
integrations = Integration.load_dir(
pathlib.Path(
os.path.join(core_path, "homeassistant/components")
)
)
for domain in sorted(integrations):
integration = integrations[domain]
if not integration.disabled:
components[domain] = integration.manifest
return components, components_with_tests
# Recursively get the requirements of a component and its dependencies
def get_reqs(components: Dict[str, Dict[str, Any]], component: str, processed: Set[str]) -> Set[str]:
requirements = set(components[component].get("requirements", []))
deps = components[component].get("dependencies", [])
deps.extend(components[component].get("after_dependencies", []))
processed.add(component)
for dependency in deps:
if dependency not in processed:
requirements.update(get_reqs(components, dependency, processed))
return requirements
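# A tiny, hypothetical example of the recursion above (component and package names are
# made up): a component's own requirements are merged with those of its transitive
# dependencies and after_dependencies.
def example_get_reqs() -> Set[str]:
    components: Dict[str, Dict[str, Any]] = {
        "light": {"requirements": ["lib-a==1.0"], "dependencies": ["http"]},
        "http": {"requirements": ["lib-b==2.0"]},
    }
    # returns {"lib-a==1.0", "lib-b==2.0"}
    return get_reqs(components, "light", set())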
def dump_packages() -> Dict[str, Dict[str, str]]:
# Store a JSON dump of Nixpkgs' python3Packages
output = subprocess.check_output(
[
"nix-env",
"-f",
os.path.dirname(sys.argv[0]) + "/../../..",
"-qa",
"-A",
PKG_SET,
"--arg", "config", "{ allowAliases = false; }",
"--json",
]
)
return json.loads(output)
def name_to_attr_path(req: str, packages: Dict[str, Dict[str, str]]) -> Optional[str]:
if req in PKG_PREFERENCES:
return f"{PKG_SET}.{PKG_PREFERENCES[req]}"
attr_paths = []
names = [req]
# E.g. python-mpd2 is actually called python3.6-mpd2
# instead of python-3.6-python-mpd2 inside Nixpkgs
if req.startswith("python-") or req.startswith("python_"):
names.append(req[len("python-") :])
for name in names:
# treat "-" and "_" equally
name = re.sub("[-_]", "[-_]", name)
# python(minor).(major)-(pname)-(version or unstable-date)
# we need the version qualifier, or we'll have multiple matches
# (e.g. pyserial and pyserial-asyncio when looking for pyserial)
pattern = re.compile(f"^python\\d\\.\\d-{name}-(?:\\d|unstable-.*)", re.I)
for attr_path, package in packages.items():
if pattern.match(package["name"]):
attr_paths.append(attr_path)
# Let's hope there's only one derivation with a matching name
assert len(attr_paths) <= 1, f"{req} matches more than one derivation: {attr_paths}"
if attr_paths:
return attr_paths[0]
else:
return None
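# Illustration of the matching above (derivation names are hypothetical): the requirement
# "pyserial" yields the pattern ^python\d\.\d-pyserial-(?:\d|unstable-.*), which matches
# a derivation named "python3.6-pyserial-3.4" but not "python3.6-pyserial-asyncio-0.4",
# so only the intended attribute path is collected.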
def get_pkg_version(package: str, packages: Dict[str, Dict[str, str]]) -> Optional[str]:
pkg = packages.get(f"{PKG_SET}.{package}", None)
if not pkg:
return None
return pkg["version"]
def main() -> None:
packages = dump_packages()
version = get_version()
print("Generating component-packages.nix for version {}".format(version))
components, components_with_tests = parse_components(version=version)
build_inputs = {}
outdated = {}
for component in sorted(components.keys()):
attr_paths = []
missing_reqs = []
reqs = sorted(get_reqs(components, component, set()))
for req in reqs:
# Some requirements are specified by url, e.g. https://example.org/foobar#xyz==1.0.0
# Therefore, if there's a "#" in the line, only take the part after it
req = req[req.find("#") + 1 :]
name, required_version = req.split("==", maxsplit=1)
# Remove extra_require from name, e.g. samsungctl instead of
# samsungctl[websocket]
if name.endswith("]"):
name = name[:name.find("[")]
attr_path = name_to_attr_path(name, packages)
if our_version := get_pkg_version(name, packages):
if Version.parse(our_version) < Version.parse(required_version):
outdated[name] = {
'wanted': required_version,
'current': our_version
}
if attr_path is not None:
# Add attribute path without "python3Packages." prefix
attr_paths.append(attr_path[len(PKG_SET + ".") :])
else:
missing_reqs.append(name)
else:
build_inputs[component] = (attr_paths, missing_reqs)
with open(os.path.dirname(sys.argv[0]) + "/component-packages.nix", "w") as f:
f.write("# Generated by parse-requirements.py\n")
f.write("# Do not edit!\n\n")
f.write("{\n")
f.write(f' version = "{version}";\n')
f.write(" components = {\n")
for component, deps in build_inputs.items():
available, missing = deps
f.write(f' "{component}" = ps: with ps; [')
if available:
f.write(" " + " ".join(available))
f.write(" ];")
if len(missing) > 0:
f.write(f" # missing inputs: {' '.join(missing)}")
f.write("\n")
f.write(" };\n")
f.write(" # components listed in tests/components for which all dependencies are packaged\n")
f.write(" supportedComponentsWithTests = [\n")
for component, deps in build_inputs.items():
available, missing = deps
if len(missing) == 0 and component in components_with_tests:
f.write(f' "{component}"' + "\n")
f.write(" ];\n")
f.write("}\n")
supported_components = reduce(lambda n, c: n + (build_inputs[c][1] == []),
components.keys(), 0)
total_components = len(components)
print(f"{supported_components} / {total_components} components supported, "
f"i.e. {supported_components / total_components:.2%}")
if outdated:
table = Table(title="Outdated dependencies")
table.add_column("Package")
table.add_column("Current")
table.add_column("Wanted")
for package, version in sorted(outdated.items()):
table.add_row(package, version['current'], version['wanted'])
console = Console()
console.print(table)
if __name__ == "__main__":
run_mypy()
main()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of helpers for use with SamplingDecoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops.distributions import bernoulli
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.util import nest
__all__ = [
"Helper",
"TrainingHelper",
"GreedyEmbeddingHelper",
"SampleEmbeddingHelper",
"CustomHelper",
"ScheduledEmbeddingTrainingHelper",
"ScheduledOutputTrainingHelper",
"InferenceHelper",
]
_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access
def _unstack_ta(inp):
return tensor_array_ops.TensorArray(
dtype=inp.dtype, size=array_ops.shape(inp)[0],
element_shape=inp.get_shape()[1:]).unstack(inp)
@six.add_metaclass(abc.ABCMeta)
class Helper(object):
"""Interface for implementing sampling in seq2seq decoders.
Helper instances are used by `BasicDecoder`.
"""
@abc.abstractproperty
def batch_size(self):
"""Batch size of tensor returned by `sample`.
Returns a scalar int32 tensor.
"""
raise NotImplementedError("batch_size has not been implemented")
@abc.abstractproperty
def sample_ids_shape(self):
"""Shape of tensor returned by `sample`, excluding the batch dimension.
Returns a `TensorShape`.
"""
raise NotImplementedError("sample_ids_shape has not been implemented")
@abc.abstractproperty
def sample_ids_dtype(self):
"""DType of tensor returned by `sample`.
Returns a DType.
"""
raise NotImplementedError("sample_ids_dtype has not been implemented")
@abc.abstractmethod
def initialize(self, name=None):
"""Returns `(initial_finished, initial_inputs)`."""
pass
@abc.abstractmethod
def sample(self, time, outputs, state, name=None):
"""Returns `sample_ids`."""
pass
@abc.abstractmethod
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""Returns `(finished, next_inputs, next_state)`."""
pass
class CustomHelper(Helper):
"""Base abstract class that allows the user to customize sampling."""
def __init__(self, initialize_fn, sample_fn, next_inputs_fn,
sample_ids_shape=None, sample_ids_dtype=None):
"""Initializer.
Args:
initialize_fn: callable that returns `(finished, next_inputs)`
for the first iteration.
sample_fn: callable that takes `(time, outputs, state)`
and emits tensor `sample_ids`.
next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
and emits `(finished, next_inputs, next_state)`.
sample_ids_shape: Either a list of integers, or a 1-D Tensor of type
`int32`, the shape of each value in the `sample_ids` batch. Defaults to
a scalar.
sample_ids_dtype: The dtype of the `sample_ids` tensor. Defaults to int32.
"""
self._initialize_fn = initialize_fn
self._sample_fn = sample_fn
self._next_inputs_fn = next_inputs_fn
self._batch_size = None
self._sample_ids_shape = tensor_shape.TensorShape(sample_ids_shape or [])
self._sample_ids_dtype = sample_ids_dtype or dtypes.int32
@property
def batch_size(self):
if self._batch_size is None:
raise ValueError("batch_size accessed before initialize was called")
return self._batch_size
@property
def sample_ids_shape(self):
return self._sample_ids_shape
@property
def sample_ids_dtype(self):
return self._sample_ids_dtype
def initialize(self, name=None):
with ops.name_scope(name, "%sInitialize" % type(self).__name__):
(finished, next_inputs) = self._initialize_fn()
if self._batch_size is None:
self._batch_size = array_ops.size(finished)
return (finished, next_inputs)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(
name, "%sSample" % type(self).__name__, (time, outputs, state)):
return self._sample_fn(time=time, outputs=outputs, state=state)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(
name, "%sNextInputs" % type(self).__name__, (time, outputs, state)):
return self._next_inputs_fn(
time=time, outputs=outputs, state=state, sample_ids=sample_ids)
class TrainingHelper(Helper):
"""A helper for use during training. Only reads inputs.
Returned sample_ids are the argmax of the RNN output logits.
"""
def __init__(self, inputs, sequence_length, time_major=False, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
name: Name scope for any created operations.
Raises:
ValueError: if `sequence_length` is not a 1D tensor.
"""
with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
inputs = ops.convert_to_tensor(inputs, name="inputs")
if not time_major:
inputs = nest.map_structure(_transpose_batch_time, inputs)
self._input_tas = nest.map_structure(_unstack_ta, inputs)
self._sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if self._sequence_length.get_shape().ndims != 1:
raise ValueError(
"Expected sequence_length to be a vector, but received shape: %s" %
self._sequence_length.get_shape())
self._zero_inputs = nest.map_structure(
lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
self._batch_size = array_ops.size(sequence_length)
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return tensor_shape.TensorShape([])
@property
def sample_ids_dtype(self):
return dtypes.int32
def initialize(self, name=None):
with ops.name_scope(name, "TrainingHelperInitialize"):
finished = math_ops.equal(0, self._sequence_length)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
return (finished, next_inputs)
def sample(self, time, outputs, name=None, **unused_kwargs):
with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
sample_ids = math_ops.cast(
math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
"""next_inputs_fn for TrainingHelper."""
with ops.name_scope(name, "TrainingHelperNextInputs",
[time, outputs, state]):
next_time = time + 1
finished = (next_time >= self._sequence_length)
all_finished = math_ops.reduce_all(finished)
def read_from_ta(inp):
return inp.read(next_time)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(read_from_ta, self._input_tas))
return (finished, next_inputs, state)
class ScheduledEmbeddingTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling.
Returns -1s for sample_ids where no sampling took place; valid sample id
values elsewhere.
"""
def __init__(self, inputs, sequence_length, embedding, sampling_probability,
time_major=False, seed=None, scheduling_seed=None, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
sampling_probability: A 0D `float32` tensor: the probability of sampling
categorically from the output ids instead of reading directly from the
inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
scheduling_seed: The schedule decision rule sampling seed.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
[embedding, sampling_probability]):
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
self._seed = seed
self._scheduling_seed = scheduling_seed
super(ScheduledEmbeddingTrainingHelper, self).__init__(
inputs=inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
[time, outputs, state]):
# Return -1s where we did not sample, and sample_ids elsewhere
select_sampler = bernoulli.Bernoulli(
probs=self._sampling_probability, dtype=dtypes.bool)
select_sample = select_sampler.sample(
sample_shape=self.batch_size, seed=self._scheduling_seed)
sample_id_sampler = categorical.Categorical(logits=outputs)
return array_ops.where(
select_sample,
sample_id_sampler.sample(seed=self._seed),
gen_array_ops.fill([self.batch_size], -1))
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
def maybe_sample():
"""Perform scheduled sampling."""
where_sampling = math_ops.cast(
array_ops.where(sample_ids > -1), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(sample_ids <= -1), dtypes.int32)
sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)
inputs_not_sampling = array_ops.gather_nd(
base_next_inputs, where_not_sampling)
sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
class ScheduledOutputTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling directly to outputs.
Returns False for sample_ids where no sampling took place; True elsewhere.
"""
def __init__(self, inputs, sequence_length, sampling_probability,
time_major=False, seed=None, next_inputs_fn=None,
auxiliary_inputs=None, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
sampling_probability: A 0D `float32` tensor: the probability of sampling
from the outputs instead of reading directly from the inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
next_inputs_fn: (Optional) callable to apply to the RNN outputs to create
the next input when sampling. If `None` (default), the RNN outputs will
be used as the next inputs.
auxiliary_inputs: An optional (structure of) auxiliary input tensors with
a shape that matches `inputs` in all but (potentially) the final
dimension. These tensors will be concatenated to the sampled output or
the `inputs` when not sampling for use as the next input.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledOutputTrainingHelper",
[inputs, auxiliary_inputs, sampling_probability]):
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
if auxiliary_inputs is None:
maybe_concatenated_inputs = inputs
else:
inputs = ops.convert_to_tensor(inputs, name="inputs")
auxiliary_inputs = ops.convert_to_tensor(
auxiliary_inputs, name="auxiliary_inputs")
maybe_concatenated_inputs = nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
inputs, auxiliary_inputs)
if not time_major:
auxiliary_inputs = nest.map_structure(
_transpose_batch_time, auxiliary_inputs)
self._auxiliary_input_tas = (
nest.map_structure(_unstack_ta, auxiliary_inputs)
if auxiliary_inputs is not None else None)
self._seed = seed
self._next_inputs_fn = next_inputs_fn
super(ScheduledOutputTrainingHelper, self).__init__(
inputs=maybe_concatenated_inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledOutputTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperSample",
[time, outputs, state]):
sampler = bernoulli.Bernoulli(probs=self._sampling_probability)
return sampler.sample(sample_shape=self.batch_size, seed=self._seed)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledOutputTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
sample_ids = math_ops.cast(sample_ids, dtypes.bool)
def maybe_sample():
"""Perform scheduled sampling."""
def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
"""Concatenate outputs with auxiliary inputs, if they exist."""
if self._auxiliary_input_tas is None:
return outputs_
next_time = time + 1
auxiliary_inputs = nest.map_structure(
lambda ta: ta.read(next_time), self._auxiliary_input_tas)
if indices is not None:
auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
return nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
outputs_, auxiliary_inputs)
if self._next_inputs_fn is None:
return array_ops.where(
sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
base_next_inputs)
where_sampling = math_ops.cast(
array_ops.where(sample_ids), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
where_not_sampling)
sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
self._next_inputs_fn(outputs_sampling), where_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
no_samples = math_ops.logical_not(math_ops.reduce_any(sample_ids))
next_inputs = control_flow_ops.cond(
math_ops.logical_or(all_finished, no_samples),
lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
class GreedyEmbeddingHelper(Helper):
"""A helper for use during inference.
Uses the argmax of the output (treated as logits) and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`. The returned tensor
will be passed to the decoder input.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
Raises:
ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
scalar.
"""
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._batch_size = array_ops.size(start_tokens)
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._start_inputs = self._embedding_fn(self._start_tokens)
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return tensor_shape.TensorShape([])
@property
def sample_ids_dtype(self):
return dtypes.int32
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
"""sample for GreedyEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, use argmax to get the most probable id
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
sample_ids = math_ops.argmax(outputs, axis=-1, output_type=dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""next_inputs_fn for GreedyEmbeddingHelper."""
del time, outputs # unused by next_inputs_fn
finished = math_ops.equal(sample_ids, self._end_token)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (finished, next_inputs, state)
class SampleEmbeddingHelper(GreedyEmbeddingHelper):
"""A helper for use during inference.
Uses sampling (from a distribution) instead of argmax and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token,
softmax_temperature=None, seed=None):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`. The returned tensor
will be passed to the decoder input.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
softmax_temperature: (Optional) `float32` scalar, value to divide the
logits by before computing the softmax. Larger values (above 1.0) result
in more random samples, while smaller values push the sampling
distribution towards the argmax. Must be strictly greater than 0.
Defaults to 1.0.
seed: (Optional) The sampling seed.
Raises:
ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
scalar.
"""
super(SampleEmbeddingHelper, self).__init__(
embedding, start_tokens, end_token)
self._softmax_temperature = softmax_temperature
self._seed = seed
def sample(self, time, outputs, state, name=None):
"""sample for SampleEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, we sample instead of argmax (greedy).
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
if self._softmax_temperature is None:
logits = outputs
else:
logits = outputs / self._softmax_temperature
sample_id_sampler = categorical.Categorical(logits=logits)
sample_ids = sample_id_sampler.sample(seed=self._seed)
return sample_ids
class InferenceHelper(Helper):
"""A helper to use during inference with a custom sampling function."""
def __init__(self, sample_fn, sample_shape, sample_dtype,
start_inputs, end_fn, next_inputs_fn=None):
"""Initializer.
Args:
sample_fn: A callable that takes `outputs` and emits tensor `sample_ids`.
sample_shape: Either a list of integers, or a 1-D Tensor of type `int32`,
the shape of each sample in the batch returned by `sample_fn`.
sample_dtype: the dtype of the sample returned by `sample_fn`.
start_inputs: The initial batch of inputs.
end_fn: A callable that takes `sample_ids` and emits a `bool` vector
shaped `[batch_size]` indicating whether each sample is an end token.
next_inputs_fn: (Optional) A callable that takes `sample_ids` and returns
the next batch of inputs. If not provided, `sample_ids` is used as the
next batch of inputs.
"""
self._sample_fn = sample_fn
self._end_fn = end_fn
self._sample_shape = tensor_shape.TensorShape(sample_shape)
self._sample_dtype = sample_dtype
self._next_inputs_fn = next_inputs_fn
self._batch_size = array_ops.shape(start_inputs)[0]
self._start_inputs = ops.convert_to_tensor(
start_inputs, name="start_inputs")
@property
def batch_size(self):
return self._batch_size
@property
def sample_ids_shape(self):
return self._sample_shape
@property
def sample_ids_dtype(self):
return self._sample_dtype
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
del time, state # unused by sample
return self._sample_fn(outputs)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
del time, outputs # unused by next_inputs
if self._next_inputs_fn is None:
next_inputs = sample_ids
else:
next_inputs = self._next_inputs_fn(sample_ids)
finished = self._end_fn(sample_ids)
return (finished, next_inputs, state)
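# A minimal usage sketch (an illustrative addition, not part of the public API of this
# module): helpers defined above are driven by a `BasicDecoder`, which calls their
# `initialize`/`sample`/`next_inputs` methods step by step. The cell, embedding matrix,
# token ids and initial state below are assumed to be supplied by the caller.
def _greedy_decoding_example(cell, embedding_matrix, start_tokens, end_token,
                             initial_state, maximum_iterations=100):
  """Sketch: greedy inference with GreedyEmbeddingHelper and BasicDecoder."""
  from tensorflow.contrib.seq2seq.python.ops import basic_decoder
  helper = GreedyEmbeddingHelper(
      embedding=embedding_matrix,
      start_tokens=start_tokens,
      end_token=end_token)
  inference_decoder = basic_decoder.BasicDecoder(
      cell=cell, helper=helper, initial_state=initial_state)
  # dynamic_decode repeatedly calls helper.sample / helper.next_inputs until every
  # sequence in the batch has produced `end_token` or the iteration cap is reached.
  return decoder.dynamic_decode(
      inference_decoder, maximum_iterations=maximum_iterations)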
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: ts=2 sw=2 et ai
###############################################################################
# Copyright (c) 2012,2013 Andreas Vogel [email protected]
# parts of this software are based on tiler_tools (...)
# the license terms (see below) apply to the complete software the same way
#
###############################################################################
# Copyright (c) 2011, Vadim Shlyakhov
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import xml.sax as sax
from optparse import OptionParser
import math
import traceback
info="""
Parse a directory hierarchy created with mobac (output format OSMTracker) and the associated mobac profile,
and create an avnav.xml file.
The directory structure must be z/x/y.png. Tiles must be 256x256 (not checked).
Tile numbering is expected to have y=0 at the upper left.
When reading the profile we try to map each of the "layers" found inside to one of our layers.
This requires care when creating them: always use the same min/max zoom for a group of them,
as we will move each group directly into one of our layers.
"""
OVERVIEW="avnav.xml"
#an xml description of the layers we generated - following the TMS spec
overview_xml='''<?xml version="1.0" encoding="UTF-8" ?>
<TileMapService version="1.0.0" >
<Title>avnav tile map service</Title>
<TileMaps>
%(tilemaps)s
</TileMaps>
</TileMapService>
'''
overview_tilemap_xml='''
<TileMap
title="%(title)s"
srs="OSGEO:41001"
profile="%(profile)s"
href="%(url)s"
minzoom="%(minZoom)d"
maxzoom="%(maxZoom)d">
%(bounding)s
<TileFormat width="%(tileSize)d" height="%(tileSize)d" mime-type="x-png" extension="png" %(zoomOffset)s/>
%(layerboundings)s
</TileMap>
'''
boundingbox_xml='''
<BoundingBox minlon="%(minlon).11G" minlat="%(minlat).11G" maxlon="%(maxlon).11G" maxlat="%(maxlat).11G"
title="%(title)s"/>
'''
zoom_boundings_entry='''
<BoundingBox minx="%(minx)s" maxx="%(maxx)s" miny="%(miny)s" maxy="%(maxy)s">
</BoundingBox>
'''
zoom_boundings_zoom='''
<ZoomBoundings zoom="%(zoom)s">
%(boundings)s
</ZoomBoundings>
'''
zoom_boundings='''
<LayerZoomBoundings>
%(boundings)s
</LayerZoomBoundings>
'''
options=None
upzoom=1 #how many additional layers to be created for a single source gemf file
def log(s):
print("LOG: %s"%(s,))
def debug(num,txt):
if (num <= options.verbose):
print("DEBUG %s"%(txt,))
#convert tile numbers to lat/lon
#see:http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#X_and_Y
#This returns the NW-corner of the square
def num2deg(xtile, ytile, zoom):
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_rad = math.atan(math.sinh(math.pi * (1 - 2 * ytile / n)))
lat_deg = math.degrees(lat_rad)
return (lat_deg, lon_deg)
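#worked examples (added for illustration):
#  num2deg(0, 0, 0) -> (85.0511287798..., -180.0)  #NW corner of the single zoom-0 tile
#  num2deg(1, 1, 1) -> (0.0, 0.0)                  #NW corner of the south-east quadrant at zoom 1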
class Tileset(object):
def __init__(self,name,zoom,minx,miny,maxx,maxy):
self.name=name
self.zoom=zoom
self.minx=minx
self.miny=miny
self.maxx=maxx
self.maxy=maxy
class Tilegroup(object):
def __init__(self,name):
self.elements=[]
self.name=name
self.minzoom=-1
self.maxzoom=0
#boundings is zoom,name,minx,miny,maxx,maxy (tilenumbers)
def addElement(self,element):
if self.minzoom == -1 or element.zoom < self.minzoom:
self.minzoom=element.zoom
if element.zoom > self.maxzoom:
self.maxzoom=element.zoom
self.elements.append(element)
def getMaxZoomElements(self):
rt=[]
for el in self.elements:
if el.zoom==self.maxzoom:
rt.append(el)
return rt
def getZoomElements(self,zoom):
rt=[]
for el in self.elements:
if el.zoom==zoom:
rt.append(el)
return rt
class Layer(object):
def __init__(self,name,minzoom,maxzoom,baseurl=""):
self.tlist=[]
self.minzoom=minzoom
self.maxzoom=maxzoom
self.name=name
self.baseurl=baseurl
  #add a group if its minzoom/maxzoom fits
  #return True/False
def addEntry(self,tilegroup):
if tilegroup.minzoom != self.minzoom:
return False
if tilegroup.maxzoom != self.maxzoom:
return False
self.tlist.append(tilegroup)
return True
def getMaxZoomElements(self):
rt=[]
for tg in self.tlist:
rt+=tg.getMaxZoomElements()
return rt
def getZoomElements(self,zoom):
rt=[]
for tg in self.tlist:
rt+=tg.getZoomElements(zoom)
return rt
def createBoundingsXml(self, useMax=False):
upperOffset=0.999 #using this for the max value to avoid some rounding errors
startzoom = None
if useMax:
startzoom = self.maxzoom
else:
startzoom = self.minzoom
minlat=85
maxlat=-85
minlon=180
maxlon=-180
for zoom in range(startzoom, self.maxzoom + 1):
mz = self.getZoomElements(zoom)
for el in mz:
# minx/miny: upper left corner - minlon,maxlat
# maxx/maxy: lower right - maxlon,minlat
# try to avoid always including the next tiles by subtracting 1/1000...
elmaxlat, elminlon = num2deg(el.minx, el.miny, el.zoom)
elminlat, elmaxlon = num2deg(el.maxx + upperOffset, el.maxy + upperOffset, el.zoom)
if elmaxlat > maxlat:
maxlat=elmaxlat
if elmaxlon > maxlon:
maxlon=elmaxlon
if elminlat < minlat:
minlat=elminlat
if elminlon < minlon:
minlon=elminlon
boundings = {'minlon': minlon,
'minlat': minlat,
'maxlon': maxlon,
'maxlat': maxlat,
'title': 'bounding'}
return boundingbox_xml % boundings
#----------------------------
#sax reader for overview
class ListHandler(sax.handler.ContentHandler):
def __init__(self,layerlist):
self.eltype=None
self.layerlist=layerlist
self.currentGroup=None
self.startFound=False
def startElement(self, name, attrs):
self.eltype=name
if name=="atlas":
self.startFound=True
return
if not self.startFound:
return
if name == "Layer":
self.currentGroup=Tilegroup(attrs['name'])
elif name == "Map":
assert self.currentGroup is not None, "invalid xml, missing Layer before Map"
maxtile = attrs["maxTileCoordinate"]
mintile = attrs["minTileCoordinate"]
zoom = int(attrs["zoom"])
maxta=maxtile.split('/')
minta=mintile.split('/')
assert len(maxta) == 2, "invalid format for maxTile %s"%(maxtile,)
assert len(minta) == 2, "invalid format for minTile %s"%(mintile,)
maxx=int(maxta[0])//256
maxy=int(maxta[1])//256
minx=int(minta[0])//256
miny=int(minta[1])//256
self.currentGroup.addElement(Tileset(attrs['name'], zoom, minx, miny, maxx, maxy))
def endElement(self, name):
if name == "Layer":
#try to insert layer into list
rt=False
for layer in self.layerlist:
rt=layer.addEntry(self.currentGroup)
if rt:
log("added entry %s to layer %s"%(self.currentGroup.name,layer.name))
break
if not rt:
name="Layer-%d:%d"%(self.currentGroup.minzoom,self.currentGroup.maxzoom)
log("creating new layer %s for group %s (%d:%d)"%(name,self.currentGroup.name,self.currentGroup.minzoom,self.currentGroup.maxzoom))
self.layerlist.append(Layer(name,self.currentGroup.minzoom,self.currentGroup.maxzoom))
self.layerlist[-1].addEntry(self.currentGroup)
self.currentGroup=None
def characters(self, content):
pass
def createTileMapForLayer(layer,name,zOffset,tileSize,zoomBoundings):
layerBoundings=""
if zoomBoundings is not None:
for z in list(zoomBoundings.keys()):
zoomBoundingsString=""
for e in zoomBoundings[z]:
zoomBoundingsString+=zoom_boundings_entry % e
layerBoundings+=zoom_boundings_zoom % {'zoom':z,
'boundings':zoomBoundingsString
}
tilemap=overview_tilemap_xml % {
"profile": "zxy-mercator",
"title":name,
"url":layer.baseurl,
"minZoom":layer.minzoom,
"maxZoom":layer.maxzoom,
"bounding":layer.createBoundingsXml(),
"layerboundings":zoom_boundings%{'boundings':layerBoundings},
"tileSize":tileSize,
"zoomOffset":'zOffset="%d"'%(zOffset,)
}
return tilemap
def createOverview(layerlist,zoomBoundings):
tilemaps=""
for layer in layerlist:
    tilemaps+=createTileMapForLayer(layer,layer.name,0,256,zoomBoundings)
overviewstr=overview_xml % {
"tilemaps":tilemaps,
}
return overviewstr
#we create n pseudo-layers
#with each having an increased tile size...
def createOverviewSingleLayer(layer,zoomBoundings,options):
tilemaps=""
boundings=""
zOffset=0
tileSize=256
layerBoundings=""
numupzoom=upzoom
if options is not None and options.get('upzoom') is not None:
numupzoom=options['upzoom']
#currently our front end does not really handle any upzoom
#for idx in range(numupzoom+1):
for idx in range(1):
tilemaps+=createTileMapForLayer(layer,layer.name if idx == 0 else "%s-%d"%(layer.name,idx),
zOffset,tileSize,zoomBoundings)
zOffset+=1
tileSize*=2
overviewstr=overview_xml % {
"tilemaps":tilemaps,
}
return overviewstr
def writeOverview(overviewfname,layerlist):
overviewstr=createOverview(layerlist,None)
with open(overviewfname,"w",encoding='utf-8') as f:
f.write(overviewstr)
log(overviewfname+" written, successfully finished")
#create an avnav.xml string from a GEMF file
#we assume that our GEMF file is somehow complete - i.e. if there is a range
#contained in a higher zoomlevel, we assume that it is also there in lower ones
#so we compute the enclosing ranges only from the highest level for each source
#the data is expected in the format of getSources from GemfFile (an array of sources each containing an array of ranges)
#we have 2 different "styles" of a GEMF file:
#multi source - we assume that we generated this and create one layer from each source, calculating
# a bounding box
#single source - we assume that someone else created this
# we generate n "pseudo" layers - each having a different zOffset
# and we directly map the ranges to layerZoomBoundings
# for all the pseudo-layers they are the same...
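#expected input for getGemfInfo below (added sketch; the chart name and numbers are hypothetical,
#the field names are the ones read by the code):
#  data = [ { 'name': 'chart1',
#             'ranges': [ {'zoom': 10, 'xmin': 550, 'xmax': 552,
#                          'ymin': 330, 'ymax': 333}, ... ] },
#           ... ]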
def getGemfInfo(data,options):
layerlist=[]
if len(data) > 1:
log("creating multi source gemf overview (%d sources)"%(len(data),))
for src in data:
tilegroup=Tilegroup(src['name'])
zoomBoundings={}
for rdata in src['ranges']:
zoom=rdata['zoom']
entry={"minx":rdata['xmin'],
"miny": rdata['ymin'],
"maxx":rdata['xmax'],
"maxy":rdata['ymax']}
if zoomBoundings.get(zoom) is None:
zoomBoundings[zoom]=[]
zoomBoundings[zoom].append(entry)
tileset=Tileset("gemfrange",rdata['zoom'],rdata['xmin'],rdata['ymin'],rdata['xmax'],rdata['ymax'])
tilegroup.addElement(tileset)
layer=Layer(src['name'],tilegroup.minzoom,tilegroup.maxzoom,src['name'])
layer.addEntry(tilegroup)
layerlist.append({'layer':layer,'zoomBoundings':zoomBoundings})
#sort layerlist (srclist) by maxzoom
layerlist.sort(key=lambda x: x['layer'].maxzoom,reverse=True)
tilemaps=""
for l in layerlist:
layer=l['layer']
zoomBoundings=l['zoomBoundings']
tilemaps+=createTileMapForLayer(layer,layer.name,0,256,zoomBoundings)
return overview_xml % {
"tilemaps":tilemaps,
}
else:
#single layer
log("creating single source gemf overview")
src=data[0]
zoomBoundings={}
tilegroup=Tilegroup(src['name'])
for rdata in src['ranges']:
zoom=rdata['zoom']
entry={"minx":rdata['xmin'],
"miny": rdata['ymin'],
"maxx":rdata['xmax'],
"maxy":rdata['ymax']}
if zoomBoundings.get(zoom) is None:
zoomBoundings[zoom]=[]
zoomBoundings[zoom].append(entry)
tileset=Tileset("gemfrange",rdata['zoom'],rdata['xmin'],rdata['ymin'],rdata['xmax'],rdata['ymax'])
tilegroup.addElement(tileset)
layer=Layer(src['name'],tilegroup.minzoom,tilegroup.maxzoom,src['name'])
layer.addEntry(tilegroup)
rt=createOverviewSingleLayer(layer,zoomBoundings,options)
return rt
#parse a mobac profile xml file and return the generated overview as a string
def parseXml(xmlfile,baseurl=""):
log("parsing xml file %s"%(xmlfile,))
layerlist=[]
parser=sax.parse(xmlfile,ListHandler(layerlist))
if len(layerlist) > 0:
if baseurl != "":
for layer in layerlist:
layer.baseurl=baseurl
log("created %d layers from %s"%(len(layerlist),xmlfile))
layerlist.sort(key=lambda x: x.maxzoom,reverse=True)
return createOverview(layerlist,None)
else:
log("empty layerlist for %s"%(xmlfile,))
return None
def parseAndWrite(xmlfile,ovfile):
log("parsing xml file %s"%(xmlfile,))
layerlist=[]
parser=sax.parse(xmlfile,ListHandler(layerlist))
if len(layerlist) > 0:
log("created %d layers from %s"%(len(layerlist),xmlfile))
layerlist.sort(key=lambda x: x.maxzoom,reverse=True)
try:
writeOverview(ovfile,layerlist)
return True
except:
log("error while creating %s:%s"%(ovfile,traceback.format_exc()))
else:
log("xml file %s did not contain any layers"%(xmlfile,))
return False
def main(argv):
global options, layerlist
usage="usage: %prog <options> basedir [mobacprofile]"
parser = OptionParser(
usage = usage,
version="1.0",
description='create overview for avnav')
parser.add_option("-d", "--debug", dest="verbose")
parser.add_option("-i", "--ignore", dest="ignore", action="store_const",const=1)
(options, args) = parser.parse_args(argv[1:])
if options.verbose is None:
options.verbose=0
else:
options.verbose=int(options.verbose)
assert len(args) >=1 ,usage
filename=None
outdir=args[0]
ovfile=os.path.join(outdir,OVERVIEW)
if len(args) < 2:
    #check for xml files in the outdir being newer than avnav.xml
assert os.path.isdir(outdir), "output directory %s does not exist"%(outdir,)
avnavTs=None
if os.path.isfile(ovfile):
avnavTs=os.stat(ovfile).st_mtime
odfiles=os.listdir(outdir)
foundFile=False
for ofile in odfiles:
if ofile == OVERVIEW:
continue
if ofile.lower().endswith(".xml"):
xmlfile=os.path.join(outdir,ofile)
xmlTs=os.stat(xmlfile).st_mtime
if avnavTs is None or xmlTs > avnavTs:
foundFile=True
rt=parseAndWrite(xmlfile, ovfile)
if rt:
return 0
if foundFile or not options.ignore:
log("ERROR: did not find any suitable mobac profile in %s"%(outdir,))
return 1
else:
log("did not find any file to update %s"%(ovfile))
return 2
else:
#filename given on commandline
rt=parseAndWrite(args[1], ovfile)
if rt:
return 0
else:
return 1
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Deprecated! Use WebHdfs instead.
Only some utils and Hdfs are still used.
Interfaces for Hadoop filesystem access via the HADOOP-4707 Thrift APIs.
"""
import errno
import logging
import os
import posixpath
import random
import stat as statconsts
import subprocess
import urlparse
import threading
from thrift.transport import TTransport
from django.utils.encoding import smart_str, force_unicode
from django.utils.translation import ugettext as _
from desktop.lib import thrift_util, i18n
from desktop.lib.conf import validate_port
from hadoop.api.hdfs import Namenode, Datanode
from hadoop.api.hdfs.constants import QUOTA_DONT_SET, QUOTA_RESET
from hadoop.api.common.ttypes import RequestContext, IOException
import hadoop.conf
from hadoop.fs import normpath, SEEK_SET, SEEK_CUR, SEEK_END
from hadoop.fs.exceptions import PermissionDeniedException
LOG = logging.getLogger(__name__)
DEFAULT_USER = "webui"
# The number of bytes to read if not specified
DEFAULT_READ_SIZE = 1024*1024 # 1MB
# The buffer size of the pipe to hdfs -put during upload
WRITE_BUFFER_SIZE = 128*1024 # 128K
# Class that we translate into PermissionDeniedException
HADOOP_ACCESSCONTROLEXCEPTION = "org.apache.hadoop.security.AccessControlException"
# Timeout for thrift calls to NameNode
NN_THRIFT_TIMEOUT = 15
DN_THRIFT_TIMEOUT = 3
# Encoding used by HDFS namespace
HDFS_ENCODING = 'utf-8'
def encode_fs_path(path):
"""encode_fs_path(path) -> byte string in utf8"""
return smart_str(path, HDFS_ENCODING, errors='strict')
def decode_fs_path(path):
"""decode_fs_path(bytestring) -> unicode path"""
return force_unicode(path, HDFS_ENCODING, errors='strict')
def test_fs_configuration(fs_config, hadoop_bin_conf):
"""Test FS configuration. Returns list of (confvar, error)."""
TEST_FILE = '/tmp/.hue_config_test.%s' % (random.randint(0, 9999999999))
res = [ ]
res.extend(validate_port(fs_config.NN_THRIFT_PORT))
res.extend(validate_port(fs_config.NN_HDFS_PORT))
if res:
return res
# Check thrift plugin
try:
fs = HadoopFileSystem.from_config(
fs_config, hadoop_bin_path=hadoop_bin_conf.get())
fs.setuser(fs.superuser)
ls = fs.listdir('/')
except TTransport.TTransportException:
msg = 'Failed to contact Namenode plugin at %s:%s.' % \
(fs_config.NN_HOST.get(), fs_config.NN_THRIFT_PORT.get())
LOG.exception(msg)
res.append((fs_config, msg))
return res
except (IOError, IOException):
msg = 'Failed to see HDFS root directory at %s. Please check HDFS configuration.' % (fs.uri,)
LOG.exception(msg)
res.append((fs_config, msg))
return res
if 'tmp' not in ls:
return res
# Check nn port (via upload)
try:
w_file = fs.open(TEST_FILE, 'w')
except OSError, ex:
msg = 'Failed to execute Hadoop (%s)' % (hadoop_bin_conf.get(),)
LOG.exception(msg)
res.append((hadoop_bin_conf, msg))
return res
try:
try:
w_file.write('hello world')
w_file.close()
except IOError:
msg = 'Failed to upload files using %s' % (fs.uri,)
LOG.exception(msg)
res.append((fs_config.NN_HDFS_PORT, msg))
return res
# Check dn plugin (via read)
try:
r_file = fs.open(TEST_FILE, 'r')
r_file.read()
except Exception:
msg = 'Failed to read file. Are all datanodes configured with the HUE plugin?'
LOG.exception(msg)
res.append((fs_config, msg))
finally:
# Cleanup. Ignore if file not found.
try:
if fs.exists(TEST_FILE):
fs.remove(TEST_FILE)
except Exception, ex:
LOG.error('Failed to cleanup test file "%s:%s": %s' % (fs.uri, TEST_FILE, ex))
return res
def _coerce_exceptions(function):
"""
Decorator that causes exceptions thrown by the decorated function
to be coerced into generic exceptions from the hadoop.fs.exceptions
module.
"""
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except IOException, e:
e.msg = force_unicode(e.msg, errors='replace')
e.stack = force_unicode(e.stack, errors='replace')
LOG.exception("Exception in Hadoop FS call " + function.__name__)
if e.clazz == HADOOP_ACCESSCONTROLEXCEPTION:
raise PermissionDeniedException(e.msg, e)
else:
raise
return wrapper
class Hdfs(object):
"""
An abstract HDFS proxy
"""
@staticmethod
def basename(path):
return posixpath.basename(path)
@staticmethod
def dirname(path):
return posixpath.dirname(path)
@staticmethod
def split(path):
return posixpath.split(path)
@staticmethod
def join(first, *comp_list):
return posixpath.join(first, *comp_list)
@staticmethod
def abspath(path):
return posixpath.abspath(path)
@staticmethod
def normpath(path):
res = posixpath.normpath(path)
# Python normpath() doesn't eliminate leading double slashes
if res.startswith('//'):
return res[1:]
return res
@staticmethod
def parent_path(path):
return Hdfs.join(path, "..")
@staticmethod
def urlsplit(url):
"""
Take an HDFS path (hdfs://nn:port/foo) or just (/foo) and split it into
the standard urlsplit's 5-tuple.
"""
i = url.find('://')
if i == -1:
# Not found. Treat the entire argument as an HDFS path
return ('hdfs', '', normpath(url), '', '')
schema = url[:i]
if schema not in ('hdfs', 'viewfs'):
# Default to standard for non-hdfs
return urlparse.urlsplit(url)
url = url[i+3:]
i = url.find('/')
if i == -1:
# Everything is netloc. Assume path is root.
return (schema, url, '/', '', '')
netloc = url[:i]
path = url[i:]
return (schema, netloc, normpath(path), '', '')
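  # Examples for urlsplit above (added illustration; the host name is hypothetical):
  #   Hdfs.urlsplit('hdfs://namenode:8020/user/foo') -> ('hdfs', 'namenode:8020', '/user/foo', '', '')
  #   Hdfs.urlsplit('/user/foo')                     -> ('hdfs', '', '/user/foo', '', '')
  #   Hdfs.urlsplit('hdfs://namenode:8020')          -> ('hdfs', 'namenode:8020', '/', '', '')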
def listdir_recursive(self, path, glob=None):
"""
listdir_recursive(path, glob=None) -> [ entry names ]
Get directory entry names without stats, recursively.
"""
paths = [path]
while paths:
path = paths.pop()
if self.isdir(path):
hdfs_paths = self.listdir_stats(path, glob)
paths[:0] = [x.path for x in hdfs_paths]
yield path
def create_home_dir(self, home_path=None):
if home_path is None:
home_path = self.get_home_dir()
from useradmin.conf import HOME_DIR_PERMISSIONS
mode = int(HOME_DIR_PERMISSIONS.get(), 8)
if not self.exists(home_path):
user = self.user
try:
try:
self.setuser(self.superuser)
self.mkdir(home_path)
self.chmod(home_path, mode)
self.chown(home_path, user, user)
except IOError:
msg = 'Failed to create home dir ("%s") as superuser %s' % (home_path, self.superuser)
LOG.exception(msg)
raise
finally:
self.setuser(user)
def copyFromLocal(self, local_src, remote_dst, mode=0755):
remote_dst = remote_dst.endswith(posixpath.sep) and remote_dst[:-1] or remote_dst
local_src = local_src.endswith(posixpath.sep) and local_src[:-1] or local_src
if os.path.isdir(local_src):
self._copy_dir(local_src, remote_dst, mode)
else:
(basename, filename) = os.path.split(local_src)
self._copy_file(local_src, self.isdir(remote_dst) and self.join(remote_dst, filename) or remote_dst)
def _copy_dir(self, local_dir, remote_dir, mode=0755):
self.mkdir(remote_dir, mode=mode)
for f in os.listdir(local_dir):
local_src = os.path.join(local_dir, f)
remote_dst = self.join(remote_dir, f)
if os.path.isdir(local_src):
self._copy_dir(local_src, remote_dst, mode)
else:
self._copy_file(local_src, remote_dst)
def _copy_file(self, local_src, remote_dst, chunk_size=1024 * 1024 * 64):
if os.path.isfile(local_src):
if self.exists(remote_dst):
LOG.info(_('%(remote_dst)s already exists. Skipping.') % {'remote_dst': remote_dst})
return
else:
LOG.info(_('%(remote_dst)s does not exist. Trying to copy.') % {'remote_dst': remote_dst})
src = file(local_src)
try:
try:
self.create(remote_dst, permission=0755)
chunk = src.read(chunk_size)
while chunk:
self.append(remote_dst, chunk)
chunk = src.read(chunk_size)
LOG.info(_('Copied %s -> %s.') % (local_src, remote_dst))
except:
LOG.exception(_('Copying %s -> %s failed.') % (local_src, remote_dst))
raise
finally:
src.close()
else:
LOG.info(_('Skipping %s (not a file).') % local_src)
@_coerce_exceptions
def mktemp(self, subdir='', prefix='tmp', basedir=None):
"""
mktemp(prefix) -> <temp_dir or basedir>/<subdir>/prefix.<rand>
Return a unique temporary filename with prefix in the cluster's temp dir.
"""
RANDOM_BITS = 64
base = self.join(basedir or self._temp_dir, subdir)
if not self.isdir(base):
self.mkdir(base)
while True:
name = prefix + '.' + str(random.getrandbits(RANDOM_BITS))
candidate = self.join(base, name)
if not self.exists(candidate):
return candidate
def mkswap(self, filename, subdir='', suffix='swp', basedir=None):
"""
mkswap(filename, suffix) -> <temp_dir or basedir>/<subdir>/filename.<suffix>
    Return the swap filename <filename>.<suffix> under the cluster's temp dir (or basedir).
"""
RANDOM_BITS = 64
base = self.join(basedir or self._temp_dir, subdir)
if not self.isdir(base):
self.mkdir(base)
candidate = self.join(base, "%s.%s" % (filename, suffix))
return candidate
def exists(self):
raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'exists'})
def do_as_user(self):
raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'do_as_user'})
def create(self):
    raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'create'})
def append(self):
raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'append'})
def mkdir(self):
raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'mkdir'})
def isdir(self):
raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'isdir'})
def listdir_stats(self):
raise NotImplementedError(_("%(function)s has not been implemented.") % {'function': 'listdir_stats'})
"""
Deprecated! Use WebHdfs instead
"""
class HadoopFileSystem(Hdfs):
"""
Implementation of Filesystem APIs through Thrift to a Hadoop cluster.
"""
def __init__(self, host, thrift_port, hdfs_port=8020,
nn_kerberos_principal="hdfs",
dn_kerberos_principal="hdfs",
security_enabled=False,
hadoop_bin_path="hadoop",
temp_dir='/tmp'):
"""
@param host hostname or IP of the namenode
@param thrift_port port on which the Thrift plugin is listening
@param hdfs_port port on which NameNode IPC is listening
@param hadoop_bin_path path to find the hadoop wrapper script on the
installed system - default is fine if it is in
the user's PATH env
@param temp_dir Temporary directory, for mktemp()
"""
self.host = host
self.thrift_port = thrift_port
self.hdfs_port = hdfs_port
self.security_enabled = security_enabled
self.nn_kerberos_principal = nn_kerberos_principal
self.dn_kerberos_principal = dn_kerberos_principal
self.hadoop_bin_path = hadoop_bin_path
self._resolve_hadoop_path()
self._temp_dir = temp_dir
self.nn_client = thrift_util.get_client(
Namenode.Client, host, thrift_port,
service_name="HDFS Namenode HUE Plugin",
use_sasl=security_enabled,
kerberos_principal=nn_kerberos_principal,
timeout_seconds=NN_THRIFT_TIMEOUT)
# The file systems are cached globally. We store
# user information in a thread-local variable so that
    # per-user state stays thread-safe.
self.thread_local = threading.local()
self.setuser(DEFAULT_USER)
LOG.debug("Initialized HadoopFS: %s:%d (%s)", host, thrift_port, hadoop_bin_path)
@classmethod
def from_config(cls, fs_config, hadoop_bin_path="hadoop"):
return cls(host=fs_config.NN_HOST.get(),
thrift_port=fs_config.NN_THRIFT_PORT.get(),
hdfs_port=fs_config.NN_HDFS_PORT.get(),
security_enabled=fs_config.SECURITY_ENABLED.get(),
nn_kerberos_principal=fs_config.NN_KERBEROS_PRINCIPAL.get(),
dn_kerberos_principal=fs_config.DN_KERBEROS_PRINCIPAL.get(),
hadoop_bin_path=hadoop_bin_path)
def _get_hdfs_base(self):
return "hdfs://%s:%d" % (self.host, self.hdfs_port) # TODO(todd) fetch the port from the NN thrift
def _resolve_hadoop_path(self):
"""The hadoop_bin_path configuration may be a non-absolute path, in which case
it's checked against $PATH.
If the hadoop binary can't be found anywhere, raises an Exception.
"""
for path_dir in os.getenv("PATH", "").split(os.pathsep):
path = os.path.join(path_dir, self.hadoop_bin_path)
if os.path.exists(path):
self.hadoop_bin_path = os.path.abspath(path)
return
raise OSError(errno.ENOENT, "Hadoop binary (%s) does not exist." % (self.hadoop_bin_path,))
@property
def uri(self):
return self._get_hdfs_base()
@property
def superuser(self):
"""
Retrieves the user that Hadoop considers as
"superuser" by looking at ownership of /.
This is slightly inaccurate.
"""
return self.stats("/")["user"]
def setuser(self, user):
# Hadoop determines the groups the user belongs to on the server side.
self.thread_local.request_context = RequestContext()
if not self.request_context.confOptions:
self.request_context.confOptions = {}
self.thread_local.request_context.confOptions['effective_user'] = user
self.thread_local.user = user
@property
def user(self):
return self.thread_local.user
@property
def groups(self):
return self.thread_local.groups
@property
def request_context(self):
return self.thread_local.request_context
@_coerce_exceptions
def open(self, path, mode="r", *args, **kwargs):
if mode == "w":
return FileUpload(self, path, mode, *args, **kwargs)
return File(self, path, mode, *args, **kwargs)
@_coerce_exceptions
def remove(self, path):
path = encode_fs_path(path)
stat = self._hadoop_stat(path)
if not stat:
raise IOError(errno.ENOENT, "File not found: %s" % path)
if stat.isDir:
raise IOError(errno.EISDIR, "Is a directory: %s" % path)
success = self.nn_client.unlink(
self.request_context, normpath(path), recursive=False)
if not success:
raise IOError("Unlink failed")
@_coerce_exceptions
def mkdir(self, path, mode=0755):
# TODO(todd) there should be a mkdir that isn't mkdirHIER
# (this is mkdir -p I think)
path = encode_fs_path(path)
success = self.nn_client.mkdirhier(self.request_context, normpath(path), mode)
if not success:
raise IOError("mkdir failed")
def _rmdir(self, path, recursive=False):
path = encode_fs_path(path)
stat = self._hadoop_stat(path)
if not stat:
raise IOError(errno.ENOENT, "Directory not found: %s" % (path,))
if not stat.isDir:
      raise IOError(errno.ENOTDIR, "Is not a directory: %s" % (path,))
success = self.nn_client.unlink(
self.request_context, normpath(path), recursive=recursive)
if not success:
raise IOError("Unlink failed")
@_coerce_exceptions
def rmdir(self, path):
return self._rmdir(path)
@_coerce_exceptions
def rmtree(self, path):
return self._rmdir(path, True)
@_coerce_exceptions
def listdir(self, path):
path = encode_fs_path(path)
stats = self.nn_client.ls(self.request_context, normpath(path))
return [self.basename(decode_fs_path(stat.path)) for stat in stats]
@_coerce_exceptions
def listdir_stats(self, path):
path = encode_fs_path(path)
stats = self.nn_client.ls(self.request_context, normpath(path))
return [self._unpack_stat(s) for s in stats]
@_coerce_exceptions
def get_content_summaries(self, paths):
paths = [ normpath(encode_fs_path(path)) for path in paths ]
summaries = self.nn_client.multiGetContentSummary(self.request_context, paths)
def _fix_summary(summary):
summary.path = decode_fs_path(summary.path)
return summary
return [_fix_summary(s) for s in summaries]
@_coerce_exceptions
def rename(self, old, new):
old = encode_fs_path(old)
new = encode_fs_path(new)
success = self.nn_client.rename(
self.request_context, normpath(old), normpath(new))
if not success: #TODO(todd) these functions should just throw if failed
raise IOError("Rename failed")
@_coerce_exceptions
def rename_star(self, old_dir, new_dir):
"""Equivalent to `mv old_dir/* new"""
if not self.isdir(old_dir):
raise IOError(errno.ENOTDIR, "'%s' is not a directory" % (old_dir,))
if not self.exists(new_dir):
self.mkdir(new_dir)
elif not self.isdir(new_dir):
raise IOError(errno.ENOTDIR, "'%s' is not a directory" % (new_dir,))
ls = self.listdir(old_dir)
for dirent in ls:
self.rename(HadoopFileSystem.join(old_dir, dirent),
HadoopFileSystem.join(new_dir, dirent))
@_coerce_exceptions
def exists(self, path):
stat = self._hadoop_stat(path)
return stat is not None
@_coerce_exceptions
def isfile(self, path):
stat = self._hadoop_stat(path)
if stat is None:
return False
return not stat.isDir
@_coerce_exceptions
def isdir(self, path):
stat = self._hadoop_stat(path)
if stat is None:
return False
return stat.isDir
@_coerce_exceptions
def stats(self, path, raise_on_fnf=True):
stat = self._hadoop_stat(path)
if not stat:
if raise_on_fnf:
raise IOError(errno.ENOENT, "File %s not found" % (path,))
else:
return None
ret = self._unpack_stat(stat)
return ret
@_coerce_exceptions
def chmod(self, path, mode):
path = encode_fs_path(path)
self.nn_client.chmod(self.request_context, normpath(path), mode)
@_coerce_exceptions
def chown(self, path, user, group):
path = encode_fs_path(path)
self.nn_client.chown(self.request_context, normpath(path), user, group)
@_coerce_exceptions
def get_namenode_info(self):
(capacity, used, available) = self.nn_client.df(self.request_context)
return dict(
usage=dict(capacity_bytes=capacity,
used_bytes=used,
available_bytes=available),
)
@_coerce_exceptions
def _get_blocks(self, path, offset, length):
"""
Get block locations from the Name Node. Returns an array of Block
instances that might look like:
[ Block(path='/user/todd/motd', genStamp=1001, blockId=5564389078175231298,
nodes=[DatanodeInfo(xceiverCount=1, capacity=37265149952, name='127.0.0.1:50010',
thriftPort=53417, state=1, remaining=18987925504, host='127.0.0.1',
storageID='DS-1238582576-127.0.1.1-50010-1240968238474', dfsUsed=36864)], numBytes=424)]
"""
path = encode_fs_path(path)
blocks = self.nn_client.getBlocks(self.request_context, normpath(path), offset, length)
def _fix_block(blk):
blk.path = decode_fs_path(blk.path)
return blk
return [_fix_block(blk) for blk in blocks]
def _hadoop_stat(self, path):
"""Returns None if file does not exist."""
path = encode_fs_path(path)
try:
stat = self.nn_client.stat(self.request_context, normpath(path))
stat.path = decode_fs_path(stat.path)
return stat
except IOException, ioe:
if ioe.clazz == 'java.io.FileNotFoundException':
return None
raise
@_coerce_exceptions
def _read_block(self, block, offset, len):
"""
Reads a chunk of data from the given block from the first available
datanode that serves it.
@param block a thrift Block object
@param offset offset from the beginning of the block (not file)
@param len the number of bytes to read
"""
errs = []
unipath = block.path
block.path = encode_fs_path(block.path)
try:
for node in block.nodes:
dn_conn = self._connect_dn(node)
try:
try:
data = dn_conn.readBlock(self.request_context, block, offset, len)
return data.data
except Exception, e:
errs.append(e)
finally:
dn_conn.close()
finally:
block.path = unipath
raise IOError("Could not read block %s from any replicas: %s" % (block, repr(errs)))
@_coerce_exceptions
def set_diskspace_quota(self, path, size):
"""
Set the diskspace quota of a given path.
@param path The path to the given hdfs resource
@param size The amount of bytes that a given subtree of files can grow to.
"""
path = encode_fs_path(path)
if normpath(path) == '/':
raise ValueError('Cannot set quota for "/"')
if size < 0:
raise ValueError("The size quota should be 0 or positive or unset")
self.nn_client.setQuota(self.request_context, normpath(path), QUOTA_DONT_SET, size)
@_coerce_exceptions
def set_namespace_quota(self, path, num_files):
"""
Set the maximum number of files of a given path.
@param path The path to the given hdfs resource
@param num_files The amount of files that can exist within that subtree.
"""
path = encode_fs_path(path)
if normpath(path) == '/':
raise ValueError('Cannot set quota for "/"')
if num_files < 0:
raise ValueError("The number of files quota should be 0 or positive or unset")
self.nn_client.setQuota(self.request_context, normpath(path), num_files, QUOTA_DONT_SET)
@_coerce_exceptions
def clear_diskspace_quota(self, path):
"""
Remove the diskspace quota at a given path
"""
path = encode_fs_path(path)
self.nn_client.setQuota(self.request_context, normpath(path), QUOTA_DONT_SET, QUOTA_RESET)
@_coerce_exceptions
def clear_namespace_quota(self, path):
"""
Remove the namespace quota at a given path
"""
path = encode_fs_path(path)
self.nn_client.setQuota(self.request_context, normpath(path), QUOTA_RESET, QUOTA_DONT_SET)
@_coerce_exceptions
def get_diskspace_quota(self, path):
"""
Get the current space quota in bytes for disk space. None if it is unset
"""
path = encode_fs_path(path)
space_quota = self.nn_client.getContentSummary(self.request_context, normpath(path)).spaceQuota
if space_quota == QUOTA_RESET or space_quota == QUOTA_DONT_SET:
return None
else:
return space_quota
@_coerce_exceptions
def get_namespace_quota(self, path):
"""
Get the current quota in number of files. None if it is unset
"""
path = encode_fs_path(path)
file_count_quota = self.nn_client.getContentSummary(self.request_context, normpath(path)).quota
if file_count_quota == QUOTA_RESET or file_count_quota == QUOTA_DONT_SET:
return None
else:
return file_count_quota
@_coerce_exceptions
def get_usage_and_quota(self, path):
"""
Returns a dictionary with "file_count", "file_quota",
"space_used", and "space_quota". The quotas
may be None.
"""
path = encode_fs_path(path)
summary = self.nn_client.getContentSummary(self.request_context, normpath(path))
ret = dict()
ret["file_count"] = summary.fileCount
ret["space_used"] = summary.spaceConsumed
if summary.quota in (QUOTA_RESET, QUOTA_DONT_SET):
ret["file_quota"] = None
else:
ret["file_quota"] = summary.quota
if summary.spaceQuota in (QUOTA_RESET, QUOTA_DONT_SET):
ret["space_quota"] = None
else:
ret["space_quota"] = summary.spaceQuota
return ret
@_coerce_exceptions
def get_delegation_token(self):
# TODO(atm): The second argument here should really be the Hue kerberos
# principal, which doesn't exist yet. Todd's working on that.
return self.nn_client.getDelegationToken(self.request_context, 'hadoop')
def _connect_dn(self, node):
dn_conf = thrift_util.ConnectionConfig(
Datanode.Client,
node.host,
node.thriftPort,
"HDFS Datanode Thrift",
use_sasl=self.security_enabled,
kerberos_principal=self.dn_kerberos_principal,
timeout_seconds=DN_THRIFT_TIMEOUT)
service, protocol, transport = \
thrift_util.connect_to_thrift(dn_conf)
transport.open()
service.close = lambda: transport.close()
return service
@staticmethod
def _unpack_stat(stat):
"""Unpack a Thrift "Stat" object into a dictionary that looks like fs.stat"""
mode = stat.perms
if stat.isDir:
mode |= statconsts.S_IFDIR
else:
mode |= statconsts.S_IFREG
return {
'path': decode_fs_path(stat.path),
'size': stat.length,
'mtime': stat.mtime / 1000,
'mode': mode,
'user': stat.owner,
'group': stat.group,
'atime': stat.atime
}
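  # Example result (added illustration; the values are hypothetical):
  #   {'path': u'/user/todd/motd', 'size': 424, 'mtime': 1240968238,
  #    'mode': 0100644, 'user': 'todd', 'group': 'hadoop', 'atime': 1240968238}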
@staticmethod
def urlsplit(url):
"""
Take an HDFS path (hdfs://nn:port/foo) or just (/foo) and split it into
the standard urlsplit's 5-tuple.
"""
return Hdfs.urlsplit(url)
def require_open(func):
"""
Decorator that ensures that the file instance isn't closed when the
function is run.
"""
def wrapper(self, *args, **kwargs):
if self.closed:
raise IOError(errno.EBADF, "I/O operation on closed file")
return func(self, *args, **kwargs)
return wrapper
class File(object):
""" Represents an open file on HDFS. """
def __init__(self, fs, path, mode="r", buffering=False):
self.fs = fs
self.path = normpath(path)
self.pos = 0
self.closed = False
self._block_cache = BlockCache()
if buffering or mode != "r":
raise Exception("buffering and write support not yet implemented") # NYI
stat = self._stat()
if stat is None:
raise IOError(errno.ENOENT, "No such file or directory: '%s'" % path)
if stat.isDir:
raise IOError(errno.EISDIR, "Is a directory: '%s'" % path)
#TODO(todd) somehow we need to check permissions here - maybe we need an access() call?
# Minimal context manager implementation.
# See: http://www.python.org/doc/2.5.2/lib/typecontextmanager.html
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
    return False  # don't suppress exceptions.
@require_open
def seek(self, offset, whence=0):
""" Set the file pointer to the given spot. @see file.seek """
if whence == SEEK_SET:
self.pos = offset
elif whence == SEEK_CUR:
self.pos += offset
elif whence == SEEK_END:
self.pos = self._stat().length + offset
else:
raise IOError(errno.EINVAL, "Invalid argument to seek for whence")
@require_open
def tell(self):
return self.pos
def _get_block(self, pos):
"""Return the Block instance that contains the given offset"""
cached_block = self._block_cache.find_block(pos)
if cached_block:
return cached_block
# Cache "miss" - fetch ahead 500MB worth of blocks
new_blocks = self.fs._get_blocks(self.path, pos, 500*1024*1024)
self._block_cache.insert_new_blocks(new_blocks)
result = self._block_cache.find_block(pos)
if not result:
raise IOError("No block for position %d in file %s" % (pos, self.path))
return result
@require_open
def _read_in_block(self, length=DEFAULT_READ_SIZE):
"""
Tries to read up to length bytes, but will often read fewer, since
a single call will not read across a block boundary.
"""
end_pos = min(self.pos + length, self._stat().length)
# If we're at EOF, return empty string
if end_pos == self.pos:
return ""
block = self._get_block(self.pos)
assert _block_contains_pos(block, self.pos)
assert block.path == self.path
in_block_pos = self.pos - block.startOffset
assert in_block_pos >= 0
in_block_len = min(length, block.numBytes - in_block_pos)
result = self.fs._read_block(block, in_block_pos, in_block_len)
self.pos += len(result)
assert self.pos <= end_pos
return result
@require_open
def read(self, length=DEFAULT_READ_SIZE):
"""
Read the given number of bytes from this file.
If EOF has been reached, returns the empty string.
@param length the number of bytes wanted
"""
result = []
read_so_far = 0
while read_so_far < length:
this_data = self._read_in_block(length - read_so_far)
if this_data == "": # eof
break
read_so_far += len(this_data)
result.append(this_data)
return "".join(result)
def close(self):
self.closed = True
def _stat(self):
if not hasattr(self, "_stat_cache"):
self._stat_cache = self.fs._hadoop_stat(self.path)
return self._stat_cache
class FileUpload(object):
"""A write-only file that supports no seeking and cannot exist prior to
opening.
"""
def __init__(self, fs, path, mode="w", block_size=None):
self.fs = fs
self.closed = False
assert mode == "w"
extra_confs = []
if block_size:
extra_confs.append("-Ddfs.block.size=%d" % block_size)
self.subprocess_cmd = [self.fs.hadoop_bin_path,
"jar",
hadoop.conf.SUDO_SHELL_JAR.get(),
self.fs.user,
"-Dfs.default.name=" + self.fs.uri] + \
extra_confs + \
["-put", "-", encode_fs_path(path)]
self.subprocess_env = i18n.make_utf8_env()
if self.subprocess_env.has_key('HADOOP_CLASSPATH'):
self.subprocess_env['HADOOP_CLASSPATH'] += ':' + hadoop.conf.HADOOP_EXTRA_CLASSPATH_STRING.get()
else:
self.subprocess_env['HADOOP_CLASSPATH'] = hadoop.conf.HADOOP_EXTRA_CLASSPATH_STRING.get()
if hadoop.conf.HADOOP_CONF_DIR.get():
self.subprocess_env['HADOOP_CONF_DIR'] = hadoop.conf.HADOOP_CONF_DIR.get()
self.path = path
self.putter = subprocess.Popen(self.subprocess_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
env=self.subprocess_env,
bufsize=WRITE_BUFFER_SIZE)
@require_open
def write(self, data):
"""May raise IOError, particularly EPIPE"""
self.putter.stdin.write(data)
@require_open
def close(self):
try:
(stdout, stderr) = self.putter.communicate()
except IOError, ioe:
logging.debug("Saw IOError writing %r" % self.path, exc_info=1)
if ioe.errno == errno.EPIPE:
stdout, stderr = self.putter.communicate()
self.closed = True
if stderr:
LOG.warn("HDFS FileUpload (cmd='%s', env='%s') outputted stderr:\n%s" %
(repr(self.subprocess_cmd), repr(self.subprocess_env), stderr))
if stdout:
LOG.info("HDFS FileUpload (cmd='%s', env='%s') outputted stdout:\n%s" %
(repr(self.subprocess_cmd), repr(self.subprocess_env), stdout))
if self.putter.returncode != 0:
raise IOError("hdfs put returned bad code: %d\nstderr: %s" %
(self.putter.returncode, stderr))
LOG.info("Completed upload: %s" % repr(self.subprocess_cmd))
@require_open
def flush(self):
self.putter.stdin.flush()
def _block_contains_pos(block, pos):
return pos >= block.startOffset and pos < block.startOffset + block.numBytes
class BlockCache(object):
"""
A cache of block locations used by a single HDFS input file.
Essentially this keeps the blocks in sorted order and does
binary search to find the block that contains a given offset.
It also provides the ability to merge in the response of a NN
getBlocks response to the cache.
"""
def __init__(self):
self.blocks = []
def find_block(self, pos, _min_idx=0, _max_idx=None):
"""
Return the Block object that contains the specified
position pos, or None if it is not in the cache.
"""
if _max_idx is None:
_max_idx = len(self.blocks) - 1
if _max_idx < _min_idx:
return None
pivot_idx = (_max_idx + _min_idx) / 2
pivot_block = self.blocks[pivot_idx]
if pos < pivot_block.startOffset:
return self.find_block(pos, _min_idx, pivot_idx - 1)
elif pos >= pivot_block.startOffset + pivot_block.numBytes:
return self.find_block(pos, pivot_idx + 1, _max_idx)
else:
return pivot_block
def insert_new_blocks(self, new_blocks):
"""
Merge a list of Block objects from the NN into the list
of cached blocks.
If the set of blocks overlaps, the new blocks take precedence.
"""
# We could do a more efficient merge here since both lists
# are already sorted, but these data structures are small, so let's
# do the easy thing.
blocks_dict = dict( (b.blockId, b) for b in self.blocks )
# Merge in new data to dictionary
for nb in new_blocks:
blocks_dict[nb.blockId] = nb
# Convert back to sorted list
block_list = blocks_dict.values()
block_list.sort(cmp=lambda a,b: cmp(a.startOffset, b.startOffset))
# Update cache with new data
self.blocks = block_list
|
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import HeteroFeatureBinning, HeteroFeatureSelection, DataStatistics, Evaluation
from pipeline.component.hetero_secureboost import HeteroSecureBoost
from pipeline.component.dataio import DataIO
from pipeline.component.intersection import Intersection
from pipeline.component.reader import Reader
from pipeline.interface.data import Data
from pipeline.interface.model import Model
from pipeline.utils.tools import load_job_config
from pipeline.runtime.entity import JobParameters
def main(config="../../config.yaml", namespace=""):
# obtain config
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
guest_test_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
host_train_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
host_test_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=host)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
reader_1 = Reader(name="reader_1")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_test_data)
# define DataIO components
dataio_0 = DataIO(name="dataio_0") # start component numbering at 0
    dataio_1 = DataIO(name="dataio_1")  # a second DataIO component, applied to the test data
param = {
"with_label": True,
"label_name": "y",
"label_type": "int",
"output_format": "dense",
"missing_fill": True,
"missing_fill_method": "mean",
"outlier_replace": False,
"outlier_replace_method": "designated",
"outlier_replace_value": 0.66,
"outlier_impute": "-9999"
}
# get DataIO party instance of guest
dataio_0_guest_party_instance = dataio_0.get_party_instance(role='guest', party_id=guest)
# configure DataIO for guest
dataio_0_guest_party_instance.component_param(**param)
    # get and configure DataIO party instance of guest for dataio_1
dataio_1.get_party_instance(role='guest', party_id=guest).component_param(**param)
param = {
"input_format": "tag",
"with_label": False,
"tag_with_value": True,
"delimitor": ";",
"output_format": "dense"
}
dataio_0.get_party_instance(role='host', party_id=host).component_param(**param)
dataio_1.get_party_instance(role='host', party_id=host).component_param(**param)
# define Intersection components
intersection_0 = Intersection(name="intersection_0", intersect_method="raw")
intersection_1 = Intersection(name="intersection_1", intersect_method="raw")
param = {
"name": 'hetero_feature_binning_0',
"method": 'optimal',
"optimal_binning_param": {
"metric_method": "iv",
"init_bucket_method": "quantile"
},
"bin_indexes": -1
}
hetero_feature_binning_0 = HeteroFeatureBinning(**param)
statistic_0 = DataStatistics(name='statistic_0')
param = {
"name": 'hetero_feature_selection_0',
"filter_methods": ["unique_value", "iv_filter", "statistic_filter"],
"unique_param": {
"eps": 1e-6
},
"iv_param": {
"metrics": ["iv", "iv"],
"filter_type": ["top_k", "threshold"],
"take_high": [True, True],
"threshold": [10, 0.1]
},
"statistic_param": {
"metrics": ["coefficient_of_variance", "skewness"],
"filter_type": ["threshold", "threshold"],
"take_high": [True, False],
"threshold": [0.001, -0.01]
},
"select_col_indexes": -1
}
hetero_feature_selection_0 = HeteroFeatureSelection(**param)
hetero_feature_selection_1 = HeteroFeatureSelection(name='hetero_feature_selection_1')
param = {
"task_type": "classification",
"learning_rate": 0.1,
"num_trees": 10,
"subsample_feature_rate": 0.5,
"n_iter_no_change": False,
"tol": 0.0002,
"bin_num": 50,
"objective_param": {
"objective": "cross_entropy"
},
"encrypt_param": {
"method": "paillier"
},
"predict_param": {
"threshold": 0.5
},
"tree_param": {
"max_depth": 2
},
"cv_param": {
"n_splits": 5,
"shuffle": False,
"random_seed": 103,
"need_cv": False
},
"validation_freqs": 2,
"early_stopping_rounds": 5,
"metrics": ["auc", "ks"]
}
hetero_secureboost_0 = HeteroSecureBoost(name='hetero_secureboost_0', **param)
evaluation_0 = Evaluation(name='evaluation_0')
# add components to pipeline, in order of task execution
pipeline.add_component(reader_0)
pipeline.add_component(reader_1)
pipeline.add_component(dataio_0, data=Data(data=reader_0.output.data))
pipeline.add_component(dataio_1, data=Data(data=reader_1.output.data), model=Model(dataio_0.output.model))
# set data input sources of intersection components
pipeline.add_component(intersection_0, data=Data(data=dataio_0.output.data))
pipeline.add_component(intersection_1, data=Data(data=dataio_1.output.data))
pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(statistic_0, data=Data(data=intersection_0.output.data))
pipeline.add_component(hetero_feature_selection_0, data=Data(data=intersection_0.output.data),
model=Model(isometric_model=[hetero_feature_binning_0.output.model,
statistic_0.output.model]))
pipeline.add_component(hetero_feature_selection_1, data=Data(data=intersection_1.output.data),
model=Model(hetero_feature_selection_0.output.model))
# set train & validate data of hetero_secureboost_0 component
pipeline.add_component(hetero_secureboost_0, data=Data(train_data=hetero_feature_selection_0.output.data,
validate_data=hetero_feature_selection_1.output.data))
pipeline.add_component(evaluation_0, data=Data(data=[hetero_secureboost_0.output.data]))
# compile pipeline once finished adding modules, this step will form conf and dsl files for running job
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
print(pipeline.get_component("hetero_secureboost_0").get_summary())
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
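# Typical invocation (added sketch; the script name and config path are hypothetical):
#   python pipeline-hetero-sbt-demo.py -config ../../config.yaml
# Without -config, the default "../../config.yaml" declared in main() is used.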
|
|
from numpy.distutils.core import setup, Extension
import distutils.sysconfig
import sys
import os
import os.path
import re
# Get BUILDTYPE for checking if this is intel-mac
buildtype = os.getenv("BUILDTYPE")
if buildtype:
buildtype = buildtype.strip()
if not buildtype:
raise ValueError("Environment variable BUILDTYPE is not defined")
# (Non-standard) Directories containing .h include files
incdir_list = [ "pyfermod",
os.path.join("fer", "common"),
os.path.join("fmt", "cmn"),
os.path.join("fer", "ef_utility"),
os.path.join("fer", "grdel"), ]
bind_and_hide_internal = os.getenv("BIND_AND_HIDE_INTERNAL")
if bind_and_hide_internal:
bind_and_hide_internal = bind_and_hide_internal.strip()
# NETCDF_LIBDIR must be given, either for the static library or the shared-object library
netcdf_libdir = os.getenv("NETCDF_LIBDIR")
if netcdf_libdir:
netcdf_libdir = netcdf_libdir.strip()
if not netcdf_libdir:
raise ValueError("Environment variable NETCDF_LIBDIR is not defined")
# HDF5_LIBDIR is only given if the HDF5 and NetCDF libraries are to be statically linked
hdf5_libdir = os.getenv("HDF5_LIBDIR")
if hdf5_libdir:
hdf5_libdir = hdf5_libdir.strip()
# SZ_LIBDIR is the location of the SZ library to be linked in
sz_libdir = os.getenv("SZ_LIBDIR")
if sz_libdir:
sz_libdir = sz_libdir.strip()
# CAIRO_LIBDIR is only given if the cairo library is to be statically linked in
cairo_libdir = os.getenv("CAIRO_LIBDIR")
if cairo_libdir:
cairo_libdir = cairo_libdir.strip()
# PIXMAN_LIBDIR is only given if the pixman-1 library is to be statically linked in
pixman_libdir = os.getenv("PIXMAN_LIBDIR")
if pixman_libdir:
pixman_libdir = pixman_libdir.strip()
# PANGO_LIBDIR gives a non-standard location of the pango libraries
pango_libdir = os.getenv("PANGO_LIBDIR")
if pango_libdir:
pango_libdir = pango_libdir.strip()
# GFORTRAN_LIB gives a non-standard full-path location of the gfortran library to be used
# in the linking step. If not given or empty, the -lgfortran flag is used in the linking step.
gfortran_lib = os.getenv("GFORTRAN_LIB")
if gfortran_lib:
gfortran_lib = gfortran_lib.strip()
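# Example environment for a build (added sketch; the paths are hypothetical).
# Only BUILDTYPE and NETCDF_LIBDIR are required by the checks above; the other
# variables enable static linking or point at non-standard library locations:
#   export BUILDTYPE=x86_64-linux
#   export NETCDF_LIBDIR=/usr/local/netcdf/lib
#   export HDF5_LIBDIR=/usr/local/hdf5/lib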
# The location of libpythonx.x.so, in case it is not in a standard location
python_libdir = os.path.split( distutils.sysconfig.get_python_lib(standard_lib=True) )[0]
# The list of additional directories to examine for libraries
libdir_list = [ "lib", netcdf_libdir, ]
if hdf5_libdir:
libdir_list.append(hdf5_libdir)
if sz_libdir:
libdir_list.append(sz_libdir)
if cairo_libdir:
libdir_list.append(cairo_libdir)
if pixman_libdir:
libdir_list.append(pixman_libdir)
if pango_libdir:
libdir_list.append(pango_libdir)
libdir_list.append(python_libdir)
# Get the list of ferret static libraries
# Stripping off the "lib" prefix and the ".a" suffix
fer_lib_list = [ ]
for libname in os.listdir("lib"):
if (libname[:3] == "lib") and (libname[-2:] == ".a"):
fer_lib_list.append(libname[3:-2])
# Create the list of libraries to link
lib_list = fer_lib_list[:]
if buildtype != "intel-mac":
# fer_lib_list is included multiple times to resolve interdependencies
lib_list.extend(fer_lib_list)
lib_list.extend(fer_lib_list)
lib_list.extend(fer_lib_list)
lib_list.extend(fer_lib_list)
# Linking in the rest of the system libraries was moved to addn_link_args
# in order to make sure the appropriate netcdff, netcdf, hdf5_hl, hdf5, and
# cairo libraries are used.
addn_link_args = [ ]
# Link to the appropriate netcdf libraries.
# The hdf5 libraries are only used to resolve netcdf library function
# calls when statically linking in the netcdf libraries.
if hdf5_libdir:
netcdff_lib = os.path.join(netcdf_libdir, "libnetcdff.a")
addn_link_args.append(netcdff_lib)
netcdf_lib = os.path.join(netcdf_libdir, "libnetcdf.a")
addn_link_args.append(netcdf_lib)
hdf5_hl_lib = os.path.join(hdf5_libdir, "libhdf5_hl.a")
addn_link_args.append(hdf5_hl_lib)
hdf5_lib = os.path.join(hdf5_libdir, "libhdf5.a")
addn_link_args.append(hdf5_lib)
else:
addn_link_args.extend([ "-lnetcdff", "-lnetcdf" ])
# Link to the cairo library and the libraries it requires.
if cairo_libdir:
cairo_lib = os.path.join(cairo_libdir, "libcairo.a")
    addn_link_args.append(cairo_lib)
if pixman_libdir:
pixman_lib = os.path.join(pixman_libdir, "libpixman-1.a")
else:
pixman_lib = "-lpixman-1"
addn_link_args.extend([ pixman_lib, "-lfreetype", "-lfontconfig", "-lpng" ])
else:
addn_link_args.append("-lcairo")
# The Pango-Cairo text-rendering libraries
addn_link_args.append("-lpangocairo-1.0")
# Link in the appropriate system libraries
if hdf5_libdir:
addn_link_args.append("-lcurl")
if sz_libdir:
addn_link_args.append("-lsz")
if gfortran_lib:
addn_link_args.append(gfortran_lib)
else:
addn_link_args.append("-lgfortran")
addn_link_args.extend([ "-lz", "-ldl", "-lm", "-fPIC", ])
if bind_and_hide_internal:
# Bind symbols and function symbols to any internal definitions
# and do not make any of the symbols or function symbols defined
# in any libraries externally visible (mainly for cairo and pixman).
# Those in the object files (including those from pyfermod and
# fer/ef_utility) will still be visible.
addn_link_args.extend([ "-Wl,-Bsymbolic", "-Wl,--exclude-libs,ALL"])
if os.uname()[0] == 'Darwin':
# For Mac OSX, leave room for library path renames
addn_link_args.append("-Wl,-headerpad_max_install_names")
# Get the list of C source files in pyfermod
src_list = [ ]
for srcname in os.listdir("pyfermod"):
if srcname[-2:] == ".c":
src_list.append(os.path.join("pyfermod", srcname))
# Get the list of additional objects to be linked in
addnobjs_list = [ ]
dirname = os.path.join("fer", "ef_utility")
for srcname in os.listdir(dirname):
if srcname[-2:] == ".o":
addnobjs_list.append(os.path.join(dirname, srcname))
dirname = os.path.join("fer", "special")
for srcname in ( "FerMem_routines.o", "fakes3.o", "ferret_dispatch.o", "gui_fakes.o", "linux_routines.o", ):
addnobjs_list.append(os.path.join(dirname, srcname))
for srcname in os.listdir(dirname):
if (srcname[0] == 'x') and (srcname[-7:] == "_data.o"):
addnobjs_list.append(os.path.join(dirname, srcname))
if bind_and_hide_internal:
# Duplicate objects in libraries to make them externally visible (e.g., for las
# external functions) if the '--exclude-libs ALL' flag was passed to the linker.
dirname = os.path.join("fmt", "src")
addnobjs_list.append(os.path.join(dirname, "tm_lenstr.o"));
addnobjs_list.append(os.path.join(dirname, "tm_fmt.o"));
addnobjs_list.append(os.path.join(dirname, "tm_lefint.o"));
# Create the pyferret.libpyferret Extension
ext_mods = [ Extension("pyferret.libpyferret", include_dirs = incdir_list,
sources = src_list,
extra_objects = addnobjs_list,
library_dirs = libdir_list,
libraries = lib_list,
extra_link_args = addn_link_args), ]
pyferret_version = None
xrev_name = os.path.join("fer", "dat", "xrevision_data.F")
xrev_file = open(xrev_name)
try:
pat = re.compile('\\s+DATA\\s+revision_level\\s*/\\s*(\\S+)\\s*/\\s*', flags=re.IGNORECASE)
for datlin in xrev_file:
mat = re.match(pat, datlin)
if mat:
pyferret_version = mat.group(1)
break
finally:
xrev_file.close()
if not pyferret_version:
raise ValueError('Unable to find the version number in ' + xrev_name)
# Configure the setup
setup(name = "pyferret",
version = pyferret_version,
description = "Python module providing Ferret functionality",
long_description = "Python module providing Ferret functionality",
author = "Karl M. Smith",
author_email = "[email protected]",
url = "http://ferret.pmel.noaa.gov/Ferret/documentation/pyferret",
license = "Public Domain",
requires = [ "numpy", ],
packages = [ "pyferret", "pyferret.eofanal", "pyferret.fershp",
"pyferret.graphbind", "pyferret.regrid", "pyferret.stats", ],
package_dir = { "pyferret":"pyfermod", },
ext_modules = ext_mods)
setup(name = "pipedviewer",
version = pyferret_version,
description = "Graphics viewer controlled by a command pipe",
long_description = "A graphics viewer application that receives its " \
"drawing and other commands primarily from another " \
"application through a pipe. A limited number of " \
"commands are provided by the viewer itself to allow " \
"saving and some manipulation of the displayed scene. " \
"The controlling application, however, will be unaware " \
"of these modifications made to the scene.",
author = "Karl M. Smith",
author_email = "[email protected]",
url = "http://ferret.pmel.noaa.gov/Ferret/documentation/pyferret",
license = "Public Domain",
requires = [ "multiprocessing", ],
packages = [ "pipedviewer", ],
package_dir = { "pipedviewer":"pviewmod", })
setup(name = "gcircle",
version = pyferret_version,
description = "Module of functions involving great circles with " \
"points given in longitudes and latitudes (thus " \
"assuming a spheroid model of the earth).",
long_description = "Module of functions involving great circles with " \
"points given in longitudes and latitudes (thus " \
"assuming a spheroid model of the earth).",
author = "Karl M. Smith",
author_email = "[email protected]",
url = "http://ferret.pmel.noaa.gov/Ferret/documentation/pyferret",
license = "Public Domain",
requires = [ "numpy", ],
py_modules = [ "gcircle", ])
|
|
import sqlalchemy
from sqlalchemy import orm, Column, types, ForeignKey
from alchy import model, query, manager, events
from .base import TestQueryBase
Model = model.make_declarative_base()
class TestEventsBase(TestQueryBase):
@classmethod
def setUpClass(cls):
cls.db = manager.Manager(Model=Model, config=cls.config)
def setUp(self):
self.db.create_all()
def tearDown(self):
self.db.drop_all()
class TestEvents(TestEventsBase):
"""Test Model events"""
class Huey(Model):
event_tracker = {}
__tablename__ = 'huey'
__events__ = {
'before_insert': 'before_insert',
'after_insert': [
'after_insert1',
'after_insert2',
('after_insert3', {'raw': True})
],
'on_set': [('on_set_name', {'attribute': 'name'})]
}
_id = Column(types.Integer(), primary_key=True)
name = Column(types.String())
dewey_id = Column(types.Integer(), ForeignKey('dewey._id'))
def before_insert(mapper, connection, target):
target.event_tracker['before_insert'] = target.query.all()
target.name = 'Huey'
def after_insert1(mapper, connection, target):
target.event_tracker['after_insert1'] = target.query.all()
target.event_tracker['after_insert2'] = 1
def after_insert2(mapper, connection, target):
target.event_tracker['after_insert2'] += 1
def after_insert3(mapper, connection, target):
from sqlalchemy.orm.state import InstanceState
assert isinstance(target, InstanceState)
        def on_set_name(target, value, oldvalue, initiator):
target.event_tracker['set_name'] = 1
class Dewey(Model):
__tablename__ = 'dewey'
__events__ = None
_id = Column(types.Integer(), primary_key=True)
name = Column(types.String())
number = Column(types.Integer())
min_hueys = Column(types.Boolean())
hueys = orm.relationship('Huey')
@events.before_insert()
def before_insert(mapper, connection, target):
target.name = 'Dewey'
@events.on_set('name', retval=True)
        def on_set_name(target, value, oldvalue, initiator):
if oldvalue is None or (
hasattr(oldvalue, '__class__') and
oldvalue.__class__.__name__ == 'symbol'):
# oldvalue is a symbol for either NO_VALUE or NOT_SET so allow
# update
return value
else:
# value previously set, so prevent edit
return oldvalue
@events.on_append('hueys')
        def on_append_hueys(target, value, initiator):
if len(target.hueys) >= 1:
target.min_hueys = True
@events.on_remove('hueys')
        def on_remove_hueys(target, value, initiator):
if not len(target.hueys) >= 1:
target.min_hueys = False
@events.before_insert_update()
def before_edit(mapper, connection, target):
target.number = (target.number or 0) + 1
def tearDown(self):
super(TestEvents, self).tearDown()
# clear event tracker
self.Huey.event_tracker.clear()
def test_events_using_class_attribute(self):
h = self.Huey()
self.db.add_commit(h)
self.assertEqual(len(h.event_tracker['before_insert']), 0)
self.assertEqual(len(h.event_tracker['after_insert1']), 1)
self.assertEqual(h.name, 'Huey')
self.assertEqual(h.event_tracker['after_insert2'], 2)
self.assertEqual(h.event_tracker['set_name'], 1)
def test_events_using_decorator(self):
d = self.Dewey()
self.assertIsNone(d.number)
self.db.add_commit(d)
self.assertEqual(d.number, 1)
self.assertEqual(d.name, 'Dewey')
# trigger update event
d.name = 'Foo'
self.db.commit()
self.assertEqual(d.number, 2)
def test_attribute_event_set(self):
name = 'mister'
d = self.Dewey()
d.name = name
d.name = 'no change'
self.assertEqual(d.name, name)
d = self.Dewey(name=name)
d.name = 'no change'
self.assertEqual(d.name, name)
def test_attribute_event_append(self):
d = self.Dewey()
self.assertIsNone(d.min_hueys)
d.hueys.append(self.Huey())
self.assertIsNone(d.min_hueys)
d.hueys.append(self.Huey())
self.assertTrue(d.min_hueys)
def test_attribute_event_remove(self):
d = self.Dewey(hueys=[self.Huey(), self.Huey()])
self.assertTrue(d.min_hueys)
d.hueys.remove(d.hueys[0])
self.assertTrue(d.min_hueys)
d.hueys.remove(d.hueys[0])
self.assertFalse(d.min_hueys)
class TestInstanceEvents(TestEventsBase):
class Louie(Model):
event_tracker = {}
_id = Column(types.Integer(), primary_key=True)
name = Column(types.String())
@events.on_expire()
def on_expire(target, attrs):
target.event_tracker['expire'] = True
@events.on_load()
def on_load(target, context):
target.event_tracker['load'] = True
@events.on_refresh()
def on_refresh(target, context, attrs):
target.event_tracker['refresh'] = True
def tearDown(self):
super(TestInstanceEvents, self).tearDown()
# clear event tracker
self.Louie.event_tracker.clear()
def test_expire(self):
self.db.add(self.Louie())
record = self.Louie.get(1)
self.assertIsNone(record.event_tracker.get('expire'))
record.expire()
self.assertTrue(record.event_tracker.get('expire'))
def test_load(self):
self.db.add_commit(self.Louie())
record = self.Louie.get(1)
self.assertTrue(record.event_tracker.get('load'))
def test_refresh(self):
record = self.Louie()
self.db.add_commit(record)
        self.assertIsNone(record.event_tracker.get('refresh'))
self.Louie.get(1)
self.assertTrue(record.event_tracker.get('refresh'))
|
|
"""
The goal is to build JSON dictionaries that can be used to search for codes in
sources and map to elements in the CDM vocabulary.
"""
import csv
import json
import os
import argparse
import sys
def open_csv_file(file_name, mode="r"):
ver_info = sys.version_info[0]
if ver_info == 2:
return open(file_name, mode=mode + "b")
else:
return open(file_name, newline="", mode=mode, encoding="utf8")
def main(source_vocabulary_directory, output_json_directory=None, delimiter="\t"):
"""Build files for needed vocabulary"""
if output_json_directory is None:
output_json_directory = source_vocabulary_directory
concept_csv = os.path.join(source_vocabulary_directory, "CONCEPT.csv")
vocabularies = []
# Determine which vocabularies are in the concept file
print("Scanning '%s'" % os.path.abspath(concept_csv))
with open_csv_file(concept_csv, "r") as f:
dict_reader = csv.DictReader(f, delimiter=delimiter)
i = 0
for row_dict in dict_reader:
vocabulary_id = row_dict["VOCABULARY_ID".lower()]
if vocabulary_id not in vocabularies:
vocabularies += [vocabulary_id]
i += 1
print("Read %s lines" % i)
print("Found %s vocabularies" % len(vocabularies))
# Generate first pass of converting concepts into a JSON lookup file
# Generate one for concept_name and one for concept_code
for vocabulary in vocabularies:
fields_to_key_on = ["CONCEPT_CODE".lower(), "CONCEPT_NAME".lower(), "CONCEPT_ID".lower()]
if vocabulary is not None:
vocabulary_name = "_".join(vocabulary.split(" "))
for field_to_key_on in fields_to_key_on:
file_vocabulary_name = field_to_key_on + "_" + vocabulary_name + ".json"
path_vocabulary_name = os.path.join(output_json_directory, file_vocabulary_name)
if not os.path.exists(path_vocabulary_name):
print("Generating '%s'" % file_vocabulary_name)
csv_file_name_to_keyed_json(concept_csv, path_vocabulary_name, field_to_key_on,
[("VOCABULARY_ID".lower(), vocabulary), ("INVALID_REASON".lower(), "")])
concept_relationship_csv = os.path.join(source_vocabulary_directory, "CONCEPT_RELATIONSHIP.csv")
concept_relationship_json = os.path.join(output_json_directory, "concept_relationship.json")
# Build a master dict
    if not os.path.exists(concept_relationship_json):
print("Generating '%s'" % concept_relationship_json)
csv_file_name_to_keyed_json(concept_relationship_csv, concept_relationship_json, "CONCEPT_ID_1".lower(),
("RELATIONSHIP_ID".lower(), "Maps to"))
with open_csv_file(concept_csv, "r") as f:
dict_reader = csv.DictReader(f, delimiter=delimiter)
concept_dict_vocabulary = {}
for row_dict in dict_reader:
concept_dict_vocabulary[row_dict["CONCEPT_ID".lower()]] = row_dict["VOCABULARY_ID".lower()]
global_concept_json = os.path.join(output_json_directory, "global_concept_vocabulary.json")
print("Generating '%s'" % global_concept_json)
with open(global_concept_json, "w") as fw:
json.dump(concept_dict_vocabulary, fw, sort_keys=True, indent=4, separators=(',', ': '))
with open_csv_file(concept_csv) as f:
dict_reader = csv.DictReader(f, delimiter=delimiter)
concept_dict_domain = {}
for row_dict in dict_reader:
concept_dict_domain[row_dict["CONCEPT_ID".lower()]] = row_dict["DOMAIN_ID".lower()]
global_concept_domain_json = os.path.join(output_json_directory, "global_concept_domain.json")
print("Generating '%s'" % global_concept_json)
with open(global_concept_domain_json, "w") as fw:
json.dump(concept_dict_vocabulary, fw, sort_keys=True, indent=4, separators=(',', ': '))
vocabularies_with_maps = ["ICD9CM", "ICD9Proc", "ICD10CM", "ICD10PCS", "Multum", "LOINC", "CPT4", "HCPCS", "NDC",
"RxNorm"]
for vocabulary_id in vocabularies_with_maps:
print("Annotating '%s'" % vocabulary_id)
vocabulary_json = os.path.join(output_json_directory, "concept_code_" + vocabulary_id + ".json")
concept_with_parent_json = os.path.join(output_json_directory, vocabulary_id + "_with_parent.json")
if not os.path.exists(concept_with_parent_json):
with open(vocabulary_json, "r") as fj:
vocabulary_dict = json.load(fj)
with open(concept_relationship_json, "r") as fj:
concept_rel_dict = json.load(fj)
for concept_code in vocabulary_dict:
concept_dict = vocabulary_dict[concept_code]
concept_id = concept_dict["CONCEPT_ID".lower()]
if concept_id in concept_rel_dict:
try:
mapped_concept_id = concept_rel_dict[concept_id]["CONCEPT_ID_2".lower()]
except TypeError:
# Filter out OMOP Extension
# If only OMOP Extension then include
multiple_concepts = concept_rel_dict[concept_id]
omop_extensions = []
everything_else = []
for concept_rel in multiple_concepts:
concept_id = concept_rel["concept_id_2"]
vocabulary = concept_dict_vocabulary[concept_id]
if vocabulary == "OMOP Extension":
omop_extensions += [concept_rel]
else:
everything_else += [concept_rel]
omop_extensions.sort(key=lambda x: x["VALID_END_DATE".lower()], reverse=True)
everything_else.sort(key=lambda x: x["VALID_END_DATE".lower()], reverse=True)
sorted_multiple_concepts = everything_else + omop_extensions
mapped_concept_id = sorted_multiple_concepts[0]["CONCEPT_ID_2".lower()]
concept_dict["MAPPED_CONCEPT_ID".lower()] = mapped_concept_id
if mapped_concept_id in concept_dict_vocabulary:
concept_dict["MAPPED_CONCEPT_VOCAB".lower()] = concept_dict_vocabulary[mapped_concept_id]
else:
concept_dict["MAPPED_CONCEPT_VOCAB".lower()] = None
if mapped_concept_id in concept_dict_domain:
concept_dict["MAPPED_CONCEPT_DOMAIN".lower()] = concept_dict_domain[mapped_concept_id]
else:
concept_dict["MAPPED_CONCEPT_DOMAIN".lower()] = None
else:
concept_dict["MAPPED_CONCEPT_ID".lower()] = None
with open(concept_with_parent_json, "w") as fw:
json.dump(vocabulary_dict, fw, sort_keys=True, indent=4, separators=(',', ': '))
def csv_file_name_to_keyed_json(csv_file_name, json_file_name, field_to_key_on, filter_pairs=None, delimiter="\t"):
"""Create a keyed JSON file"""
with open_csv_file(csv_file_name, "r") as fd:
dict_reader = csv.DictReader(fd, delimiter=delimiter)
result_dict = {}
include_row = True
filter_value = None
field = None
if filter_pairs is not None:
if filter_pairs.__class__ != [].__class__:
filter_pairs = [filter_pairs]
include_row = False
for row_dict in dict_reader:
if filter_pairs is not None:
for filter_pair in filter_pairs:
field, filter_value = filter_pair
field_value = row_dict[field]
if field_value == filter_value:
include_row = True
else:
include_row = False
break
if include_row:
key = row_dict[field_to_key_on]
if key in result_dict:
if result_dict[key].__class__ == [].__class__:
result_dict[key] += [row_dict]
else:
result_dict[key] = [result_dict[key], row_dict]
else:
result_dict[key] = row_dict
with open(json_file_name, "w") as fw:
json.dump(result_dict, fw, sort_keys=True, indent=4, separators=(',', ': '))
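# Example of building a single keyed lookup directly (file names here are
# hypothetical; main() above derives the real paths from the config file):
#     csv_file_name_to_keyed_json("CONCEPT.csv", "concept_code_LOINC.json",
#                                 "concept_code",
#                                 [("vocabulary_id", "LOINC"), ("invalid_reason", "")])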
if __name__ == "__main__":
arg_parse_obj = argparse.ArgumentParser(
description="Transform Athena vocabulary files into JSON map files for mapping scripts")
arg_parse_obj.add_argument("-c", "--config-file-name", dest="config_file_name", help="JSON config file",
default="cdm_config.json")
arg_obj = arg_parse_obj.parse_args()
print("Reading config file '%s'" % arg_obj.config_file_name)
with open(arg_obj.config_file_name, "r") as fc:
config_dict = json.load(fc)
main(config_dict["json_map_directory"])
|
|
import gdb
import sys
import os
import omniplay.gdbscripts
import ctypes
counter = 1
def exitHandler(event):
pid = event.inferior.pid
print "Goodbye! Pid %d exited" % pid
def grabParameterRegs():
eax = gdb.parse_and_eval("$eax")
ebx = gdb.parse_and_eval("$ebx")
ecx = gdb.parse_and_eval("$ecx")
edx = gdb.parse_and_eval("$edx")
esi = gdb.parse_and_eval("$esi")
edi = gdb.parse_and_eval("$edi")
ebp = gdb.parse_and_eval("$ebp")
return eax, ebx, ecx, edx, esi, edi, ebp
def getIovecPtr():
iovecType = gdb.lookup_type("struct iovec").pointer()
return iovecType
def getRecordPid(pid):
if not pid in getRecordPid.pids:
global utils
newpid = utils.get_current_record_pid(pid)
getRecordPid.pids[pid] = newpid
return getRecordPid.pids[pid]
getRecordPid.pids = {}
def getPC():
pc = gdb.parse_and_eval("$pc")
pcint = pc.cast(gdbTypes.inttype)
uint = ctypes.c_uint(int(pcint))
return uint.value
class PreLoadHandler:
def __init__(self):
self.syscallReturn = False
self.sawVsyscall = False
self.needsLoad = False
self.currentSyscall = -1
self.basePC = getPC()
#This maps address offsets to the correct system calls when in ld!
self.offsetMap = {
84164 : 45,
91795 : 192,
91617 : 33,
91300 : 5,
91117 : 195,
91428 : 3,
91181 : 197,
91357 : 6,
-1011 : 243,
91924 : 125
}
def switchOver(self):
return self.needsLoad
def handleCatchpoint(self, pid):
regs = grabParameterRegs()
if self.syscallReturn:
self.syscallReturn = False
syscallExit(self.currentSyscall, regs, pid)
return
self.syscallReturn = True
#If the breakpoint handled it, don't do anything
if self.sawVsyscall:
self.sawVsyscall = False
else:
pc = getPC()
offset = pc - self.basePC
if offset in self.offsetMap:
syscall = self.offsetMap[offset]
else:
syscall = -1
self.currentSyscall = syscall
syscallEnter(syscall, regs, pid)
if syscall == -1:
print "\tPC:", format(pc, '#x')
print "\tOffset:", offset
def handleBreakpoint(self, pid, breakpoint):
if breakpoint.location == "__libc_start_main":
print "---- Detected libc available ----"
self.needsLoad = True
return
self.sawVsyscall = True
regs = grabParameterRegs()
syscall = int(regs[0])
self.currentSyscall = syscall
syscallEnter(syscall, regs, pid)
return
def handle(self, event):
pid = gdb.selected_inferior().pid
isBreakpoint = isinstance(event, gdb.BreakpointEvent)
if isBreakpoint:
breakpoint = event.breakpoints[0]
self.handleBreakpoint(pid, breakpoint)
else:
self.handleCatchpoint(pid)
def onActualStop(self):
if self.switchOver():
print "---- Reading libc symbols ----"
gdb.execute("sharedlibrary libc.so")
gdb.execute("delete")
global handler
handler = PostLoadHandler()
gdb.execute("continue")
return
gdb.execute("continue")
return False
class PostLoadHandler:
def __init__(self):
self.count = 0
self.beginOfCall = True
self.currentSyscall = -1
self.cstr = gdb.lookup_type("char").pointer()
self.voidptr = gdb.lookup_type("void").pointer()
self.recordPid = None
gdb.Breakpoint("__kernel_vsyscall")
def handle(self, event):
regs = grabParameterRegs()
pid = gdb.selected_inferior().pid
if self.beginOfCall:
syscall = int(regs[0])
self.currentSyscall = syscall
syscallEnter(syscall, regs, pid)
else:
syscallExit(self.currentSyscall, regs, pid)
self.currentSyscall = -1
self.beginOfCall = not self.beginOfCall
def onActualStop(self):
if not self.beginOfCall:
gdb.execute("finish")
else:
gdb.execute("continue")
return False
def syscallEnter(syscall, regs, pid):
global counter
recordpid = getRecordPid(pid)
if syscall == -1:
outstr = "%i Pid %i (record pid %i), could not determine syscall number"
print outstr % ( counter, pid, recordpid )
counter += 1
return
else:
outstr = "%i Pid %i (record pid %i), Syscall Number %i"
print outstr % ( counter, pid, recordpid, syscall )
printArgs(syscall, regs)
printSyscallEnterData(syscall, regs, recordpid)
def syscallExit(syscall, regs, pid):
if syscall == -1:
return
notrecorded = [ 243, 244, 56, 31, 98, 137, 17, 35, 188, 32, 53, 219, 44, 189, 58, 273 ]
if syscall in notrecorded:
return
recordpid = getRecordPid(pid)
printSyscallExitData(syscall, regs, recordpid)
printReturnValue(syscall, regs)
global counter
counter += 1
def printArgs(sysnum, regs):
watchedCalls = [ 3, 4, 5, 6, 90, 145, 146, 180, 181, 192, 333, 334 ]
if not sysnum in watchedCalls:
return
outputs = {
3 : "read ( fd = %i, buf = %s, count = %i )",
4 : "write ( fd = %i, buf = %s, count = %i )",
5 : "open ( pathname = %s, flags = %i )",
6 : "close ( fd = %i )",
90 : "mmap ( addr = %s, length = %i, prot = %i, flags = %i, fd = %i, offset = %i )",
145 : "readv ( fd = %i, iovec = %s, iovcnt = %i )",
146 : "writev ( fd = %i, iovec = %s, iovcnt = %i )",
180 : "pread ( fd = %i, buf = %s, count = %i, offset = %i )",
181 : "pwrite ( fd = %i, buf = %s, count = %i, offset = %i )",
192 : "mmap2 ( addr = %s, length = %i, prot = %i, flags = %i, fd = %i, offset = %i )",
333 : "preadv ( fd = %i, iovec = %s, iovcnt = %i, offset = %i )",
334 : "pwritev ( fd = %i, iovec = %s, iovcnt = %i, offset = %i )"
}
#calls that look like "fd, address, count"
if sysnum == 3 or sysnum == 4 or sysnum == 145 or sysnum == 146:
buf = regs[2].cast(gdbTypes.voidptr)
formats = ( int(regs[1]), str(buf), int(regs[3]) )
elif sysnum == 5:
buf = regs[1].cast(gdbTypes.cstr)
formats = ( str(buf), int(regs[2]) )
elif sysnum == 6:
formats = int(regs[1])
#the mmaps
elif sysnum == 90 or sysnum == 192:
addr = regs[1].cast(gdbTypes.voidptr)
offset = regs[6].cast(gdbTypes.inttype)
formats = ( str(addr), int(regs[2]), int(regs[3]), int(regs[4]), int(regs[5]), int(offset) )
else:
buf = regs[2].cast(gdbTypes.voidptr)
formats = ( int(regs[1]), str(buf), int(regs[3]), int(regs[4]) )
outstr = outputs[sysnum] % formats
print "\t" + outstr
def printSyscallEnterData(syscall, regs, pid):
watchedCalls = [ 4, 146, 181, 334 ]
if syscall not in watchedCalls:
return
printer = Printer()
try:
if syscall == 4 or syscall == 181:
buf = regs[2].cast(gdbTypes.cstr)
count = int(regs[3])
printer.printWrite(pid, buf, count)
else:
iovec = regs[2].cast(getIovecPtr())
count = int(regs[3])
printer.printWriteIovec(pid, iovec, count)
except gdb.MemoryError:
print "\t<invalid memory address>"
def printSyscallExitData(syscall, regs, pid):
watchedCalls = [ 3, 90, 145, 180, 192, 333 ]
if syscall not in watchedCalls:
return
printer = Printer()
retval = int(regs[0])
if retval == -1:
return
try:
if syscall == 3 or syscall == 180:
buf = regs[2].cast(gdbTypes.cstr)
printer.printRead(pid, buf, retval)
elif syscall == 90 or syscall == 192:
fd = int(regs[5])
if fd == -1:
return
buf = regs[0].cast(gdbTypes.cstr)
length = int(regs[2])
printer.printRead(pid, buf, length)
else:
iovec = regs[2].cast(getIovecPtr())
count = int(regs[3])
printer.printReadIovec(pid, iovec, count, retval)
except gdb.MemoryError:
print "\t<invalid memory address>"
def printReturnValue(syscall, regs):
watchedCalls = [ 3, 4, 5, 6, 90, 145, 146, 180, 181, 192, 333, 334 ]
if syscall not in watchedCalls:
return
returnVal = regs[0]
#mmap returns a pointer
if syscall == 90 or syscall == 192:
intval = returnVal.cast(gdbTypes.inttype)
if intval == -1:
val = -1
else:
val = returnVal.cast(gdbTypes.voidptr)
print "\tReturned: %s" % str(val)
else:
print "\tReturned: %i" % int(returnVal)
class Printer():
def init(self, group):
Printer.group = group
Printer.root = "/tmp/io_%i" % group
Printer.reads = '/'.join([Printer.root, "reads"])
Printer.writes = '/'.join([Printer.root, "writes"])
def setup_files(self):
if not os.path.isdir(Printer.root):
os.mkdir(Printer.root)
if not os.path.isdir(Printer.reads):
os.mkdir(Printer.reads)
if not os.path.isdir(Printer.writes):
os.mkdir(Printer.writes)
def printRead(self, pid, buf, length):
totalValues.bytesRead += length
self._doRead(pid, Printer._printMemRaw, buf, length)
def printWrite(self, pid, buf, length):
totalValues.bytesWritten += length
self._doWrite(pid, Printer._printMemRaw, buf, length)
def printReadIovec(self, pid, iovec, count, length):
totalValues.bytesRead += length
self._doRead(pid, Printer._printIovecRaw, iovec, count, length)
def printWriteIovec(self, pid, iovec, count):
self._doWrite(pid, Printer._printIovecRaw, iovec, count, None)
def _doRead(self, pid, func, *args):
outfile = self._getFile(True, pid)
func(self, outfile, *args, newLine=False)
print "\tCreated out file", outfile.name
outfile.close()
def _doWrite(self, pid, func, *args):
outfile = self._getFile(False, pid)
print "\tData ***"
func(self, sys.stdout, *args, newLine=True)
print "\t***"
func(self, outfile, *args, newLine=False)
print "\tCreated out file", outfile.name
outfile.close()
def _getFile(self, isRead, pid):
folder = Printer.reads if isRead else Printer.writes
global counter
filename = "%s/%i_%i_%i" % ( folder, Printer.group, pid, counter )
outfile = open(filename, 'w')
return outfile
def _printMemRaw(self, ostream, buf, length, newLine=True):
try:
sval = buf.string('ascii', 'ignore', length)
except UnicodeError:
sval = "<***contained unprintable characters***>"
if newLine:
print >>ostream, sval
else:
print >>ostream, sval,
def _printIovecRaw(self, ostream, iovecArr, count, length, newLine=False):
vecarr = [ iovecArr[i] for i in xrange(count) ]
        if length is None:
length = sum([ int(vec['iov_len']) for vec in vecarr ])
#TODO: This really really really needs to go somewhere else
totalValues.bytesWritten += length
for vec in vecarr:
buf = vec['iov_base'].cast(gdbTypes.cstr)
vlength = int(vec['iov_len'])
if vlength > length:
vlength = length
length -= vlength
self._printMemRaw(ostream, buf, vlength, newLine=False)
if newLine:
print >>ostream, ''
#This is pretty hacky but oh well
class gdbTypes:
cstr = gdb.lookup_type("char").pointer()
voidptr = gdb.lookup_type("void").pointer()
inttype = gdb.lookup_type("int")
#TODO: find a better way to do this
class totalValues:
bytesRead = 0
bytesWritten = 0
handler = None
utils = None
def stopHandler(event):
global handler
handler.handle(event)
def main():
global utils
utils = omniplay.gdbscripts.ScriptUtilities()
group = utils.get_replay_group()
printer = Printer()
printer.init(group)
printer.setup_files()
print "Replay Group is", group
global handler
handler = PreLoadHandler()
gdb.events.exited.connect(exitHandler)
gdb.events.stop.connect(stopHandler)
#This line is a performance booster potentially -> makes it so it doesn't
# remove and reinsert breakpoints everytime it stops
#gdb.execute("set breakpoint always-inserted on")
#Temp
gdb.execute("catch syscall")
gdb.execute("break __kernel_vsyscall")
gdb.execute("break __libc_start_main")
while True:
try:
if handler.onActualStop():
break
except gdb.error:
break
print "Total Bytes Read:", totalValues.bytesRead
print "Total Bytes Written:", totalValues.bytesWritten
main()
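# Note: this script is meant to be sourced inside gdb (e.g. "gdb -x <this file>"
# or "source <this file>" from the gdb prompt) while attached to an omniplay
# replay; the exact replay/attach invocation is tool-specific and not shown here.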
|
|
import os
import re
import subprocess
from collections import namedtuple
import logging
import bisect
from common import SushiError, get_extension
import chapters
MediaStreamInfo = namedtuple('MediaStreamInfo', ['id', 'info', 'default', 'title'])
SubtitlesStreamInfo = namedtuple('SubtitlesStreamInfo', ['id', 'info', 'type', 'default', 'title'])
MediaInfo = namedtuple('MediaInfo', ['video', 'audio', 'subtitles', 'chapters'])
class FFmpeg(object):
@staticmethod
def get_info(path):
try:
process = subprocess.Popen(['ffmpeg', '-hide_banner', '-i', path], stderr=subprocess.PIPE)
out, err = process.communicate()
process.wait()
return err
except OSError as e:
if e.errno == 2:
raise SushiError("Couldn't invoke ffmpeg, check that it's installed")
raise
@staticmethod
def demux_file(input_path, **kwargs):
args = ['ffmpeg', '-hide_banner', '-i', input_path, '-y']
audio_stream = kwargs.get('audio_stream', None)
audio_path = kwargs.get('audio_path', None)
audio_rate = kwargs.get('audio_rate', None)
if audio_stream is not None:
args.extend(('-map', '0:{0}'.format(audio_stream)))
if audio_rate:
args.extend(('-ar', str(audio_rate)))
args.extend(('-ac', '1', '-acodec', 'pcm_s16le', audio_path))
script_stream = kwargs.get('script_stream', None)
script_path = kwargs.get('script_path', None)
if script_stream is not None:
args.extend(('-map', '0:{0}'.format(script_stream), script_path))
video_stream = kwargs.get('video_stream', None)
timecodes_path = kwargs.get('timecodes_path', None)
if timecodes_path is not None:
args.extend(('-map', '0:{0}'.format(video_stream), '-f', 'mkvtimestamp_v2', timecodes_path))
logging.info('ffmpeg args: {0}'.format(' '.join(('"{0}"' if ' ' in a else '{0}').format(a) for a in args)))
try:
subprocess.call(args)
except OSError as e:
if e.errno == 2:
raise SushiError("Couldn't invoke ffmpeg, check that it's installed")
raise
@staticmethod
def _get_audio_streams(info):
streams = re.findall(r'Stream\s\#0:(\d+).*?Audio:\s*(.*?(?:\((default)\))?)\s*?(?:\(forced\))?\r?\n'
r'(?:\s*Metadata:\s*\r?\n'
r'\s*title\s*:\s*(.*?)\r?\n)?',
info, flags=re.VERBOSE)
return [MediaStreamInfo(int(x[0]), x[1], x[2] != '', x[3]) for x in streams]
@staticmethod
def _get_video_streams(info):
streams = re.findall(r'Stream\s\#0:(\d+).*?Video:\s*(.*?(?:\((default)\))?)\s*?(?:\(forced\))?\r?\n'
r'(?:\s*Metadata:\s*\r?\n'
r'\s*title\s*:\s*(.*?)\r?\n)?',
info, flags=re.VERBOSE)
return [MediaStreamInfo(int(x[0]), x[1], x[2] != '', x[3]) for x in streams]
@staticmethod
def _get_chapters_times(info):
return map(float, re.findall(r'Chapter #0.\d+: start (\d+\.\d+)', info))
@staticmethod
def _get_subtitles_streams(info):
maps = {
'ssa': '.ass',
'ass': '.ass',
'subrip': '.srt'
}
streams = re.findall(r'Stream\s\#0:(\d+).*?Subtitle:\s*((\w*)\s*?(?:\((default)\))?\s*?(?:\(forced\))?)\r?\n'
r'(?:\s*Metadata:\s*\r?\n'
r'\s*title\s*:\s*(.*?)\r?\n)?',
info, flags=re.VERBOSE)
return [SubtitlesStreamInfo(int(x[0]), x[1], maps.get(x[2], x[2]), x[3] != '', x[4].strip()) for x in streams]
@classmethod
def get_media_info(cls, path):
info = cls.get_info(path)
video_streams = cls._get_video_streams(info)
audio_streams = cls._get_audio_streams(info)
subs_streams = cls._get_subtitles_streams(info)
chapter_times = cls._get_chapters_times(info)
return MediaInfo(video_streams, audio_streams, subs_streams, chapter_times)
class MkvToolnix(object):
@classmethod
def extract_timecodes(cls, mkv_path, stream_idx, output_path):
args = ['mkvextract', 'timecodes_v2', mkv_path, '{0}:{1}'.format(stream_idx, output_path)]
subprocess.call(args)
class SCXviD(object):
@classmethod
def make_keyframes(cls, video_path, log_path):
try:
ffmpeg_process = subprocess.Popen(['ffmpeg', '-i', video_path,
'-f', 'yuv4mpegpipe',
'-vf', 'scale=640:360',
'-pix_fmt', 'yuv420p',
'-vsync', 'drop', '-'], stdout=subprocess.PIPE)
except OSError as e:
if e.errno == 2:
raise SushiError("Couldn't invoke ffmpeg, check that it's installed")
raise
try:
scxvid_process = subprocess.Popen(['SCXvid', log_path], stdin=ffmpeg_process.stdout)
except OSError as e:
ffmpeg_process.kill()
if e.errno == 2:
raise SushiError("Couldn't invoke scxvid, check that it's installed")
raise
scxvid_process.wait()
class Timecodes(object):
def __init__(self, times, default_fps):
super(Timecodes, self).__init__()
self.times = times
self.default_frame_duration = 1.0 / default_fps if default_fps else None
def get_frame_time(self, number):
try:
return self.times[number]
except IndexError:
if not self.default_frame_duration:
return self.get_frame_time(len(self.times)-1)
if self.times:
return self.times[-1] + (self.default_frame_duration) * (number - len(self.times) + 1)
else:
return number * self.default_frame_duration
def get_frame_number(self, timestamp):
if (not self.times or self.times[-1] < timestamp) and self.default_frame_duration:
return int((timestamp - sum(self.times)) / self.default_frame_duration)
return bisect.bisect_left(self.times, timestamp)
def get_frame_size(self, timestamp):
try:
number = bisect.bisect_left(self.times, timestamp)
except:
return self.default_frame_duration
c = self.get_frame_time(number)
if number == len(self.times):
p = self.get_frame_time(number - 1)
return c - p
else:
n = self.get_frame_time(number + 1)
return n - c
@classmethod
def _convert_v1_to_v2(cls, default_fps, overrides):
# start, end, fps
overrides = [(int(x[0]), int(x[1]), float(x[2])) for x in overrides]
if not overrides:
return []
fps = [default_fps] * (overrides[-1][1] + 1)
for o in overrides:
fps[o[0]:o[1] + 1] = [o[2]] * (o[1] - o[0] + 1)
v2 = [0]
for d in (1.0 / f for f in fps):
v2.append(v2[-1] + d)
return v2
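    # Worked example of the conversion above (values are illustrative only):
    # with default_fps=30 and a single v1 override "0,1,10" (frames 0..1 at
    # 10 fps), fps becomes [10, 10] and the resulting v2 list is
    # [0, 0.1, 0.2] -- cumulative frame start times in seconds.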
@classmethod
def parse(cls, text):
lines = text.splitlines()
if not lines:
return []
first = lines[0].lower().lstrip()
if first.startswith('# timecode format v2') or first.startswith('# timestamp format v2'):
tcs = [float(x) / 1000.0 for x in lines[1:]]
return Timecodes(tcs, None)
elif first.startswith('# timecode format v1'):
default = float(lines[1].lower().replace('assume ', ""))
overrides = (x.split(',') for x in lines[2:])
return Timecodes(cls._convert_v1_to_v2(default, overrides), default)
else:
raise SushiError('This timecodes format is not supported')
@classmethod
def from_file(cls, path):
with open(path) as file:
return cls.parse(file.read())
@classmethod
def cfr(cls, fps):
class CfrTimecodes(object):
def __init__(self, fps):
self.frame_duration = 1.0 / fps
def get_frame_time(self, number):
return number * self.frame_duration
def get_frame_size(self, timestamp):
return self.frame_duration
def get_frame_number(self, timestamp):
return int(timestamp / self.frame_duration)
return CfrTimecodes(fps)
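# Minimal usage sketch for the constant-frame-rate helper (the frame rate below
# is only an example):
#     tc = Timecodes.cfr(24000.0 / 1001.0)
#     tc.get_frame_time(24)   # ~1.001 seconds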
class Demuxer(object):
def __init__(self, path):
super(Demuxer, self).__init__()
self._path = path
self._is_wav = get_extension(self._path) == '.wav'
self._mi = None if self._is_wav else FFmpeg.get_media_info(self._path)
self._demux_audio = self._demux_subs = self._make_timecodes = self._make_keyframes = self._write_chapters = False
@property
def is_wav(self):
return self._is_wav
@property
def path(self):
return self._path
@property
def chapters(self):
if self.is_wav:
return []
return self._mi.chapters
@property
def has_video(self):
return not self.is_wav and self._mi.video
def set_audio(self, stream_idx, output_path, sample_rate):
self._audio_stream = self._select_stream(self._mi.audio, stream_idx, 'audio')
self._audio_output_path = output_path
self._audio_sample_rate = sample_rate
self._demux_audio = True
def set_script(self, stream_idx, output_path):
self._script_stream = self._select_stream(self._mi.subtitles, stream_idx, 'subtitles')
self._script_output_path = output_path
self._demux_subs = True
def set_timecodes(self, output_path):
self._timecodes_output_path = output_path
self._make_timecodes = True
def set_chapters(self, output_path):
self._write_chapters = True
self._chapters_output_path = output_path
def set_keyframes(self, output_path):
self._keyframes_output_path = output_path
self._make_keyframes = True
def get_subs_type(self, stream_idx):
return self._select_stream(self._mi.subtitles, stream_idx, 'subtitles').type
def demux(self):
if self._write_chapters:
with open(self._chapters_output_path, "w") as output_file:
output_file.write(chapters.format_ogm_chapters(self.chapters))
if self._make_keyframes:
SCXviD.make_keyframes(self._path, self._keyframes_output_path)
ffargs = {}
if self._demux_audio:
ffargs['audio_stream'] = self._audio_stream.id
ffargs['audio_path'] = self._audio_output_path
ffargs['audio_rate'] = self._audio_sample_rate
if self._demux_subs:
ffargs['script_stream'] = self._script_stream.id
ffargs['script_path'] = self._script_output_path
if self._make_timecodes:
def set_ffmpeg_timecodes():
ffargs['video_stream'] = self._mi.video[0].id
ffargs['timecodes_path'] = self._timecodes_output_path
if get_extension(self._path).lower() == '.mkv':
try:
MkvToolnix.extract_timecodes(self._path,
stream_idx=self._mi.video[0].id,
output_path=self._timecodes_output_path)
except OSError as e:
if e.errno == 2:
set_ffmpeg_timecodes()
else:
raise
else:
set_ffmpeg_timecodes()
if ffargs:
FFmpeg.demux_file(self._path, **ffargs)
def cleanup(self):
if self._demux_audio:
os.remove(self._audio_output_path)
if self._demux_subs:
os.remove(self._script_output_path)
if self._make_timecodes:
os.remove(self._timecodes_output_path)
if self._write_chapters:
os.remove(self._chapters_output_path)
@classmethod
def _format_stream(cls, stream):
return '{0}{1}: {2}'.format(stream.id, ' (%s)' % stream.title if stream.title else '', stream.info)
@classmethod
def _format_streams_list(cls, streams):
return '\n'.join(map(cls._format_stream, streams))
def _select_stream(self, streams, chosen_idx, name):
if not streams:
raise SushiError('No {0} streams found in {1}'.format(name, self._path))
if chosen_idx is None:
if len(streams) > 1:
default_track = next((s for s in streams if s.default), None)
if default_track:
logging.warning('Using default track {0} in {1} because there are multiple candidates'
.format(self._format_stream(default_track), self._path))
return default_track
                raise SushiError('More than one {0} stream found in {1}. '
'You need to specify the exact one to demux. Here are all candidates:\n'
'{2}'.format(name, self._path, self._format_streams_list(streams)))
return streams[0]
try:
return next(x for x in streams if x.id == chosen_idx)
except StopIteration:
raise SushiError("Stream with index {0} doesn't exist in {1}.\n"
"Here are all that do:\n"
"{2}".format(chosen_idx, self._path, self._format_streams_list(streams)))
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Unit tests for Superset"""
import json
import unittest
from datetime import datetime
from unittest.mock import Mock, patch
from tests.integration_tests.test_app import app
from superset import db, security_manager
from superset.connectors.druid.views import (
Druid,
DruidClusterModelView,
DruidColumnInlineView,
DruidDatasourceModelView,
DruidMetricInlineView,
)
from .base_tests import SupersetTestCase
try:
from superset.connectors.druid.models import (
DruidCluster,
DruidColumn,
DruidDatasource,
DruidMetric,
)
except ImportError:
pass
class PickableMock(Mock):
def __reduce__(self):
return (Mock, ())
SEGMENT_METADATA = [
{
"id": "some_id",
"intervals": ["2013-05-13T00:00:00.000Z/2013-05-14T00:00:00.000Z"],
"columns": {
"__time": {
"type": "LONG",
"hasMultipleValues": False,
"size": 407240380,
"cardinality": None,
"errorMessage": None,
},
"dim1": {
"type": "STRING",
"hasMultipleValues": False,
"size": 100000,
"cardinality": 1944,
"errorMessage": None,
},
"dim2": {
"type": "STRING",
"hasMultipleValues": True,
"size": 100000,
"cardinality": 1504,
"errorMessage": None,
},
"metric1": {
"type": "FLOAT",
"hasMultipleValues": False,
"size": 100000,
"cardinality": None,
"errorMessage": None,
},
},
"aggregators": {
"metric1": {"type": "longSum", "name": "metric1", "fieldName": "metric1"}
},
"size": 300000,
"numRows": 5000000,
}
]
GB_RESULT_SET = [
{
"version": "v1",
"timestamp": "2012-01-01T00:00:00.000Z",
"event": {"dim1": "Canada", "dim2": "boy", "count": 12345678},
},
{
"version": "v1",
"timestamp": "2012-01-01T00:00:00.000Z",
"event": {"dim1": "USA", "dim2": "girl", "count": 12345678 / 2},
},
]
DruidCluster.get_druid_version = lambda _: "0.9.1" # type: ignore
class TestDruid(SupersetTestCase):
"""Testing interactions with Druid"""
@classmethod
def setUpClass(cls):
cls.create_druid_test_objects()
def get_test_cluster_obj(self):
return DruidCluster(
cluster_name="test_cluster",
broker_host="localhost",
broker_port=7980,
broker_endpoint="druid/v2",
metadata_last_refreshed=datetime.now(),
)
def get_cluster(self, PyDruid):
instance = PyDruid.return_value
instance.time_boundary.return_value = [{"result": {"maxTime": "2016-01-01"}}]
instance.segment_metadata.return_value = SEGMENT_METADATA
cluster = (
db.session.query(DruidCluster)
.filter_by(cluster_name="test_cluster")
.first()
)
if cluster:
for datasource in (
db.session.query(DruidDatasource).filter_by(cluster_id=cluster.id).all()
):
db.session.delete(datasource)
db.session.delete(cluster)
db.session.commit()
cluster = self.get_test_cluster_obj()
db.session.add(cluster)
cluster.get_datasources = PickableMock(return_value=["test_datasource"])
return cluster
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@patch("superset.connectors.druid.models.PyDruid")
def test_client(self, PyDruid):
self.login(username="admin")
cluster = self.get_cluster(PyDruid)
cluster.refresh_datasources()
cluster.refresh_datasources(merge_flag=True)
datasource_id = cluster.datasources[0].id
db.session.commit()
nres = [
list(v["event"].items()) + [("timestamp", v["timestamp"])]
for v in GB_RESULT_SET
]
nres = [dict(v) for v in nres]
import pandas as pd
df = pd.DataFrame(nres)
instance = PyDruid.return_value
instance.export_pandas.return_value = df
instance.query_dict = {}
instance.query_builder.last_query.query_dict = {}
resp = self.get_resp("/superset/explore/druid/{}/".format(datasource_id))
self.assertIn("test_datasource", resp)
form_data = {
"viz_type": "table",
"granularity": "one+day",
"druid_time_origin": "",
"since": "7 days ago",
"until": "now",
"row_limit": 5000,
"include_search": "false",
"metrics": ["count"],
"groupby": ["dim1"],
"force": "true",
}
# One groupby
url = "/superset/explore_json/druid/{}/".format(datasource_id)
resp = self.get_json_resp(url, {"form_data": json.dumps(form_data)})
self.assertEqual("Canada", resp["data"]["records"][0]["dim1"])
form_data = {
"viz_type": "table",
"granularity": "one+day",
"druid_time_origin": "",
"since": "7 days ago",
"until": "now",
"row_limit": 5000,
"include_search": "false",
"metrics": ["count"],
"groupby": ["dim1", "dim2"],
"force": "true",
}
# two groupby
url = "/superset/explore_json/druid/{}/".format(datasource_id)
resp = self.get_json_resp(url, {"form_data": json.dumps(form_data)})
self.assertEqual("Canada", resp["data"]["records"][0]["dim1"])
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_druid_sync_from_config(self):
CLUSTER_NAME = "new_druid"
self.login()
cluster = self.get_or_create(
DruidCluster, {"cluster_name": CLUSTER_NAME}, db.session
)
db.session.merge(cluster)
db.session.commit()
ds = (
db.session.query(DruidDatasource)
.filter_by(datasource_name="test_click")
.first()
)
if ds:
db.session.delete(ds)
db.session.commit()
cfg = {
"user": "admin",
"cluster": CLUSTER_NAME,
"config": {
"name": "test_click",
"dimensions": ["affiliate_id", "campaign", "first_seen"],
"metrics_spec": [
{"type": "count", "name": "count"},
{"type": "sum", "name": "sum"},
],
"batch_ingestion": {
"sql": "SELECT * FROM clicks WHERE d='{{ ds }}'",
"ts_column": "d",
"sources": [{"table": "clicks", "partition": "d='{{ ds }}'"}],
},
},
}
def check():
resp = self.client.post("/superset/sync_druid/", data=json.dumps(cfg))
druid_ds = (
db.session.query(DruidDatasource)
.filter_by(datasource_name="test_click")
.one()
)
col_names = set([c.column_name for c in druid_ds.columns])
assert {"affiliate_id", "campaign", "first_seen"} == col_names
metric_names = {m.metric_name for m in druid_ds.metrics}
assert {"count", "sum"} == metric_names
assert resp.status_code == 201
check()
# checking twice to make sure a second sync yields the same results
check()
# datasource exists, add new metrics and dimensions
cfg = {
"user": "admin",
"cluster": CLUSTER_NAME,
"config": {
"name": "test_click",
"dimensions": ["affiliate_id", "second_seen"],
"metrics_spec": [
{"type": "bla", "name": "sum"},
{"type": "unique", "name": "unique"},
],
},
}
resp = self.client.post("/superset/sync_druid/", data=json.dumps(cfg))
druid_ds = (
db.session.query(DruidDatasource)
.filter_by(datasource_name="test_click")
.one()
)
# columns and metrics are not deleted if config is changed as
# user could define his own dimensions / metrics and want to keep them
assert set([c.column_name for c in druid_ds.columns]) == set(
["affiliate_id", "campaign", "first_seen", "second_seen"]
)
assert set([m.metric_name for m in druid_ds.metrics]) == set(
["count", "sum", "unique"]
)
# metric type will not be overridden, sum stays instead of bla
assert set([m.metric_type for m in druid_ds.metrics]) == set(
["longSum", "sum", "unique"]
)
assert resp.status_code == 201
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@unittest.skipUnless(app.config["DRUID_IS_ACTIVE"], "DRUID_IS_ACTIVE is false")
def test_filter_druid_datasource(self):
CLUSTER_NAME = "new_druid"
cluster = self.get_or_create(
DruidCluster, {"cluster_name": CLUSTER_NAME}, db.session
)
db.session.merge(cluster)
gamma_ds = self.get_or_create(
DruidDatasource,
{"datasource_name": "datasource_for_gamma", "cluster": cluster},
db.session,
)
gamma_ds.cluster = cluster
db.session.merge(gamma_ds)
no_gamma_ds = self.get_or_create(
DruidDatasource,
{"datasource_name": "datasource_not_for_gamma", "cluster": cluster},
db.session,
)
no_gamma_ds.cluster = cluster
db.session.merge(no_gamma_ds)
db.session.commit()
security_manager.add_permission_view_menu("datasource_access", gamma_ds.perm)
security_manager.add_permission_view_menu("datasource_access", no_gamma_ds.perm)
perm = security_manager.find_permission_view_menu(
"datasource_access", gamma_ds.get_perm()
)
security_manager.add_permission_role(security_manager.find_role("Gamma"), perm)
security_manager.get_session.commit()
self.login(username="gamma")
url = "/druiddatasourcemodelview/list/"
resp = self.get_resp(url)
self.assertIn("datasource_for_gamma", resp)
self.assertNotIn("datasource_not_for_gamma", resp)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@patch("superset.connectors.druid.models.PyDruid")
def test_sync_druid_perm(self, PyDruid):
self.login(username="admin")
instance = PyDruid.return_value
instance.time_boundary.return_value = [{"result": {"maxTime": "2016-01-01"}}]
instance.segment_metadata.return_value = SEGMENT_METADATA
cluster = (
db.session.query(DruidCluster)
.filter_by(cluster_name="test_cluster")
.first()
)
if cluster:
for datasource in (
db.session.query(DruidDatasource).filter_by(cluster_id=cluster.id).all()
):
db.session.delete(datasource)
db.session.delete(cluster)
db.session.commit()
cluster = DruidCluster(
cluster_name="test_cluster",
broker_host="localhost",
broker_port=7980,
metadata_last_refreshed=datetime.now(),
)
db.session.add(cluster)
cluster.get_datasources = PickableMock(return_value=["test_datasource"])
cluster.refresh_datasources()
cluster.datasources[0].merge_flag = True
metadata = cluster.datasources[0].latest_metadata()
self.assertEqual(len(metadata), 4)
db.session.commit()
view_menu_name = cluster.datasources[0].get_perm()
view_menu = security_manager.find_view_menu(view_menu_name)
permission = security_manager.find_permission("datasource_access")
pv = (
security_manager.get_session.query(security_manager.permissionview_model)
.filter_by(permission=permission, view_menu=view_menu)
.first()
)
assert pv is not None
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@patch("superset.connectors.druid.models.PyDruid")
def test_refresh_metadata(self, PyDruid):
self.login(username="admin")
cluster = self.get_cluster(PyDruid)
cluster.refresh_datasources()
datasource = cluster.datasources[0]
cols = db.session.query(DruidColumn).filter(
DruidColumn.datasource_id == datasource.id
)
for col in cols:
self.assertIn(col.column_name, SEGMENT_METADATA[0]["columns"].keys())
metrics = (
db.session.query(DruidMetric)
.filter(DruidMetric.datasource_id == datasource.id)
.filter(DruidMetric.metric_name.like("%__metric1"))
)
for metric in metrics:
agg, _ = metric.metric_name.split("__")
self.assertEqual(
json.loads(metric.json)["type"], "double{}".format(agg.capitalize())
)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@patch("superset.connectors.druid.models.PyDruid")
def test_refresh_metadata_augment_type(self, PyDruid):
self.login(username="admin")
cluster = self.get_cluster(PyDruid)
cluster.refresh_datasources()
metadata = SEGMENT_METADATA[:]
metadata[0]["columns"]["metric1"]["type"] = "LONG"
instance = PyDruid.return_value
instance.segment_metadata.return_value = metadata
cluster.refresh_datasources()
datasource = cluster.datasources[0]
column = (
db.session.query(DruidColumn)
.filter(DruidColumn.datasource_id == datasource.id)
.filter(DruidColumn.column_name == "metric1")
).one()
self.assertEqual(column.type, "LONG")
metrics = (
db.session.query(DruidMetric)
.filter(DruidMetric.datasource_id == datasource.id)
.filter(DruidMetric.metric_name.like("%__metric1"))
)
for metric in metrics:
agg, _ = metric.metric_name.split("__")
self.assertEqual(metric.json_obj["type"], "long{}".format(agg.capitalize()))
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@patch("superset.connectors.druid.models.PyDruid")
def test_refresh_metadata_augment_verbose_name(self, PyDruid):
self.login(username="admin")
cluster = self.get_cluster(PyDruid)
cluster.refresh_datasources()
datasource = cluster.datasources[0]
metrics = (
db.session.query(DruidMetric)
.filter(DruidMetric.datasource_id == datasource.id)
.filter(DruidMetric.metric_name.like("%__metric1"))
)
for metric in metrics:
metric.verbose_name = metric.metric_name
db.session.commit()
# The verbose name should not change during a refresh.
cluster.refresh_datasources()
datasource = cluster.datasources[0]
metrics = (
db.session.query(DruidMetric)
.filter(DruidMetric.datasource_id == datasource.id)
.filter(DruidMetric.metric_name.like("%__metric1"))
)
for metric in metrics:
self.assertEqual(metric.verbose_name, metric.metric_name)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
def test_urls(self):
cluster = self.get_test_cluster_obj()
self.assertEqual(
cluster.get_base_url("localhost", "9999"), "http://localhost:9999"
)
self.assertEqual(
cluster.get_base_url("http://localhost", "9999"), "http://localhost:9999"
)
self.assertEqual(
cluster.get_base_url("https://localhost", "9999"), "https://localhost:9999"
)
self.assertEqual(
cluster.get_base_broker_url(), "http://localhost:7980/druid/v2"
)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@patch("superset.connectors.druid.models.PyDruid")
def test_druid_time_granularities(self, PyDruid):
self.login(username="admin")
cluster = self.get_cluster(PyDruid)
cluster.refresh_datasources()
cluster.refresh_datasources(merge_flag=True)
datasource_id = cluster.datasources[0].id
db.session.commit()
nres = [
list(v["event"].items()) + [("timestamp", v["timestamp"])]
for v in GB_RESULT_SET
]
nres = [dict(v) for v in nres]
import pandas as pd
df = pd.DataFrame(nres)
instance = PyDruid.return_value
instance.export_pandas.return_value = df
instance.query_dict = {}
instance.query_builder.last_query.query_dict = {}
form_data = {
"viz_type": "table",
"since": "7 days ago",
"until": "now",
"metrics": ["count"],
"groupby": [],
"include_time": "true",
}
granularity_map = {
"5 seconds": "PT5S",
"30 seconds": "PT30S",
"1 minute": "PT1M",
"5 minutes": "PT5M",
"1 hour": "PT1H",
"6 hour": "PT6H",
"one day": "P1D",
"1 day": "P1D",
"7 days": "P7D",
"week": "P1W",
"week_starting_sunday": "P1W",
"week_ending_saturday": "P1W",
"month": "P1M",
"quarter": "P3M",
"year": "P1Y",
}
url = "/superset/explore_json/druid/{}/".format(datasource_id)
for granularity_mapping in granularity_map:
form_data["granularity"] = granularity_mapping
self.get_json_resp(url, {"form_data": json.dumps(form_data)})
self.assertEqual(
granularity_map[granularity_mapping],
instance.timeseries.call_args[1]["granularity"]["period"],
)
@unittest.skipUnless(
SupersetTestCase.is_module_installed("pydruid"), "pydruid not installed"
)
@patch("superset.connectors.druid.models.PyDruid")
def test_external_metadata(self, PyDruid):
self.login(username="admin")
self.login(username="admin")
cluster = self.get_cluster(PyDruid)
cluster.refresh_datasources()
datasource = cluster.datasources[0]
url = "/datasource/external_metadata/druid/{}/".format(datasource.id)
resp = self.get_json_resp(url)
col_names = {o.get("name") for o in resp}
self.assertEqual(col_names, {"__time", "dim1", "dim2", "metric1"})
class TestDruidViewEnabling(SupersetTestCase):
def test_druid_disabled(self):
with patch.object(Druid, "is_enabled", return_value=False):
self.login("admin")
uri = "/druid/refresh_datasources/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
def test_druid_enabled(self):
with patch.object(Druid, "is_enabled", return_value=True):
self.login("admin")
uri = "/druid/refresh_datasources/"
rv = self.client.get(uri)
self.assertLess(rv.status_code, 400)
def test_druid_cluster_disabled(self):
with patch.object(DruidClusterModelView, "is_enabled", return_value=False):
self.login("admin")
uri = "/druidclustermodelview/list/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
def test_druid_cluster_enabled(self):
with patch.object(DruidClusterModelView, "is_enabled", return_value=True):
self.login("admin")
uri = "/druidclustermodelview/list/"
rv = self.client.get(uri)
self.assertLess(rv.status_code, 400)
def test_druid_column_disabled(self):
with patch.object(DruidColumnInlineView, "is_enabled", return_value=False):
self.login("admin")
uri = "/druidcolumninlineview/list/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
def test_druid_column_enabled(self):
with patch.object(DruidColumnInlineView, "is_enabled", return_value=True):
self.login("admin")
uri = "/druidcolumninlineview/list/"
rv = self.client.get(uri)
self.assertLess(rv.status_code, 400)
def test_druid_datasource_disabled(self):
with patch.object(DruidDatasourceModelView, "is_enabled", return_value=False):
self.login("admin")
uri = "/druiddatasourcemodelview/list/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
def test_druid_datasource_enabled(self):
with patch.object(DruidDatasourceModelView, "is_enabled", return_value=True):
self.login("admin")
uri = "/druiddatasourcemodelview/list/"
rv = self.client.get(uri)
self.assertLess(rv.status_code, 400)
def test_druid_metric_disabled(self):
with patch.object(DruidMetricInlineView, "is_enabled", return_value=False):
self.login("admin")
uri = "/druidmetricinlineview/list/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
def test_druid_metric_enabled(self):
with patch.object(DruidMetricInlineView, "is_enabled", return_value=True):
self.login("admin")
uri = "/druidmetricinlineview/list/"
rv = self.client.get(uri)
self.assertLess(rv.status_code, 400)
if __name__ == "__main__":
unittest.main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Starting point for routing EC2 requests.
"""
import urlparse
from eventlet.green import httplib
from oslo.config import cfg
import webob
import webob.dec
import webob.exc
from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova.api.ec2 import faults
from nova.api import validator
from nova import context
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import memorycache
from nova.openstack.common import timeutils
from nova import utils
from nova import wsgi
LOG = logging.getLogger(__name__)
ec2_opts = [
cfg.IntOpt('lockout_attempts',
default=5,
help='Number of failed auths before lockout.'),
cfg.IntOpt('lockout_minutes',
default=15,
help='Number of minutes to lockout if triggered.'),
cfg.IntOpt('lockout_window',
default=15,
help='Number of minutes for lockout window.'),
cfg.StrOpt('keystone_ec2_url',
default='http://localhost:5000/v2.0/ec2tokens',
help='URL to get token from ec2 request.'),
cfg.BoolOpt('ec2_private_dns_show_ip',
default=False,
help='Return the IP address as private dns hostname in '
'describe instances'),
cfg.BoolOpt('ec2_strict_validation',
default=True,
help='Validate security group names'
' according to EC2 specification'),
cfg.IntOpt('ec2_timestamp_expiry',
default=300,
help='Time in seconds before ec2 timestamp expires'),
]
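# For illustration, these options land in the [DEFAULT] section of nova.conf,
# e.g. (the keystone host below is hypothetical):
#     [DEFAULT]
#     lockout_attempts = 5
#     lockout_minutes = 15
#     lockout_window = 15
#     keystone_ec2_url = http://keystone.example.com:5000/v2.0/ec2tokens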
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
## Fault Wrapper around all EC2 requests ##
class FaultWrapper(wsgi.Middleware):
"""Calls the middleware stack, captures any exceptions into faults."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
except Exception as ex:
LOG.exception(_("FaultWrapper: %s"), unicode(ex))
return faults.Fault(webob.exc.HTTPInternalServerError())
class RequestLogging(wsgi.Middleware):
"""Access-Log akin logging for all EC2 API requests."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
start = timeutils.utcnow()
rv = req.get_response(self.application)
self.log_request_completion(rv, req, start)
return rv
def log_request_completion(self, response, request, start):
apireq = request.environ.get('ec2.request', None)
if apireq:
controller = apireq.controller
action = apireq.action
else:
controller = None
action = None
ctxt = request.environ.get('nova.context', None)
delta = timeutils.utcnow() - start
seconds = delta.seconds
microseconds = delta.microseconds
LOG.info(
"%s.%ss %s %s %s %s:%s %s [%s] %s %s",
seconds,
microseconds,
request.remote_addr,
request.method,
"%s%s" % (request.script_name, request.path_info),
controller,
action,
response.status_int,
request.user_agent,
request.content_type,
response.content_type,
context=ctxt)
class Lockout(wsgi.Middleware):
"""Lockout for x minutes on y failed auths in a z minute period.
    x = lockout_minutes flag
    y = lockout_attempts flag
    z = lockout_window flag
    Uses memcached if the memcached_servers flag is set, otherwise it
uses a very simple in-process cache. Due to the simplicity of
the implementation, the timeout window is started with the first
    failed request, so it will block if there are y failed logins within
that period.
There is a possible race condition where simultaneous requests could
sneak in before the lockout hits, but this is extremely rare and would
only result in a couple of extra failed attempts.
"""
def __init__(self, application):
"""middleware can use fake for testing."""
self.mc = memorycache.get_client()
super(Lockout, self).__init__(application)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
access_key = str(req.params['AWSAccessKeyId'])
failures_key = "authfailures-%s" % access_key
failures = int(self.mc.get(failures_key) or 0)
if failures >= CONF.lockout_attempts:
detail = _("Too many failed authentications.")
raise webob.exc.HTTPForbidden(detail=detail)
res = req.get_response(self.application)
if res.status_int == 403:
failures = self.mc.incr(failures_key)
if failures is None:
# NOTE(vish): To use incr, failures has to be a string.
self.mc.set(failures_key, '1', time=CONF.lockout_window * 60)
elif failures >= CONF.lockout_attempts:
LOG.warn(_('Access key %(access_key)s has had %(failures)d '
'failed authentications and will be locked out '
'for %(lock_mins)d minutes.'),
{'access_key': access_key,
'failures': failures,
'lock_mins': CONF.lockout_minutes})
self.mc.set(failures_key, str(failures),
time=CONF.lockout_minutes * 60)
return res
class EC2KeystoneAuth(wsgi.Middleware):
"""Authenticate an EC2 request with keystone and convert to context."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
request_id = context.generate_request_id()
signature = req.params.get('Signature')
if not signature:
msg = _("Signature not provided")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
access = req.params.get('AWSAccessKeyId')
if not access:
msg = _("Access key not provided")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
# Make a copy of args for authentication and signature verification.
auth_params = dict(req.params)
# Not part of authentication args
auth_params.pop('Signature')
cred_dict = {
'access': access,
'signature': signature,
'host': req.host,
'verb': req.method,
'path': req.path,
'params': auth_params,
}
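        # Illustrative request body sent to Keystone below: with the default
        # keystone_ec2_url (".../v2.0/ec2tokens") the credentials are wrapped
        # as {"ec2Credentials": {"access": ..., "signature": ..., "host": ...,
        # "verb": ..., "path": ..., "params": {...}}}, otherwise they are
        # nested under {"auth": {"OS-KSEC2:ec2Credentials": {...}}}.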
if "ec2" in CONF.keystone_ec2_url:
creds = {'ec2Credentials': cred_dict}
else:
creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
creds_json = jsonutils.dumps(creds)
headers = {'Content-Type': 'application/json'}
o = urlparse.urlparse(CONF.keystone_ec2_url)
if o.scheme == "http":
conn = httplib.HTTPConnection(o.netloc)
else:
conn = httplib.HTTPSConnection(o.netloc)
conn.request('POST', o.path, body=creds_json, headers=headers)
response = conn.getresponse()
data = response.read()
if response.status != 200:
if response.status == 401:
msg = response.reason
else:
msg = _("Failure communicating with keystone")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=response.status)
result = jsonutils.loads(data)
conn.close()
try:
token_id = result['access']['token']['id']
user_id = result['access']['user']['id']
project_id = result['access']['token']['tenant']['id']
user_name = result['access']['user'].get('name')
project_name = result['access']['token']['tenant'].get('name')
roles = [role['name'] for role
in result['access']['user']['roles']]
except (AttributeError, KeyError) as e:
LOG.exception(_("Keystone failure: %s") % e)
msg = _("Failure communicating with keystone")
return faults.ec2_error_response(request_id, "AuthFailure", msg,
status=400)
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For',
remote_address)
catalog = result['access']['serviceCatalog']
ctxt = context.RequestContext(user_id,
project_id,
user_name=user_name,
project_name=project_name,
roles=roles,
auth_token=token_id,
remote_address=remote_address,
service_catalog=catalog)
req.environ['nova.context'] = ctxt
return self.application
class NoAuth(wsgi.Middleware):
"""Add user:project as 'nova.context' to WSGI environ."""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if 'AWSAccessKeyId' not in req.params:
raise webob.exc.HTTPBadRequest()
user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
project_id = project_id or user_id
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
ctx = context.RequestContext(user_id,
project_id,
is_admin=True,
remote_address=remote_address)
req.environ['nova.context'] = ctx
return self.application
class Requestify(wsgi.Middleware):
def __init__(self, app, controller):
super(Requestify, self).__init__(app)
self.controller = importutils.import_object(controller)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Version', 'Timestamp']
args = dict(req.params)
try:
expired = ec2utils.is_ec2_timestamp_expired(req.params,
expires=CONF.ec2_timestamp_expiry)
if expired:
msg = _("Timestamp failed validation.")
LOG.exception(msg)
raise webob.exc.HTTPForbidden(detail=msg)
# Raise KeyError if omitted
action = req.params['Action']
# Fix bug lp:720157 for older (version 1) clients
version = req.params['SignatureVersion']
if int(version) == 1:
non_args.remove('SignatureMethod')
if 'SignatureMethod' in args:
args.pop('SignatureMethod')
for non_arg in non_args:
# Remove, but raise KeyError if omitted
args.pop(non_arg)
except KeyError:
raise webob.exc.HTTPBadRequest()
except exception.InvalidRequest as err:
raise webob.exc.HTTPBadRequest(explanation=unicode(err))
LOG.debug(_('action: %s'), action)
for key, value in args.items():
LOG.debug(_('arg: %(key)s\t\tval: %(value)s'),
{'key': key, 'value': value})
# Success!
api_request = apirequest.APIRequest(self.controller, action,
req.params['Version'], args)
req.environ['ec2.request'] = api_request
return self.application
class Authorizer(wsgi.Middleware):
"""Authorize an EC2 API request.
    Returns an HTTP 401 if the 'ec2.action' on the 'ec2.controller' in the
    WSGI environ may not be executed in the 'nova.context'.
"""
def __init__(self, application):
super(Authorizer, self).__init__(application)
self.action_roles = {
'CloudController': {
'DescribeAvailabilityZones': ['all'],
'DescribeRegions': ['all'],
'DescribeSnapshots': ['all'],
'DescribeKeyPairs': ['all'],
'CreateKeyPair': ['all'],
'DeleteKeyPair': ['all'],
'DescribeSecurityGroups': ['all'],
'ImportKeyPair': ['all'],
'AuthorizeSecurityGroupIngress': ['netadmin'],
'RevokeSecurityGroupIngress': ['netadmin'],
'CreateSecurityGroup': ['netadmin'],
'DeleteSecurityGroup': ['netadmin'],
'GetConsoleOutput': ['projectmanager', 'sysadmin'],
'DescribeVolumes': ['projectmanager', 'sysadmin'],
'CreateVolume': ['projectmanager', 'sysadmin'],
'AttachVolume': ['projectmanager', 'sysadmin'],
'DetachVolume': ['projectmanager', 'sysadmin'],
'DescribeInstances': ['all'],
'DescribeAddresses': ['all'],
'AllocateAddress': ['netadmin'],
'ReleaseAddress': ['netadmin'],
'AssociateAddress': ['netadmin'],
'DisassociateAddress': ['netadmin'],
'RunInstances': ['projectmanager', 'sysadmin'],
'TerminateInstances': ['projectmanager', 'sysadmin'],
'RebootInstances': ['projectmanager', 'sysadmin'],
'UpdateInstance': ['projectmanager', 'sysadmin'],
'StartInstances': ['projectmanager', 'sysadmin'],
'StopInstances': ['projectmanager', 'sysadmin'],
'DeleteVolume': ['projectmanager', 'sysadmin'],
'DescribeImages': ['all'],
'DeregisterImage': ['projectmanager', 'sysadmin'],
'RegisterImage': ['projectmanager', 'sysadmin'],
'DescribeImageAttribute': ['all'],
'ModifyImageAttribute': ['projectmanager', 'sysadmin'],
'UpdateImage': ['projectmanager', 'sysadmin'],
'CreateImage': ['projectmanager', 'sysadmin'],
},
'AdminController': {
# All actions have the same permission: ['none'] (the default)
# superusers will be allowed to run them
# all others will get HTTPUnauthorized.
},
}
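        # Illustrative effect of the table above combined with
        # _matches_any_role(): an admin context may call anything, any
        # authenticated context may call actions marked ['all'] such as
        # DescribeRegions, while e.g. AllocateAddress additionally requires
        # the 'netadmin' role; AdminController actions default to ['none'].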
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['nova.context']
controller = req.environ['ec2.request'].controller.__class__.__name__
action = req.environ['ec2.request'].action
allowed_roles = self.action_roles[controller].get(action, ['none'])
if self._matches_any_role(context, allowed_roles):
return self.application
else:
LOG.audit(_('Unauthorized request for controller=%(controller)s '
'and action=%(action)s'),
{'controller': controller, 'action': action},
context=context)
raise webob.exc.HTTPUnauthorized()
def _matches_any_role(self, context, roles):
"""Return True if any role in roles is allowed in context."""
if context.is_admin:
return True
if 'all' in roles:
return True
if 'none' in roles:
return False
return any(role in context.roles for role in roles)
class Validator(wsgi.Middleware):
def validate_ec2_id(val):
if not validator.validate_str()(val):
return False
try:
ec2utils.ec2_id_to_id(val)
except exception.InvalidEc2Id:
return False
return True
validator.validate_ec2_id = validate_ec2_id
validator.DEFAULT_VALIDATOR = {
'instance_id': validator.validate_ec2_id,
'volume_id': validator.validate_ec2_id,
'image_id': validator.validate_ec2_id,
'attribute': validator.validate_str(),
'image_location': validator.validate_image_path,
'public_ip': utils.is_valid_ipv4,
'region_name': validator.validate_str(),
'group_name': validator.validate_str(max_length=255),
'group_description': validator.validate_str(max_length=255),
'size': validator.validate_int(),
'user_data': validator.validate_user_data
}
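    # Illustrative behaviour: a request whose 'instance_id' argument does not
    # parse as an EC2 id (e.g. "not-an-id") fails validate_ec2_id, so
    # __call__ below returns HTTPBadRequest instead of invoking the API.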
def __init__(self, application):
super(Validator, self).__init__(application)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if validator.validate(req.environ['ec2.request'].args,
validator.DEFAULT_VALIDATOR):
return self.application
else:
raise webob.exc.HTTPBadRequest()
def exception_to_ec2code(ex):
"""Helper to extract EC2 error code from exception.
For other than EC2 exceptions (those without ec2_code attribute),
use exception name.
"""
if hasattr(ex, 'ec2_code'):
code = ex.ec2_code
else:
code = type(ex).__name__
return code
def ec2_error_ex(ex, req, code=None, message=None, unexpected=False):
"""
Return an EC2 error response based on passed exception and log
the exception on an appropriate log level:
* DEBUG: expected errors
* ERROR: unexpected errors
All expected errors are treated as client errors and 4xx HTTP
status codes are always returned for them.
    Unexpected 5xx errors may contain sensitive information;
    suppress their messages for security.
"""
if not code:
code = exception_to_ec2code(ex)
status = getattr(ex, 'code', None)
if not status:
status = 500
if unexpected:
log_fun = LOG.error
if ex.args and status < 500:
log_msg = _("Unexpected %(ex_name)s raised: %(ex_str)s")
else:
log_msg = _("Unexpected %(ex_name)s raised")
else:
log_fun = LOG.debug
if ex.args:
log_msg = _("%(ex_name)s raised: %(ex_str)s")
else:
log_msg = _("%(ex_name)s raised")
# NOTE(jruzicka): For compatibility with EC2 API, treat expected
# exceptions as client (4xx) errors. The exception error code is 500
# by default and most exceptions inherit this from NovaException even
# though they are actually client errors in most cases.
if status >= 500:
status = 400
context = req.environ['nova.context']
request_id = context.request_id
log_msg_args = {
'ex_name': type(ex).__name__,
'ex_str': unicode(ex)
}
log_fun(log_msg % log_msg_args, context=context)
if ex.args and not message and (not unexpected or status < 500):
message = unicode(ex.args[0])
if unexpected:
# Log filtered environment for unexpected errors.
env = req.environ.copy()
for k in env.keys():
if not isinstance(env[k], basestring):
env.pop(k)
log_fun(_('Environment: %s') % jsonutils.dumps(env))
if not message:
message = _('Unknown error occurred.')
return faults.ec2_error_response(request_id, code, message, status=status)
class Executor(wsgi.Application):
"""Execute an EC2 API request.
Executes 'ec2.action' upon 'ec2.controller', passing 'nova.context' and
'ec2.action_args' (all variables in WSGI environ.) Returns an XML
response, or a 400 upon failure.
"""
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['nova.context']
api_request = req.environ['ec2.request']
result = None
try:
result = api_request.invoke(context)
except exception.InstanceNotFound as ex:
ec2_id = ec2utils.id_to_ec2_inst_id(ex.kwargs['instance_id'])
message = ex.msg_fmt % {'instance_id': ec2_id}
return ec2_error_ex(ex, req, message=message)
except exception.VolumeNotFound as ex:
ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id'])
message = ex.msg_fmt % {'volume_id': ec2_id}
return ec2_error_ex(ex, req, message=message)
except exception.SnapshotNotFound as ex:
ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id'])
message = ex.msg_fmt % {'snapshot_id': ec2_id}
return ec2_error_ex(ex, req, message=message)
except (exception.CannotDisassociateAutoAssignedFloatingIP,
exception.FloatingIpAssociated,
exception.FloatingIpNotFound,
exception.ImageNotActive,
exception.InvalidInstanceIDMalformed,
exception.InvalidKeypair,
exception.InvalidParameterValue,
exception.InvalidPortRange,
exception.InvalidVolume,
exception.KeyPairExists,
exception.KeypairNotFound,
exception.MissingParameter,
exception.NoFloatingIpInterface,
exception.NoMoreFixedIps,
exception.NotAuthorized,
                exception.QuotaError,
                exception.SecurityGroupExists,
                exception.SecurityGroupLimitExceeded,
                exception.SecurityGroupRuleExists,
exception.VolumeUnattached,
# Following aren't translated to valid EC2 errors.
exception.ImageNotFound,
exception.ImageNotFoundEC2,
exception.InvalidAttribute,
exception.InvalidRequest,
exception.NotFound) as ex:
return ec2_error_ex(ex, req)
except Exception as ex:
return ec2_error_ex(ex, req, unexpected=True)
else:
resp = webob.Response()
resp.status = 200
resp.headers['Content-Type'] = 'text/xml'
resp.body = str(result)
return resp
|
|
#!/usr/bin/env python
# encoding: utf-8
class Stack(object):
"""Docstring for Stack """
def __init__(self):
"""@todo: to be defined1 """
self.items = []
def pushFromList(self, list):
"""Push the list in the stack
:param list: a list
"""
for i in list[::-1]:
self.push(i)
def isEmpty(self):
""" Says if the stack is empty
:returns: @todo
"""
return self.items == []
def push(self, item):
"""Push an item in the stack
:param item: @todo
:returns: @todo
"""
self.items.append(item)
def pop(self):
"""Getting the last item and remove it
:returns: last item
"""
return self.items.pop()
    def peek(self, posi=0):
        """Return an item without removing it
        :param posi: which item to peek at: 0 (the last), 1 (the one before
        the last), ...
        :returns: the item
        """
return self.items[-1 - posi]
def __len__(self):
return len(self.items)
def __str__(self):
return str(self.items) + " -> "
def __add__(self, addList):
return self.items + addList
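# Example usage of Stack (illustrative):
#   s = Stack()
#   s.pushFromList([1, 2, 3])  # pushes 3, 2, 1 so 1 ends up on top
#   s.peek()                   # -> 1
#   s.pop()                    # -> 1
#   len(s)                     # -> 2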
def flatten_list(a, result=None):
"""Flattens a nested list.
>>> flatten_list([ [1, 2, [3, 4] ], [5, 6], 7])
[1, 2, 3, 4, 5, 6, 7]
"""
if result is None:
result = []
for x in a:
if isinstance(x, list):
flatten_list(x, result)
else:
result.append(x)
return result
def first_elem(ll):
"""Get the first element in imbricates lists
# TODO: Fonction pourrie mais j'ai pas le temps de faire mieux! |mar. janv. 28 22:32:22 CET 2014
:param list: list of lists of lists...
:returns: the first element
>>> first_elem(1)
1
>>> first_elem([1,2])
1
>>> first_elem([["abc"]])
'a'
>>> first_elem("abc")
'a'
>>> first_elem([[[1,2],[3,4]], [5,6]])
1
>>> first_elem([[["ab",2],[3,4]], [5,6]])
'a'
"""
if hasattr(ll, '__contains__'):
if len(ll) == 1 and type(ll) == str:
return ll[0]
else:
return first_elem(ll[0])
else:
return ll
def last_elem(ll):
"""Get the last element in imbricates lists
# TODO: Fonction pourrie mais j'ai pas le temps de faire mieux! |mar. janv. 28 22:32:22 CET 2014
:param list: list of lists of lists...
:returns: the last element
>>> last_elem(1)
1
>>> last_elem([1,2])
2
>>> last_elem([["abc"]])
'c'
>>> last_elem("abc")
'c'
>>> last_elem([[[1,2],[3,4]], [5,6]])
6
>>> last_elem([[["ab",2],[3,4]], [5,6]])
6
"""
if hasattr(ll, '__contains__'):
if len(ll) == 1 and type(ll) == str:
return ll[-1]
else:
return last_elem(ll[-1])
else:
return ll
def expand_list(list_list):
"""Expand list of list
>>> expand_list([1,2,[3,4],5,[6,7,8]])
[[1, 2, 3, 5, 6], [1, 2, 4, 5, 7], [1, 2, 4, 5, 8]]
>>> expand_list([1,2,4,5,6,7,8])
[[1, 2, 4, 5, 6, 7, 8]]
"""
list_in_list = [i for i in list_list if type(i) == list].copy()
try:
nbr_ans_list = max([len(i) for i in list_in_list])
ans = [list_list.copy() for i in range(nbr_ans_list)]
for (i,l) in enumerate(ans):
for (j,e) in enumerate(l):
if type(e) == list:
ans[i][j] = e[min(i,len(e)-1)]
    # If there is no list inside the list (2nd example)
except ValueError:
ans = [list_list]
return ans
def add_in_dict(dict1, dict2):
"""Merge dictionary keys and add the content from dict1 and dict2
:param dict1: first dictionary
:param dict2: second dictionary
:returns: merged and added dictionary
>>> add_in_dict({'a':1, 'b':2}, {'c':3, 'd': 4}) == {'d': 4, 'a': 1, 'c': 3, 'b': 2}
True
>>> add_in_dict({'a':1, 'b':2}, {'a':3, 'b': 4}) == {'a': 4, 'b': 6}
True
>>> add_in_dict({'a':1, 'b':2}, {'a':3, 'c': 4}) == {'a': 4, 'b': 2, 'c': 4}
True
"""
new_dict = {}
new_dict.update(dict1)
for (k,v) in dict2.items():
if k in new_dict.keys():
new_dict[k] += v
else:
new_dict[k] = v
return new_dict
def remove_in_dict(d, value=0):
    """In a dictionary, remove keys which have a certain value
    :param d: the dictionary
    :param value: the value to remove
    :returns: new dictionary without the unwanted value
>>> remove_in_dict({'b': 1, 'a': 0}) == {'b': 1}
True
>>> remove_in_dict({'b': 1, 'a': 0}, 1) == {'a': 0}
True
"""
new_dict = {}
for (k,v) in d.items():
if v != value:
new_dict[k] = v
return new_dict
def convolution_dict(D1, D2, op=lambda x, y: x * y,
                     op_key=lambda x, y: x + y,
                     commutative=True, op_twice=lambda x, y: x + y):
"""Convolution of two dictionaries
:param D1: First dictionary
:param D2: Second dictionary
    :param op: operation to perform on the values
    :param op_key: operation to perform on the keys
    :param commutative: whether the key operation is commutative
    :param op_twice: operation on the values if the same key appears twice
>>> convolution_dict({"a": 1, "b":3}, {"a":2, "":4}) == {"aa":2, "a": 4, "ba":6, "b":12}
True
>>> convolution_dict({"a": 1, "b":3}, {"a":2, "b":4}) == {"aa":2, "ab":10, "bb":12}
True
>>> convolution_dict({"a": 1, "b":3}, {"a":2, "b":4}, commutative = False) == {"aa":2, "ab":10, "bb":12}
False
>>> convolution_dict({"a": 1, "b":3}, {"a":2, "b":4}, commutative = False) == {"aa":2, "ab":4,"ba":6, "bb":12}
True
>>> convolution_dict({"a": 1, "b":3}, {"a":2, "b":4}, \
op_twice = lambda x,y:[x,y]) == {"aa":2, "ab":[4,6], "bb":12}
True
"""
new_dict = {}
for k1 in sorted(D1.keys()):
for k2 in sorted(D2.keys()):
if op_key(k1,k2) in new_dict.keys():
key = op_key(k1,k2)
new_dict[key] = op_twice(new_dict[key], op(D1[k1],D2[k2]))
elif op_key(k2,k1) in new_dict.keys() and commutative:
key = op_key(k2,k1)
new_dict[key] = op_twice(new_dict[key], op(D1[k1],D2[k2]))
else:
key = op_key(k1,k2)
new_dict[key] = op(D1[k1],D2[k2])
return new_dict
if __name__ == '__main__':
import doctest
doctest.testmod()
# -----------------------------
# Settings for 'vim'
# vim:set autoindent expandtab tabstop=4 shiftwidth=4:
# cursor: 16 del
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helpers for comparing version strings.
"""
import functools
import inspect
import pkg_resources
import six
from trove.openstack.common._i18n import _
from trove.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class deprecated(object):
"""A decorator to mark callables as deprecated.
This decorator logs a deprecation message when the callable it decorates is
used. The message will include the release where the callable was
deprecated, the release where it may be removed and possibly an optional
replacement.
Examples:
1. Specifying the required deprecated release
>>> @deprecated(as_of=deprecated.ICEHOUSE)
... def a(): pass
2. Specifying a replacement:
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
... def b(): pass
3. Specifying the release where the functionality may be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
... def c(): pass
4. Specifying the deprecated functionality will not be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0)
... def d(): pass
5. Specifying a replacement, deprecated functionality will not be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0)
... def e(): pass
"""
# NOTE(morganfainberg): Bexar is used for unit test purposes, it is
# expected we maintain a gap between Bexar and Folsom in this list.
BEXAR = 'B'
FOLSOM = 'F'
GRIZZLY = 'G'
HAVANA = 'H'
ICEHOUSE = 'I'
JUNO = 'J'
KILO = 'K'
_RELEASES = {
# NOTE(morganfainberg): Bexar is used for unit test purposes, it is
# expected we maintain a gap between Bexar and Folsom in this list.
'B': 'Bexar',
'F': 'Folsom',
'G': 'Grizzly',
'H': 'Havana',
'I': 'Icehouse',
'J': 'Juno',
'K': 'Kilo',
}
_deprecated_msg_with_alternative = _(
'%(what)s is deprecated as of %(as_of)s in favor of '
'%(in_favor_of)s and may be removed in %(remove_in)s.')
_deprecated_msg_no_alternative = _(
'%(what)s is deprecated as of %(as_of)s and may be '
'removed in %(remove_in)s. It will not be superseded.')
_deprecated_msg_with_alternative_no_removal = _(
'%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')
_deprecated_msg_with_no_alternative_no_removal = _(
'%(what)s is deprecated as of %(as_of)s. It will not be superseded.')
def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
"""Initialize decorator
:param as_of: the release deprecating the callable. Constants
            are defined in this class for convenience.
:param in_favor_of: the replacement for the callable (optional)
:param remove_in: an integer specifying how many releases to wait
before removing (default: 2)
:param what: name of the thing being deprecated (default: the
callable's name)
"""
self.as_of = as_of
self.in_favor_of = in_favor_of
self.remove_in = remove_in
self.what = what
def __call__(self, func_or_cls):
if not self.what:
self.what = func_or_cls.__name__ + '()'
msg, details = self._build_message()
if inspect.isfunction(func_or_cls):
@six.wraps(func_or_cls)
def wrapped(*args, **kwargs):
LOG.deprecated(msg, details)
return func_or_cls(*args, **kwargs)
return wrapped
elif inspect.isclass(func_or_cls):
orig_init = func_or_cls.__init__
# TODO(tsufiev): change `functools` module to `six` as
# soon as six 1.7.4 (with fix for passing `assigned`
# argument to underlying `functools.wraps`) is released
            # and added to the trove-incubator requirements
@functools.wraps(orig_init, assigned=('__name__', '__doc__'))
def new_init(self, *args, **kwargs):
LOG.deprecated(msg, details)
orig_init(self, *args, **kwargs)
func_or_cls.__init__ = new_init
return func_or_cls
else:
raise TypeError('deprecated can be used only with functions or '
'classes')
def _get_safe_to_remove_release(self, release):
# TODO(dstanek): this method will have to be reimplemented once
        # we get to the X release because once we get to the Y
# release, what is Y+2?
new_release = chr(ord(release) + self.remove_in)
if new_release in self._RELEASES:
return self._RELEASES[new_release]
else:
return new_release
def _build_message(self):
details = dict(what=self.what,
as_of=self._RELEASES[self.as_of],
remove_in=self._get_safe_to_remove_release(self.as_of))
if self.in_favor_of:
details['in_favor_of'] = self.in_favor_of
if self.remove_in > 0:
msg = self._deprecated_msg_with_alternative
else:
# There are no plans to remove this function, but it is
# now deprecated.
msg = self._deprecated_msg_with_alternative_no_removal
else:
if self.remove_in > 0:
msg = self._deprecated_msg_no_alternative
else:
# There are no plans to remove this function, but it is
# now deprecated.
msg = self._deprecated_msg_with_no_alternative_no_removal
return msg, details
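# Illustrative: @deprecated(as_of=deprecated.ICEHOUSE) on a function a() with
# the default remove_in=2 resolves the removal release to 'Kilo'
# (chr(ord('I') + 2) == 'K'), so the logged message reads roughly
# "a() is deprecated as of Icehouse and may be removed in Kilo."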
def is_compatible(requested_version, current_version, same_major=True):
"""Determine whether `requested_version` is satisfied by
`current_version`; in other words, `current_version` is >=
`requested_version`.
:param requested_version: version to check for compatibility
:param current_version: version to check against
:param same_major: if True, the major version must be identical between
`requested_version` and `current_version`. This is used when a
major-version difference indicates incompatibility between the two
versions. Since this is the common-case in practice, the default is
True.
:returns: True if compatible, False if not
"""
requested_parts = pkg_resources.parse_version(requested_version)
current_parts = pkg_resources.parse_version(current_version)
if same_major and (requested_parts[0] != current_parts[0]):
return False
return current_parts >= requested_parts
|
|
## Introspective Adversarial Network (IAN) with randomized IAF
from math import sqrt
import os
import sys
import numpy as np
import lasagne
import lasagne.layers
import lasagne.layers.dnn
from lasagne.layers import SliceLayer as SL
from lasagne.layers import batch_norm as BN
from lasagne.layers import ElemwiseSumLayer as ESL
from lasagne.layers import ElemwiseMergeLayer as EML
from lasagne.layers import NonlinearityLayer as NL
from lasagne.layers import DenseLayer as DL
from lasagne.layers import Upscale2DLayer
from lasagne.init import Normal as initmethod
from lasagne.init import Orthogonal
from lasagne.nonlinearities import elu
from lasagne.nonlinearities import rectify as relu
from lasagne.nonlinearities import LeakyRectify as lrelu
from lasagne.nonlinearities import sigmoid
from lasagne.layers.dnn import Conv2DDNNLayer as C2D
from lasagne.layers.dnn import Pool2DDNNLayer as P2D
from lasagne.layers import TransposedConv2DLayer as TC2D
from lasagne.layers import ConcatLayer as CL
from theano import tensor as T
from theano.sandbox.cuda.basic_ops import (
as_cuda_ndarray_variable,
host_from_gpu,
gpu_contiguous,
HostFromGpu,
gpu_alloc_empty,
)
from theano.sandbox.cuda.dnn import (
GpuDnnConvDesc,
GpuDnnConv,
GpuDnnConvGradI,
dnn_conv,
dnn_pool,
)
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from gan.util.layers import (
MDBLOCK,
DeconvLayer,
MinibatchLayer,
beta_layer,
MADE,
IAFLayer,
GaussianSampleLayer,
MDCL,
)
CFG = {
'batch_size': 16,
'learning_rate': {
0: 0.0002,
25: 0.0001,
50: 0.00005,
75: 0.00001,
},
'optimizer': 'Adam',
'beta1': 0.5,
'update_ratio': 1,
'decay_rate': 0,
'reg': 1e-5,
'momentum': 0.9,
'shuffle': True,
'dims': (64,64),
'n_channels': 3,
'batches_per_chunk': 64,
'max_epochs': 80,
'checkpoint_every_nth': 1,
'num_latents': 100,
'recon_weight': 3.0,
'feature_weight': 1.0,
'dg_weight': 1.0,
'dd_weight': 1.0,
'agr_weight': 1.0,
'ags_weight': 1.0,
'n_shuffles': 1,
'ortho': 1e-3,
}
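# Illustrative: 'learning_rate' is keyed by epoch, so a training loop (not
# part of this module) would typically switch rates with something like
#   if epoch in CFG['learning_rate']:
#       lr = CFG['learning_rate'][epoch]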
def get_model(interp=False, dnn=True):
dims, n_channels = tuple(CFG['dims']), CFG['n_channels']
shape = (None, n_channels)+dims
l_in = lasagne.layers.InputLayer(shape=shape)
l_enc_conv1 = C2D(
incoming = l_in,
num_filters = 128,
filter_size = [5,5],
stride = [2,2],
pad = (2,2),
W = initmethod(0.02),
nonlinearity = lrelu(0.2),
name = 'enc_conv1'
)
l_enc_conv2 = BN(C2D(
incoming = l_enc_conv1,
num_filters = 256,
filter_size = [5,5],
stride = [2,2],
pad = (2,2),
W = initmethod(0.02),
nonlinearity = lrelu(0.2),
name = 'enc_conv2'
),name = 'bnorm2')
l_enc_conv3 = BN(C2D(
incoming = l_enc_conv2,
num_filters = 512,
filter_size = [5,5],
stride = [2,2],
pad = (2,2),
W = initmethod(0.02),
nonlinearity = lrelu(0.2),
name = 'enc_conv3'
),name = 'bnorm3')
l_enc_conv4 = BN(C2D(
incoming = l_enc_conv3,
num_filters = 1024,
filter_size = [5,5],
stride = [2,2],
pad = (2,2),
W = initmethod(0.02),
nonlinearity = lrelu(0.2),
name = 'enc_conv4'
),name = 'bnorm4')
l_enc_fc1 = BN(DL(
incoming = l_enc_conv4,
num_units = 1000,
W = initmethod(0.02),
nonlinearity = relu,
name = 'enc_fc1'
),
name = 'bnorm_enc_fc1')
# Define latent values
l_enc_mu,l_enc_logsigma = [BN(DL(incoming = l_enc_fc1,num_units=CFG['num_latents'],nonlinearity = None,name='enc_mu'),name='mu_bnorm'),
BN(DL(incoming = l_enc_fc1,num_units=CFG['num_latents'],nonlinearity = None,name='enc_logsigma'),name='ls_bnorm')]
l_Z_IAF = GaussianSampleLayer(l_enc_mu, l_enc_logsigma, name='l_Z_IAF')
l_IAF_mu,l_IAF_logsigma = [MADE(l_Z_IAF,[CFG['num_latents']],'l_IAF_mu'),MADE(l_Z_IAF,[CFG['num_latents']],'l_IAF_ls')]
l_Z = IAFLayer(l_Z_IAF,l_IAF_mu,l_IAF_logsigma,name='l_Z')
l_dec_fc2 = DL(
incoming = l_Z,
num_units = 512*16,
nonlinearity = lrelu(0.2),
W=initmethod(0.02),
name='l_dec_fc2')
l_unflatten = lasagne.layers.ReshapeLayer(
incoming = l_dec_fc2,
shape = ([0],512,4,4),
)
l_dec_conv1 = DeconvLayer(
incoming = l_unflatten,
num_filters = 512,
filter_size = [5,5],
stride = [2,2],
crop = (2,2),
W = initmethod(0.02),
nonlinearity = None,
name = 'dec_conv1'
)
l_dec_conv2a = MDBLOCK(incoming=l_dec_conv1,num_filters=512,scales=[0,2],name='dec_conv2a',nonlinearity=lrelu(0.2))
l_dec_conv2 = DeconvLayer(
incoming = l_dec_conv2a,
num_filters = 256,
filter_size = [5,5],
stride = [2,2],
crop = (2,2),
W = initmethod(0.02),
nonlinearity = None,
name = 'dec_conv2'
)
l_dec_conv3a = MDBLOCK(incoming=l_dec_conv2,num_filters=256,scales=[0,2,3],name='dec_conv3a',nonlinearity=lrelu(0.2))
l_dec_conv3 = DeconvLayer(
incoming = l_dec_conv3a,
num_filters = 128,
filter_size = [5,5],
stride = [2,2],
crop = (2,2),
W = initmethod(0.02),
nonlinearity = None,
name = 'dec_conv3'
)
l_dec_conv4a = MDBLOCK(incoming=l_dec_conv3,num_filters=128,scales=[0,2,3],name='dec_conv4a',nonlinearity=lrelu(0.2))
l_dec_conv4 = BN(DeconvLayer(
incoming = l_dec_conv4a,
num_filters = 128,
filter_size = [5,5],
stride = [2,2],
crop = (2,2),
W = initmethod(0.02),
nonlinearity = lrelu(0.2),
name = 'dec_conv4'
),name = 'bnorm_dc4')
R = NL(MDCL(l_dec_conv4,
num_filters=2,
scales = [2,3,4],
name = 'R'),sigmoid)
G = NL(ESL([MDCL(l_dec_conv4,
num_filters=2,
scales = [2,3,4],
name = 'G_a'
),
MDCL(R,
num_filters=2,
scales = [2,3,4],
name = 'G_b'
)]),sigmoid)
B = NL(ESL([MDCL(l_dec_conv4,
num_filters=2,
scales = [2,3,4],
name = 'B_a'
),
MDCL(CL([R,G]),
num_filters=2,
scales = [2,3,4],
name = 'B_b'
)]),sigmoid)
l_out=CL([beta_layer(SL(R,slice(0,1),1),SL(R,slice(1,2),1)),beta_layer(SL(G,slice(0,1),1),SL(G,slice(1,2),1)),beta_layer(SL(B,slice(0,1),1),SL(B,slice(1,2),1))])
minibatch_discrim = MinibatchLayer(lasagne.layers.GlobalPoolLayer(l_enc_conv4), num_kernels=500,name='minibatch_discrim')
l_discrim = DL(incoming = minibatch_discrim,
num_units = 3,
nonlinearity = lasagne.nonlinearities.softmax,
b = None,
W=initmethod(0.02),
name = 'discrimi')
return {'l_in': l_in,
'l_out': l_out,
'l_mu': l_enc_mu,
'l_ls': l_enc_logsigma,
'l_Z': l_Z,
'l_Z_IAF': l_Z_IAF,
'l_IAF_mu': l_IAF_mu,
'l_IAF_ls': l_IAF_logsigma,
'l_introspect': [l_enc_conv1, l_enc_conv2,l_enc_conv3,l_enc_conv4],
'l_discrim': l_discrim}
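# Minimal usage sketch (illustrative; assumes a cuDNN-enabled Theano backend
# and that gan.util.layers is importable):
#   model = get_model()
#   X = T.tensor4('X')
#   recon = lasagne.layers.get_output(model['l_out'], {model['l_in']: X})
#   z = lasagne.layers.get_output(model['l_Z'], {model['l_in']: X})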
|